text stringlengths 4 1.02M | meta dict |
|---|---|
import unittest
from whitepy.stack import Stack
class TestStack(unittest.TestCase):
    """Unit tests for whitepy's Stack container and its math operations.

    Fix: the original asserts compared ints and one-character strings with
    ``is`` (e.g. ``len(stack) is 2``, ``stack[-1] is 'b'``).  Identity
    comparison with literals only happens to work because CPython interns
    small ints and short strings; it is a SyntaxWarning since Python 3.8.
    All value comparisons now use ``==``.
    """

    def test_isempty(self):
        stack = Stack()
        assert stack.isempty() is True

    def test_push(self):
        stack = Stack()
        stack.push(True)
        assert stack[-1]

    def test_duplicate_empty(self):
        # dup() on an empty stack must be a no-op, not an error.
        stack = Stack()
        stack.dup()
        assert stack.isempty() is True

    def test_dupicate_nonempty(self):
        stack = Stack([True])
        stack.dup()
        assert len(stack) == 2 and stack[-1]

    def test_swap(self):
        # Swaps the two topmost elements.
        stack = Stack('abc')
        stack.swap()
        assert stack[-1] == 'b' and stack[-2] == 'c'

    def test_discard(self):
        # Drops the topmost element.
        stack = Stack('abc')
        stack.discard()
        assert len(stack) == 2 and stack[-1] == 'b'

    def test_add(self):
        stack = Stack([1, 1])
        stack.math.add()
        assert len(stack) == 1 and stack[-1] == 2

    def test_subtract(self):
        stack = Stack([3, 1])
        stack.math.subtract()
        assert len(stack) == 1 and stack[-1] == 2

    def test_multiply(self):
        stack = Stack([3, 2])
        stack.math.multiply()
        assert len(stack) == 1 and stack[-1] == 6

    def test_divide(self):
        stack = Stack([6, 2])
        stack.math.divide()
        assert len(stack) == 1 and stack[-1] == 3

    def test_modulo(self):
        stack = Stack([5, 3])
        stack.math.modulo()
        assert len(stack) == 1 and stack[-1] == 2

    def test_stack_opper_dup_add(self):
        # Combined operation: duplicate the top value, then add the pair.
        stack = Stack()
        stack.append(1)
        stack.dup()
        stack.math.add()
        assert len(stack) == 1 and stack[-1] == 2

    def test_stack_opper_swap_subtract(self):
        # Combined operation: swap the top two values, then subtract.
        stack = Stack()
        stack.append(5)
        stack.append(10)
        stack.swap()
        stack.math.subtract()
        assert len(stack) == 1 and stack[-1] == 5
| {
"content_hash": "a0a4ae58710874fd475f8c774ffc2bb1",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 52,
"avg_line_length": 25.43243243243243,
"alnum_prop": 0.5520722635494155,
"repo_name": "yasn77/whitepy",
"id": "04f5de5a7172600d8c944e88e73d2f1294619c12",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_stack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22136"
}
],
"symlink_target": ""
} |
from google.cloud import translate_v3
def sample_get_supported_languages():
    """Fetch the set of languages supported by Cloud Translation v3 and
    print the raw API response."""
    # Build the service client and the request, then issue the RPC and
    # dump whatever comes back.
    client = translate_v3.TranslationServiceClient()
    request = translate_v3.GetSupportedLanguagesRequest(parent="parent_value")
    print(client.get_supported_languages(request=request))
# [END translate_v3_generated_TranslationService_GetSupportedLanguages_sync]
| {
"content_hash": "0510bd88a821280e2c137472190b1c8f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 26.157894736842106,
"alnum_prop": 0.7323943661971831,
"repo_name": "googleapis/python-translate",
"id": "f3bd0209d301eb46898c70e67ead1167a83946a2",
"size": "1907",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/translate_v3_generated_translation_service_get_supported_languages_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "952045"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
} |
"""Reads iPhoto library info, and exports photos and movies."""
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import logging
import os
import re
import sys
import time
import unicodedata
from optparse import OptionParser
import MacOS
import appledata.iphotodata as iphotodata
import tilutil.exiftool as exiftool
import tilutil.systemutils as su
import tilutil.imageutils as imageutils
import phoshare.phoshare_version
# Maximum diff in file size to be not considered a change (to allow for
# meta data updates for example)
_MAX_FILE_DIFF = 60000

# Fudge factor for file modification times (seconds) — avoids flagging a
# re-export when filesystem timestamps differ only by copy granularity.
_MTIME_FUDGE = 3

# List of extensions for image formats that support EXIF data. Sources:
# - iPhoto help topic: About digital cameras that support RAW files
# - Apple RAW Support listing: http://www.apple.com/aperture/specs/raw.html
# - ExifTool supported formats (R/W only): http://www.sno.phy.queensu.ca/~phil/exiftool/#supported
_EXIF_EXTENSIONS = ('3fr', 'arw', 'ciff', 'cr2', 'crw', 'dcr', 'erf', 'jpg', 'jpeg', 'k25', 'kdc',
                    'nef', 'nrw', 'orf', 'pef', 'png', 'raf', 'raw', 'rw2', 'rwl', 'sr2', 'srf',
                    'srw', 'tif', 'tiff')

# create logger — module-wide logger; handlers are attached by the entry point.
_logger = logging.getLogger('google')
_logger.setLevel(logging.DEBUG)
def region_matches(region1, region2):
    """Tests if two regions (rectangles) match.

    Regions are sequences of coordinates; they match when they have the
    same length and every coordinate pair differs by at most 5e-7.
    """
    if len(region1) != len(region2):
        return False
    # Coordinates are floats, so compare with a small absolute tolerance.
    return all(abs(c1 - c2) <= 0.0000005
               for (c1, c2) in zip(region1, region2))
def delete_album_file(album_file, albumdirectory, msg, options):
    """sanity check - only delete from album directory.

    Recursively deletes album_file (file or directory) after confirming it
    lives under albumdirectory and the user/options allow deletion.

    Args:
        album_file: absolute path of the file or folder to delete.
        albumdirectory: export root; deletion is refused outside of it.
        msg: optional message printed before deleting (None to stay quiet).
        options: processing options (dryrun, delete confirmation, ...).

    Returns:
        True if the file was deleted (or would have been in dryrun mode).
    """
    if not album_file.startswith(albumdirectory):
        print >> sys.stderr, (
            "Internal error - attempting to delete file "
            "that is not in album directory:\n %s") % (su.fsenc(album_file))
        return False
    if msg:
        print "%s: %s" % (msg, su.fsenc(album_file))
    # should_delete() may prompt the user depending on options.
    if not imageutils.should_delete(options):
        return False
    if options.dryrun:
        return True
    try:
        if os.path.isdir(album_file):
            # Delete children first, then remove the now-empty directory.
            file_list = os.listdir(album_file)
            for subfile in file_list:
                delete_album_file(os.path.join(album_file, subfile),
                                  albumdirectory, msg, options)
            os.rmdir(album_file)
        else:
            os.remove(album_file)
        return True
    except OSError, ex:
        print >> sys.stderr, "Could not delete %s: %s" % (su.fsenc(album_file),
                                                          ex)
        return False
class ExportFile(object):
    """Describes an exported image: one iPhoto master and its destination
    file(s) in the export tree, plus the logic to keep them in sync."""

    def __init__(self, photo, container, export_directory, base_name, options):
        """Creates a new ExportFile object.

        Args:
            photo: the iPhoto image to export.
            container: the album/event the image belongs to.
            export_directory: destination folder for the exported file.
            base_name: file name (without extension) in the export folder.
            options: processing options.
        """
        self.photo = photo
        self.container = container
        # We cannot resize movie files.
        if options.size and not imageutils.is_movie_file(photo.image_path):
            self.size = options.size
            extension = 'jpg'
        else:
            self.size = None
            extension = su.getfileextension(photo.image_path)
        self.export_file = os.path.join(
            export_directory, base_name + '.' + extension)
        # Location of "Original" file, if any.
        originals_folder = u"Originals"
        if options.picasa:
            # Prefer .picasaoriginals when it already exists, or when no
            # plain Originals folder is present yet.
            if (os.path.exists(os.path.join(export_directory,
                                            u".picasaoriginals")) or
                    not os.path.exists(os.path.join(export_directory,
                                                    u"Originals"))):
                originals_folder = u".picasaoriginals"
        if photo.originalpath:
            self.original_export_file = os.path.join(
                export_directory, originals_folder, base_name + "." +
                su.getfileextension(photo.originalpath))
        else:
            self.original_export_file = None

    def get_photo(self):
        """Gets the associated iPhotoImage."""
        return self.photo

    def _check_need_to_export(self, source_file, options):
        """Returns true if the image file needs to be exported.

        Args:
            source_file: path to image file, with aliases resolved.
            options: processing options.
        """
        if not os.path.exists(self.export_file):
            return True
        # In link mode, check the inode.
        if options.link:
            export_stat = os.stat(self.export_file)
            source_stat = os.stat(source_file)
            if export_stat.st_ino != source_stat.st_ino:
                su.pout('Changed: %s: inodes don\'t match: %d vs. %d' %
                        (self.export_file, export_stat.st_ino, source_stat.st_ino))
                return True
        # Forward sync: re-export when the library copy is newer.
        if (not options.reverse
                and os.path.getmtime(self.export_file) + _MTIME_FUDGE <
                os.path.getmtime(source_file)):
            su.pout('Changed: %s: newer version is available: %s vs. %s' %
                    (self.export_file,
                     time.ctime(os.path.getmtime(self.export_file)),
                     time.ctime(os.path.getmtime(source_file))))
            return True
        # Reverse sync: flag when the exported copy is newer.
        if (options.reverse
                and os.path.getmtime(source_file) + _MTIME_FUDGE <
                os.path.getmtime(self.export_file)):
            su.pout('Changed: %s: newer version is available: %s vs. %s' %
                    (self.export_file,
                     time.ctime(os.path.getmtime(source_file)),
                     time.ctime(os.path.getmtime(self.export_file))))
            return True
        if not self.size and not options.reverse:
            # With creative renaming in iPhoto it is possible to get
            # stale files if titles get swapped between images. Double
            # check the size, allowing for some difference for meta data
            # changes made in the exported copy
            source_size = os.path.getsize(source_file)
            export_size = os.path.getsize(self.export_file)
            diff = abs(source_size - export_size)
            if diff > _MAX_FILE_DIFF or (diff > 32 and options.link):
                su.pout('Changed: %s: file size: %d vs. %d' %
                        (self.export_file, export_size, source_size))
                return True
        # In reverse mode, we don't check the file size (might have changed because
        # of Preview regeneration), so we look at the image dimensions instead to catch
        # some out-of-sync images.
        if options.reverse and su.getfileextension(self.export_file) in _EXIF_EXTENSIONS:
            (source_width, source_height) = imageutils.get_image_width_height(source_file)
            (export_width, export_height) = imageutils.get_image_width_height(self.export_file)
            if ((source_width and export_width and source_width != export_width) or
                    (source_height and export_height and source_height != export_height)):
                su.pout('Changed: %s: dimensions: %dx%d vs. %dx%d' % (
                    self.export_file, source_width, source_height, export_width, export_height))
                return True
        # In link mode, we don't need to check the modification date in the
        # database because we catch the changes by the size check above.
        #if (not options.link and
        #    datetime.datetime.fromtimestamp(os.path.getmtime(
        #        self.export_file)) < self.photo.mod_date):
        #    su.pout('Changed: %s: modified in iPhoto: %s vs. %s ' % (
        #        self.export_file,
        #        time.ctime(os.path.getmtime(self.export_file)),
        #        self.photo.mod_date))
        #    return True
        return False

    def _generate_original(self, options):
        """Exports the original file.

        Mirrors the change detection done for the main export file (inode,
        mtime, size) and copies/links the original when needed.
        """
        do_original_export = False
        export_dir = os.path.split(self.original_export_file)[0]
        if not os.path.exists(export_dir):
            su.pout("Creating folder " + export_dir)
            if not options.dryrun:
                os.mkdir(export_dir)
        original_source_file = su.resolve_alias(self.photo.originalpath)
        if os.path.exists(self.original_export_file):
            # In link mode, check the inode.
            if options.link:
                export_stat = os.stat(self.original_export_file)
                source_stat = os.stat(original_source_file)
                if export_stat.st_ino != source_stat.st_ino:
                    su.pout('Changed: %s: inodes don\'t match: %d vs. %d' %
                            (self.original_export_file, export_stat.st_ino, source_stat.st_ino))
                    do_original_export = True
            if (os.path.getmtime(self.original_export_file) + _MTIME_FUDGE <
                    os.path.getmtime(original_source_file)):
                su.pout('Changed: %s: newer version is available: %s vs. %s' %
                        (self.original_export_file,
                         time.ctime(os.path.getmtime(
                             self.original_export_file)),
                         time.ctime(os.path.getmtime(original_source_file))))
                do_original_export = True
            elif not self.size:
                # Note: originals use a stricter link-mode threshold (diff > 0)
                # than the main export file.
                source_size = os.path.getsize(original_source_file)
                export_size = os.path.getsize(self.original_export_file)
                diff = abs(source_size - export_size)
                if diff > _MAX_FILE_DIFF or (diff > 0 and options.link):
                    su.pout(u'Changed: %s: file size: %d vs. %d' %
                            (self.original_export_file,
                             export_size, source_size))
                    do_original_export = True
        else:
            do_original_export = True
        # iptc == 1: only check changed files; iptc == 2: check all files.
        do_iptc = (options.iptc == 1 and
                   do_original_export) or options.iptc == 2
        if do_iptc and (options.link or options.iptc_masters):
            if self.check_iptc_data(original_source_file, options,
                                    is_original=True, file_updated=do_original_export):
                do_original_export = True
        exists = True  # True if the file exists or was updated.
        if do_original_export:
            exists = imageutils.copy_or_link_file(original_source_file,
                                                  self.original_export_file,
                                                  options.dryrun,
                                                  options.link,
                                                  self.size,
                                                  options)
        else:
            _logger.debug(u'%s up to date.', self.original_export_file)
        if exists and do_iptc and not options.link:
            self.check_iptc_data(self.original_export_file, options,
                                 is_original=True, file_updated=do_original_export)

    def generate(self, options):
        """makes sure all files exist in other album, and generates if
        necessary."""
        try:
            source_file = su.resolve_alias(self.photo.image_path)
            do_export = self._check_need_to_export(source_file, options)
            # if we use links, we update the IPTC data in the original file
            do_iptc = (options.iptc == 1 and do_export) or options.iptc == 2
            if do_iptc and options.link:
                if self.check_iptc_data(source_file, options, file_updated=do_export):
                    do_export = True
            exists = True  # True if the file exists or was updated.
            if do_export:
                exists = imageutils.copy_or_link_file(source_file,
                                                      self.export_file,
                                                      options.dryrun,
                                                      options.link,
                                                      self.size,
                                                      options)
            else:
                _logger.debug(u'%s up to date.', self.export_file)
            # if we copy, we update the IPTC data in the copied file
            if exists and do_iptc and not options.link:
                self.check_iptc_data(self.export_file, options, file_updated=do_export)
            if (options.originals and self.photo.originalpath and
                    not self.photo.rotation_is_only_edit):
                self._generate_original(options)
        except (OSError, MacOS.Error) as ose:
            su.perr(u"Failed to export %s to %s: %s" % (self.photo.image_path, self.export_file,
                                                        ose))

    def get_export_keywords(self, do_face_keywords):
        """Returns the list of keywords that should be in the exported image.

        Args:
            do_face_keywords: if true, face names are merged into keywords.
        """
        new_keywords = []
        if self.photo.keywords:
            for keyword in self.photo.keywords:
                if keyword and not keyword in new_keywords:
                    new_keywords.append(keyword)
        if do_face_keywords:
            for keyword in self.photo.getfaces():
                if keyword and not keyword in new_keywords:
                    new_keywords.append(keyword)
        return new_keywords

    def _check_person_iptc_data(self, export_file,
                                region_rectangles, region_names, do_faces, messages):
        """Tests if the person names or regions in the export file need to be
        updated.

        Appends a human-readable reason to messages when a mismatch is found.

        Returns: (new_rectangles, new_persons), or (None, None)
        """
        if do_faces:
            photo_rectangles = self.photo.face_rectangles
            photo_faces = self.photo.faces
        else:
            photo_rectangles = []
            photo_faces = []
        combined_region_names = ','.join(region_names)
        combined_photo_faces = ','.join(photo_faces)
        if combined_region_names != combined_photo_faces:
            messages.append(u' Persons (%s instead of %s)'
                            % (combined_region_names, combined_photo_faces))
            return (photo_rectangles, photo_faces)
        if len(region_rectangles) != len(photo_rectangles):
            messages.append(u' Number of regions (%d vs %d)' %
                            (len(region_rectangles), len(photo_rectangles)))
            #su.pout('%s vs %s' % (combined_region_names, combined_photo_faces))
            #su.pout('%s vs %s' % (region_rectangles, photo_rectangles))
            return (photo_rectangles, photo_faces)
        # Names and counts match; compare each region with tolerance.
        for p in xrange(len(region_rectangles)):
            if not region_matches(region_rectangles[p], photo_rectangles[p]):
                messages.append(u' Region for %s '
                                '(%s vs %s)' %
                                (region_names[p],
                                 ','.join(str(c) for c in region_rectangles[p]),
                                 ','.join(str(c) for c in photo_rectangles[p])))
                return (photo_rectangles, photo_faces)
        return (None, None)

    def check_iptc_data(self, export_file, options, is_original=False, file_updated=False):
        """Tests if a file has the proper keywords and caption in the meta
        data.

        Compares the file's IPTC data against iPhoto's data and, when
        allowed by options, rewrites the file's metadata via exiftool.

        Returns:
            True if any metadata mismatch was found.
        """
        # Only formats that support EXIF/IPTC can be checked.
        if not su.getfileextension(export_file) in _EXIF_EXTENSIONS:
            return False
        messages = []
        iptc_data = exiftool.get_iptc_data(export_file)
        new_caption = imageutils.get_photo_caption(self.photo, self.container,
                                                   options.captiontemplate)
        if not su.equalscontent(iptc_data.caption, new_caption):
            messages.append(u' File caption: %s' % (su.nn_string(iptc_data.caption).strip()))
            messages.append(u' iPhoto caption: %s' % (new_caption))
        else:
            new_caption = None
        new_keywords = None
        new_date = None
        new_rating = -1
        if not options.aperture:
            new_keywords = self.get_export_keywords(options.face_keywords)
            if not imageutils.compare_keywords(new_keywords, iptc_data.keywords):
                messages.append(u' File keywords: %s' % (u','.join(iptc_data.keywords)))
                if new_keywords == None:
                    messages.append(u' iPhoto keywords: <None>')
                else:
                    messages.append(u' iPhoto keywords: %s' % (u','.join(new_keywords)))
            else:
                new_keywords = None
            #if self.photo.date and date_time_original != self.photo.date:
            #    messages.append(u' File date: %s' % (date_time_original))
            #    messages.append(u' iPhoto date: %s' % (self.photo.date))
            #    new_date = self.photo.date
            if self.photo.rating != None and iptc_data.rating != self.photo.rating:
                messages.append(u' File rating: %d' % (iptc_data.rating))
                messages.append(u' iPhoto rating: %d' % (self.photo.rating))
                new_rating = self.photo.rating
        else:
            # Aperture mode: only merge face names into existing keywords.
            if options.face_keywords:
                merged_keywords = iptc_data.keywords[:]
                for keyword in self.photo.getfaces():
                    if not keyword in merged_keywords:
                        merged_keywords.append(keyword)
                new_keywords = merged_keywords
        if iptc_data.hierarchical_subject and not options.reverse:
            messages.append(u' File subjects: %s' % (u','.join(iptc_data.hierarchical_subject)))
        new_gps = None
        if options.gps and self.photo.gps:
            if (not iptc_data.gps or not self.photo.gps.is_same(iptc_data.gps)):
                if iptc_data.gps:
                    old_gps = iptc_data.gps
                else:
                    old_gps = imageutils.GpsLocation()
                messages.append(u' File GPS: %s' % (old_gps.to_string()))
                messages.append(u' iPhoto GPS: %s' % (self.photo.gps.to_string()))
                new_gps = self.photo.gps
        # Don't export the faces into the original file (could have been
        # cropped).
        do_faces = options.faces and not is_original
        (new_rectangles, new_persons) = self._check_person_iptc_data(
            export_file, iptc_data.region_rectangles, iptc_data.region_names, do_faces, messages)
        if (new_caption != None or new_keywords != None or new_date or
                (not options.reverse and iptc_data.hierarchical_subject) or
                new_gps or new_rating != -1 or new_rectangles != None or new_persons != None):
            su.pout(u'Updating IPTC for %s because of\n%s' % (export_file, u'\n'.join(messages)))
            if (file_updated or imageutils.should_update(options)) and not options.dryrun:
                exiftool.update_iptcdata(export_file, new_caption, new_keywords,
                                         new_date, new_rating, new_gps,
                                         new_rectangles, new_persons, iptc_data.image_width,
                                         iptc_data.image_height, hierarchical_subject=[])
            return True
        return False

    def is_part_of(self, file_name):
        """Checks if <file> is part of this image."""
        return self.export_file == file_name
# Matches names that begin with a four-digit year, capturing the year and
# the remainder (e.g. "2010 Vacation").
# NOTE(review): not referenced in this portion of the file — confirm use
# elsewhere before removing.
_YEAR_PATTERN_INDEX = re.compile(r'([0-9][0-9][0-9][0-9]) (.*)')
class ExportDirectory(object):
    """Tracks an album folder in the export location."""

    def __init__(self, name, iphoto_container, albumdirectory):
        self.name = name
        self.iphoto_container = iphoto_container
        self.albumdirectory = albumdirectory
        self.files = {}  # lower case file names -> ExportFile

    def add_iphoto_images(self, images, options):
        """Works through an image folder tree, and builds data for exporting.

        Returns:
            Number of images added for export.
        """
        entries = 0
        template = options.nametemplate
        if images is not None:
            # Width used for zero-padded sequence numbers in file names.
            entry_digits = len(str(len(images)))
            for image in images:
                if image.ismovie() and not options.movies:
                    continue
                entries += 1
                image_basename = self.make_album_basename(
                    image,
                    entries,
                    str(entries).zfill(entry_digits),
                    template)
                picture_file = ExportFile(image, self.iphoto_container, self.albumdirectory,
                                          image_basename, options)
                self.files[image_basename.lower()] = picture_file
        return entries

    def make_album_basename(self, photo, index, padded_index,
                            name_template):
        """creates unique file name.

        Appends _1, _2, ... until the (case-insensitive) name is not yet
        taken within this album directory.
        """
        base_name = imageutils.format_photo_name(photo,
                                                 self.iphoto_container.name,
                                                 index,
                                                 padded_index,
                                                 name_template)
        index = 0
        while True:
            album_basename = base_name
            if index > 0:
                album_basename += "_%d" % (index)
            if self.files.get(album_basename.lower()) is None:
                return album_basename
            index += 1
        # Unreachable: the loop above always returns.
        return base_name

    def load_album(self, options):
        """walks the album directory tree, and scans it for existing files."""
        if not os.path.exists(self.albumdirectory):
            su.pout("Creating folder " + self.albumdirectory)
            if not options.dryrun:
                os.makedirs(self.albumdirectory)
            else:
                return
        file_list = os.listdir(self.albumdirectory)
        if file_list is None:
            return
        for f in sorted(file_list):
            # we won't touch some files
            if imageutils.is_ignore(f):
                continue
            # Normalize to NFC so names compare correctly on HFS+ volumes.
            album_file = unicodedata.normalize("NFC",
                                               os.path.join(self.albumdirectory,
                                                            f))
            if os.path.isdir(album_file):
                if (options.originals and
                        (f == "Originals" or (options.picasa and
                                              f == ".picasaoriginals"))):
                    self.scan_originals(album_file, options)
                    continue
                else:
                    delete_album_file(album_file, self.albumdirectory,
                                      "Obsolete export directory", options)
                    continue
            base_name = unicodedata.normalize("NFC",
                                              su.getfilebasename(album_file))
            master_file = self.files.get(base_name.lower())
            # everything else must have a master, or will have to go
            if master_file is None or not master_file.is_part_of(album_file):
                delete_album_file(album_file, self.albumdirectory,
                                  "Obsolete exported file", options)

    def scan_originals(self, folder, options):
        """Scan a folder of Original images, and delete obsolete ones."""
        file_list = os.listdir(folder)
        if not file_list:
            return
        for f in file_list:
            # We won't touch some files.
            if imageutils.is_ignore(f):
                continue
            originalfile = unicodedata.normalize("NFC", os.path.join(folder, f))
            if os.path.isdir(originalfile):
                # Originals folders should not contain sub-directories.
                delete_album_file(originalfile, self.albumdirectory,
                                  "Obsolete export Originals directory",
                                  options)
                continue
            base_name = unicodedata.normalize("NFC",
                                              su.getfilebasename(originalfile))
            master_file = self.files.get(base_name.lower())
            # everything else must have a master, or will have to go
            if (not master_file or
                    originalfile != master_file.original_export_file or
                    master_file.photo.rotation_is_only_edit):
                delete_album_file(originalfile, originalfile,
                                  "Obsolete Original", options)

    def generate_files(self, options):
        """Generates the files in the export location."""
        if not os.path.exists(self.albumdirectory) and not options.dryrun:
            os.makedirs(self.albumdirectory)
        for f in sorted(self.files):
            self.files[f].generate(options)
class IPhotoFace(iphotodata.IPhotoContainer):
    """A photo container based on a face.

    Wraps a face name and its images so face albums can be processed like
    any other iPhoto container.
    """

    def __init__(self, face, images):
        # Minimal container payload; faces carry no keyword list.
        data = {}
        data["KeyList"] = []
        iphotodata.IPhotoContainer.__init__(self, data, "Face", False, images)
        self.images = images
        self.name = face
class ExportLibrary(object):
    """The root of the export tree."""

    def __init__(self, albumdirectory):
        self.albumdirectory = albumdirectory
        self.named_folders = {}  # folder name -> ExportDirectory
        self._abort = False

    def abort(self):
        """Signals that a currently running export should be aborted as soon
        as possible.
        """
        self._abort = True

    def _check_abort(self):
        # Returns True if abort() has been called; prints once per check.
        if self._abort:
            print "Export cancelled."
            return True
        return False

    def _find_unused_folder(self, folder):
        """Returns a folder name based on folder that isn't used yet"""
        i = 0
        while True:
            if i > 0:
                proposed = u'%s_(%d)' % (folder, i)
            else:
                proposed = folder
            if self.named_folders.get(proposed) is None:
                return proposed
            i += 1

    def process_albums(self, albums, album_types, folder_prefix, includes,
                       excludes, options, matched=False):
        """Walks trough an iPhoto album tree, and discovers albums
        (directories).

        Args:
            albums: iterable of iPhoto album containers to process.
            album_types: list of album type names to export.
            folder_prefix: path prefix for folders created at this level.
            includes: regular expression of album names to include.
            excludes: regular expression of album names to exclude (or None).
            options: processing options.
            matched: True if a parent folder already matched includes.

        Returns:
            Total number of named folders discovered so far.
        """
        include_pattern = re.compile(su.unicode_string(includes))
        exclude_pattern = None
        if excludes:
            exclude_pattern = re.compile(su.unicode_string(excludes))
        # Figure out the folder patterns (if any)
        folderpatterns = []
        if options.folderpatterns:
            for pattern in su.unicode_string(options.folderpatterns).split(','):
                (expression, folder) = pattern.split('/', 2)
                folderpatterns.append((re.compile(expression), folder))
        # first, do the sub-albums
        for sub_album in albums:
            if self._check_abort():
                return
            # '/' is the folder separator, so strip it from album names.
            sub_name = sub_album.name.replace('/','-')
            if not sub_name:
                print "Found an album with no name: " + sub_album.albumid
                sub_name = "xxx"
            # check the album type
            if sub_album.albumtype == "Folder" or sub_album.albums:
                # Folders only recurse; they don't export images directly.
                sub_matched = matched
                if include_pattern.match(sub_name):
                    sub_matched = True
                new_name = folder_prefix
                if sub_album.albumtype == "Folder":
                    new_name += imageutils.make_foldername(sub_name) + "/"
                self.process_albums(sub_album.albums, album_types, new_name,
                                    includes, excludes, options, sub_matched)
                continue
            elif (sub_album.albumtype == "None" or
                  not sub_album.albumtype in album_types):
                # print "Ignoring " + sub_album.name + " of type " + \
                #     sub_album.albumtype
                continue
            if not matched and not include_pattern.match(sub_name):
                _logger.debug(u'Skipping "%s" because it does not match pattern.', sub_name)
                continue
            if exclude_pattern and exclude_pattern.match(sub_name):
                _logger.debug(u'Skipping "%s" because it is excluded.', sub_name)
                continue
            _logger.debug(u'Loading "%s".', sub_name)
            # Folder hints can come from the name, the album, or patterns.
            folder_hint = None
            if sub_name.find('/') != -1:
                (folder_hint, sub_name) = sub_name.split('/', 1)
            if not folder_hint and options.folderhints:
                folder_hint = sub_album.getfolderhint()
            if not folder_hint and folderpatterns:
                for (pattern, folder) in folderpatterns:
                    if pattern.match(sub_album.name):
                        if options.verbose:
                            su.pout("Using folder %s for album %s." % (folder, sub_album.name))
                        folder_hint = folder
                        break
            prefix = folder_prefix
            if folder_hint is not None:
                prefix = prefix + imageutils.make_foldername(folder_hint) + "/"
            formatted_name = imageutils.format_album_name(
                sub_album, sub_name, options.foldertemplate)
            sub_name = prefix + imageutils.make_foldername(formatted_name)
            sub_name = self._find_unused_folder(sub_name)
            # first, do the sub-albums
            self.process_albums(sub_album.albums, album_types, folder_prefix,
                                includes, excludes, options, matched)
            # now the album itself
            picture_directory = ExportDirectory(
                sub_name, sub_album,
                os.path.join(self.albumdirectory, sub_name))
            if picture_directory.add_iphoto_images(sub_album.images,
                                                   options) > 0:
                self.named_folders[sub_name] = picture_directory
        return len(self.named_folders)

    def load_album(self, options):
        """Loads an existing album (export folder)."""
        if not os.path.exists(self.albumdirectory) and not options.dryrun:
            os.makedirs(self.albumdirectory)
        album_directories = {}
        for folder in sorted(self.named_folders.values()):
            if self._check_abort():
                return
            album_directories[folder.albumdirectory] = True
            folder.load_album(options)
        # Anything in the export tree not in album_directories is obsolete.
        self.check_directories(self.albumdirectory, "", album_directories,
                               options)

    def check_directories(self, directory, rel_path, album_directories,
                          options):
        """Checks an export directory for obsolete files.

        Returns:
            True if the directory (or a sub-directory) contains albums and
            must be kept.
        """
        if options.ignore:
            exclude_pattern = re.compile(su.fsdec(options.ignore))
            if exclude_pattern.match(os.path.split(directory)[1]):
                return True
        if not os.path.exists(directory):
            return True
        contains_albums = False
        for f in su.os_listdir_unicode(directory):
            if self._check_abort():
                return
            album_file = os.path.join(directory, f)
            if os.path.isdir(album_file):
                if f == "iPod Photo Cache":
                    su.pout("Skipping " + album_file)
                    continue
                rel_path_file = os.path.join(rel_path, f)
                if album_file in album_directories:
                    contains_albums = True
                elif not self.check_directories(album_file, rel_path_file,
                                                album_directories, options):
                    delete_album_file(album_file, directory,
                                      "Obsolete directory", options)
                else:
                    contains_albums = True
            else:
                # we won't touch some files
                if imageutils.is_ignore(f):
                    continue
                delete_album_file(album_file, directory, "Obsolete",
                                  options)
        return contains_albums

    def generate_files(self, options):
        """Walks through the export tree and sync the files."""
        if not os.path.exists(self.albumdirectory) and not options.dryrun:
            os.makedirs(self.albumdirectory)
        for ndir in sorted(self.named_folders):
            if self._check_abort():
                break
            self.named_folders[ndir].generate_files(options)
def export_iphoto(library, data, excludes, options):
    """Main routine for exporting iPhoto images.

    Args:
        library: ExportLibrary rooted at the export folder.
        data: iPhoto library data (appledata.iphotodata).
        excludes: regular expression of album names to exclude (or None).
        options: processing options from the command line.
    """
    print "Scanning iPhoto data for photos to export..."
    if options.events:
        library.process_albums(data.root_album.albums, ["Event"], u'',
                               options.events, excludes, options)
    if options.albums:
        # ignore: Selected Event Album, Special Roll, Special Month
        library.process_albums(data.root_album.albums,
                               ["Regular", "Published"], u'',
                               options.albums, excludes, options)
    if options.smarts:
        library.process_albums(data.root_album.albums,
                               ["Smart", "Special Roll", "Special Month", "Flagged"], u'',
                               options.smarts, excludes, options)
    if options.facealbums:
        library.process_albums(data.getfacealbums(), ["Face"],
                               unicode(options.facealbum_prefix),
                               ".", excludes, options)
    print "Scanning existing files in export folder..."
    library.load_album(options)
    print "Exporting photos from iPhoto to export folder..."
    library.generate_files(options)
USAGE = """usage: %prog [options]
Exports images and movies from an iPhoto library into a folder.
Launches as an application if no options are specified.
"""


def get_option_parser():
    """Gets an OptionParser for the Phoshare command line tool options.

    Fixes relative to the previous version:
    - --aperture and --reverse are flags (they are tested as booleans in
      run_phoshare), so they now use action="store_true" instead of
      silently consuming the next argument.
    - --iptc/-k/-K share dest "iptc" with default=0, so "options.iptc > 0"
      is a valid comparison even when neither flag is given.
    - typo fixes in the --export and --ignore help texts.

    Returns:
        optparse.OptionParser configured with all Phoshare options.
    """
    p = OptionParser(usage=USAGE)
    p.add_option(
        "-a", "--albums",
        help="""Export matching regular albums. The argument
        is a regular expression. Use -a . to export all regular albums.""")
    p.add_option(
        "--aperture", action="store_true",
        help="""Treat library as Aperture library.""")
    p.add_option(
        '--captiontemplate', default='{description}',
        help='Template for IPTC image captions. Default: "{description}".')
    p.add_option(
        '--checkalbumsize',
        help='''If set, list any event or album containing more than the
        specified number of images.''')
    p.add_option(
        "-d", "--delete", action="store_true",
        help="Delete obsolete files that are no longer in your iPhoto library.")
    p.add_option(
        "--dryrun", action="store_true",
        help="""Show what would have been done, but don't change or copy any
        files.""")
    p.add_option("-e", "--events",
                 help="""Export matching events. The argument is
                 a regular expression. Use -e . to export all events.""")
    p.add_option("--export",
                 help="""Export images and movies to specified folder.
                 Any files found in this folder that are not part of the
                 export set will be deleted, and files that match will be
                 overwritten if the iPhoto version of the file is
                 different.""")
    p.add_option("--facealbums", action='store_true',
                 help="Create albums (folders) for faces")
    p.add_option("--facealbum_prefix", default="",
                 help='Prefix for face folders (use with --facealbums)')
    p.add_option("--face_keywords", action="store_true",
                 help="Copy face names into keywords.")
    p.add_option("-f", "--faces", action="store_true",
                 help="Copy faces into metadata.")
    p.add_option("--folderhints", dest="folderhints", action="store_true",
                 help="Scan event and album descriptions for folder hints.")
    p.add_option("--folderpatterns",
                 help="""List of regular expressions and folder names, for
                 mapping events and albums to folers. Format is
                 <pattern1>/<folder1>,<pattern2>/<folder2>,...""")
    p.add_option("--foldertemplate", default="{name}",
                 help="""Template for naming folders. Default: "{name}".""")
    p.add_option("--gps", action="store_true",
                 help="Process GPS location information")
    p.add_option('--ignore',
                 help="""Pattern for folders to ignore in the export folder (use
                 with --delete if you have extra folders that you
                 don't want iphoto_export to delete.""")
    p.add_option("--iphoto",
                 help="""Path to iPhoto library, e.g.
                 "%s/Pictures/iPhoto Library".""",
                 default="~/Pictures/iPhoto Library")
    # -k and -K share dest "iptc": 1 = check new/updated files, 2 = all files.
    p.add_option(
        "-k", "--iptc", action="store_const", const=1, dest="iptc", default=0,
        help="""Check the IPTC data of all new or updated files. Checks for
        keywords and descriptions. Requires the program "exiftool" (see
        http://www.sno.phy.queensu.ca/~phil/exiftool/).""")
    p.add_option(
        "-K", "--iptcall", action="store_const", const=2, dest="iptc",
        help="""Check the IPTC data of all files. Checks for
        keywords and descriptions. Requires the program "exiftool" (see
        http://www.sno.phy.queensu.ca/~phil/exiftool/).""")
    p.add_option("--iptc_masters",
                 action="store_true",
                 help="""Check and update IPTC data in the master files in the library.""")
    p.add_option(
        "-l", "--link", action="store_true",
        help="""Use links instead of copying files. Use with care, as changes made
        to the exported files might affect the image that is stored in the iPhoto
        library.""")
    p.add_option("--max_create", type='int', default=-1,
                 help='Maximum number of images to create.')
    p.add_option("--max_delete", type='int', default=-1,
                 help='Maximum number of images to delete.')
    p.add_option("--max_update", type='int', default=-1,
                 help='Maximum number of images to update.')
    p.add_option(
        "-n", "--nametemplate", default="{title}",
        help="""Template for naming image files. Default: "{title}".""")
    p.add_option("-o", "--originals", action="store_true",
                 help="Export original files into Originals.")
    p.add_option("--picasa", action="store_true",
                 help="Store originals in .picasaoriginals")
    p.add_option('--picasapassword',
                 help='PicasaWeb password (optional).')
    p.add_option('--picasaweb',
                 help="""Export to PicasaWeb albums of specified user
                 (available in future version of Phoshare).""")
    p.add_option("--pictures", action="store_false", dest="movies",
                 default=True,
                 help="Export pictures only (no movies).")
    p.add_option("--ratings",
                 help="""Only export pictures with matching rating (comma separate list)""")
    p.add_option("--reverse", action="store_true",
                 help="""Reverse sync mode - check if changes in the export folders need to
                 be sync'ed back to the library. Implies --dryrun.""")
    p.add_option(
        "--size", type='int', help="""Resize images so that neither width or
        height exceeds this size. Converts all images to jpeg.""")
    p.add_option(
        "-s", "--smarts",
        help="""Export matching smart albums. The argument
        is a regular expression. Use -s . to export all smart albums.""")
    p.add_option("-u", "--update", action="store_true",
                 help="Update existing files.")
    p.add_option(
        "-x", "--exclude",
        help="""Don't export matching albums or events. The pattern is a
        regular expression.""")
    p.add_option('--verbose', action='store_true',
                 help='Print verbose messages.')
    p.add_option('--version', action='store_true',
                 help='Print build version and exit.')
    return p
def run_phoshare(cmd_args):
"""main routine for phoshare."""
parser = get_option_parser()
(options, args) = parser.parse_args(cmd_args)
if len(args) != 0:
parser.error("Found some unrecognized arguments on the command line.")
if options.version:
print '%s %s' % (phoshare.phoshare_version.PHOSHARE_VERSION,
phoshare.phoshare_version.PHOSHARE_BUILD)
return 1
if options.iptc > 0 and not exiftool.check_exif_tool():
print >> sys.stderr, ("Exiftool is needed for the --itpc or --iptcall" +
" options.")
return 1
if options.size and options.link:
parser.error("Cannot use --size and --link together.")
if not options.iphoto:
parser.error("Need to specify the iPhoto library with the --iphoto "
"option.")
if options.export or options.picasaweb or options.checkalbumsize:
if not (options.albums or options.events or options.smarts or
options.facealbums):
parser.error("Need to specify at least one event, album, or smart "
"album for exporting, using the -e, -a, or -s "
"options.")
else:
parser.error("No action specified. Use --export to export from your "
"iPhoto library.")
if options.picasaweb:
if options.picasapassword:
google_password = options.picasapassword
else:
google_password = getpass.getpass('Google password for %s: ' %
options.picasaweb)
if options.ratings:
options.ratings = [int(r) for r in options.ratings.split(",")]
if options.reverse:
if not options.dryrun:
su.pout(u"Turning on dryrun mode because of --reverse option.")
options.dryrun = True
logging_handler = logging.StreamHandler()
logging_handler.setLevel(logging.DEBUG if options.verbose else logging.INFO)
_logger.addHandler(logging_handler)
album_xml_file = iphotodata.get_album_xmlfile(
su.expand_home_folder(options.iphoto))
data = iphotodata.get_iphoto_data(album_xml_file, ratings=options.ratings,
verbose=options.verbose, aperture=options.aperture)
if options.originals and options.export:
data.load_aperture_originals()
options.aperture = data.aperture and not data.aperture_data
options.foldertemplate = unicode(options.foldertemplate)
options.nametemplate = unicode(options.nametemplate)
options.captiontemplate = unicode(options.captiontemplate)
if options.checkalbumsize:
data.checkalbumsizes(int(options.checkalbumsize))
if options.export:
album = ExportLibrary(su.expand_home_folder(options.export))
export_iphoto(album, data, options.exclude, options)
if options.picasaweb:
try:
import phoshare.picasaweb as picasaweb
albums = picasaweb.PicasaAlbums(options.picasaweb, google_password)
export_iphoto(albums, data, options.exclude, options)
except ImportError:
su.perr('Sorry, this version of Phoshare does not support uploading to PicasaWeb.')
def main():
  """Command-line entry point: runs phoshare with the process arguments."""
  run_phoshare(sys.argv[1:])
if __name__ == "__main__":
  main()
| {
"content_hash": "05e8a3fbe94a47a7a704d70278d530e9",
"timestamp": "",
"source": "github",
"line_count": 990,
"max_line_length": 99,
"avg_line_length": 44.481818181818184,
"alnum_prop": 0.555237641074551,
"repo_name": "benwaa/iPhotoCloudSync",
"id": "2f4a6ada30d291ffd614f93e1dad084429690be2",
"size": "44059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/phoshare/phoshare_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35837"
},
{
"name": "Makefile",
"bytes": "63736"
},
{
"name": "Python",
"bytes": "214412"
},
{
"name": "Shell",
"bytes": "939"
}
],
"symlink_target": ""
} |
"""MetricWriter for Pytorch summary files.
Use this writer for the Pytorch-based code.
"""
from typing import Any, Mapping, Optional
from absl import logging
from clu.metric_writers import interface
from torch.utils import tensorboard
Array = interface.Array
Scalar = interface.Scalar
class TorchTensorboardWriter(interface.MetricWriter):
  """MetricWriter that writes Pytorch summary files.

  Thin adapter forwarding each MetricWriter call to a
  torch.utils.tensorboard.SummaryWriter. Raw summaries and videos are not
  supported by the underlying writer; those calls log a one-time warning.
  """

  def __init__(self, logdir: str):
    """Creates a SummaryWriter that writes event files into `logdir`."""
    super().__init__()
    self._writer = tensorboard.SummaryWriter(log_dir=logdir)

  def write_summaries(
      self, step: int,
      values: Mapping[str, Array],
      metadata: Optional[Mapping[str, Any]] = None):
    # Pre-encoded/raw summaries have no SummaryWriter equivalent; warn once.
    logging.log_first_n(
        logging.WARNING,
        "TorchTensorboardWriter does not support writing raw summaries.", 1)

  def write_scalars(self, step: int, scalars: Mapping[str, Scalar]):
    for key, value in scalars.items():
      self._writer.add_scalar(key, value, global_step=step)

  def write_images(self, step: int, images: Mapping[str, Array]):
    for key, value in images.items():
      # dataformats="HWC": images are passed as (height, width, channels).
      self._writer.add_image(key, value, global_step=step, dataformats="HWC")

  def write_videos(self, step: int, videos: Mapping[str, Array]):
    logging.log_first_n(
        logging.WARNING,
        "TorchTensorBoardWriter does not support writing videos.", 1)

  def write_audios(
      self, step: int, audios: Mapping[str, Array], *, sample_rate: int):
    for key, value in audios.items():
      self._writer.add_audio(
          key, value, global_step=step, sample_rate=sample_rate)

  def write_texts(self, step: int, texts: Mapping[str, str]):
    for key, value in texts.items():
      # Bug fix: SummaryWriter has no `text` method, so the previous
      # `self._writer.text(...)` raised AttributeError at runtime; the
      # correct API is `add_text`.
      self._writer.add_text(key, value, global_step=step)

  def write_histograms(self,
                       step: int,
                       arrays: Mapping[str, Array],
                       num_buckets: Optional[Mapping[str, int]] = None):
    for tag, values in arrays.items():
      # bins="auto" lets numpy choose the bucketing; max_bins caps it when a
      # per-tag bucket count was requested.
      bins = None if num_buckets is None else num_buckets.get(tag)
      self._writer.add_histogram(
          tag, values, global_step=step, bins="auto", max_bins=bins)

  def write_hparams(self, hparams: Mapping[str, Any]):
    # No metric dict is associated with the hparams, hence the empty dict.
    self._writer.add_hparams(hparams, {})

  def flush(self):
    self._writer.flush()

  def close(self):
    self._writer.close()
| {
"content_hash": "c4b566181b70be44cb67058445bb54f0",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 77,
"avg_line_length": 31.410958904109588,
"alnum_prop": 0.6589620584387266,
"repo_name": "google/CommonLoopUtils",
"id": "74c02c501b351f7ba6212a07fdf68a5c4a2ea396",
"size": "2874",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clu/metric_writers/torch_tensorboard_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "573544"
},
{
"name": "Python",
"bytes": "277949"
},
{
"name": "Shell",
"bytes": "373"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
from collections import OrderedDict
from pants.build_graph.address import BuildFileAddress
from pants.engine.objects import Serializable
from pants.util.memo import memoized_property
from pants.util.objects import datatype
class MappingError(Exception):
  """Indicates an error mapping addressable objects.

  Root of this module's mapping-error hierarchy; catch this to handle any
  mapping failure raised here.
  """
class UnaddressableObjectError(MappingError):
  """Indicates an un-addressable object was found at the top level.

  Raised by AddressMap.parse for parsed objects that are not Serializable
  or that lack a `name` attribute.
  """
class DuplicateNameError(MappingError):
  """Indicates more than one top-level object was found with the same name.

  Raised both within a single source (AddressMap.parse) and across the
  sources merged into an AddressFamily.
  """
class AddressMap(datatype('AddressMap', ['path', 'objects_by_name'])):
  """Maps addressable Serializable objects from a byte source.

  To construct an AddressMap, use `parse`.

  :param path: The path to the byte source this address map's objects were parsed from.
  :param objects_by_name: A dict mapping from object name to the parsed 'thin' addressable object.
  """
  @classmethod
  def parse(cls, filepath, filecontent, parser):
    """Parses a source for addressable Serializable objects.

    No matter the parser used, the parsed and mapped addressable objects are all 'thin'; ie: any
    objects they point to in other namespaces or even in the same namespace but from a separate
    source are left as unresolved pointers.

    :param string filepath: The path to the byte source containing serialized objects.
    :param string filecontent: The content of byte source containing serialized objects to be parsed.
    :param parser: The parser cls to use.
    :type parser: A :class:`pants.engine.parser.Parser`.
    :returns: A new AddressMap with the parsed objects keyed (and sorted) by name.
    :raises: :class:`MappingError` on parse failure, un-addressable objects, or duplicate names.
    """
    try:
      objects = parser.parse(filepath, filecontent)
    except Exception as e:
      # Wrap any parser failure so callers only need to handle MappingError.
      raise MappingError('Failed to parse {}:\n{}'.format(filepath, e))
    objects_by_name = {}
    for obj in objects:
      if not Serializable.is_serializable(obj):
        raise UnaddressableObjectError('Parsed a non-serializable object: {!r}'.format(obj))
      attributes = obj._asdict()
      name = attributes.get('name')
      if not name:
        raise UnaddressableObjectError('Parsed a non-addressable object: {!r}'.format(obj))
      if name in objects_by_name:
        raise DuplicateNameError('An object already exists at {!r} with name {!r}: {!r}. Cannot '
                                 'map {!r}'.format(filepath, name, objects_by_name[name], obj))
      objects_by_name[name] = obj
    # Sort by name for a deterministic ordering of the resulting map.
    return cls(filepath, OrderedDict(sorted(objects_by_name.items())))
class DifferingFamiliesError(MappingError):
  """Indicates an attempt was made to merge address maps from different families together.

  Raised by AddressFamily.create when a map's path does not lie under the
  shared spec_path directory.
  """
class AddressFamily(datatype('AddressFamily', ['namespace', 'objects_by_name'])):
  """The set of addressed objects declared within a single namespace.

  Use `create` to build one. A family may merge the objects of zero or more
  underlying address sources; an "empty" family (no build files in the
  namespace) is perfectly legal.

  :param namespace: The namespace path of this address family.
  :param objects_by_name: A dict mapping from object name to a
    (source path, thin addressable object) pair.
  """

  @classmethod
  def create(cls, spec_path, address_maps):
    """Merges the given AddressMaps into one family rooted at `spec_path`.

    :param spec_path: The directory prefix shared by all address_maps.
    :param address_maps: The family of maps that form this namespace.
    :type address_maps: :class:`collections.Iterable` of :class:`AddressMap`
    :returns: a new address family.
    :rtype: :class:`AddressFamily`
    :raises: :class:`MappingError` if the given address maps do not form a family.
    """
    if spec_path == b'.':
      spec_path = ''
    # First pass: every contributing map must live under the shared directory.
    for address_map in address_maps:
      if not address_map.path.startswith(spec_path):
        raise DifferingFamiliesError('Expected AddressMaps to share the same parent directory {}, '
                                     'but received: {}'
                                     .format(spec_path, address_map.path))
    # Second pass: merge, refusing to let a later map overwrite a name that an
    # earlier map already defined.
    merged = {}
    for address_map in address_maps:
      source_path = address_map.path
      for name, obj in address_map.objects_by_name.items():
        existing = merged.get(name)
        if existing:
          previous_path, _ = existing
          raise DuplicateNameError('An object with name {name!r} is already defined in '
                                   '{previous_path!r}, will not overwrite with {obj!r} from '
                                   '{current_path!r}.'
                                   .format(name=name,
                                           previous_path=previous_path,
                                           obj=obj,
                                           current_path=source_path))
        merged[name] = (source_path, obj)
    ordered = OrderedDict((name, (path, obj))
                          for name, (path, obj) in sorted(merged.items()))
    return AddressFamily(namespace=spec_path, objects_by_name=ordered)

  @memoized_property
  def addressables(self):
    """Return a mapping from BuildFileAddress to thin addressable objects in this namespace.

    :rtype: dict from :class:`pants.build_graph.address.BuildFileAddress` to thin addressable
            objects.
    """
    mapping = {}
    for name, (path, obj) in self.objects_by_name.items():
      mapping[BuildFileAddress(rel_path=path, target_name=name)] = obj
    return mapping

  def __eq__(self, other):
    # Families are identified purely by namespace, mirroring __hash__ below.
    if type(self) != type(other):
      return NotImplemented
    return self.namespace == other.namespace

  def __ne__(self, other):
    return not (self == other)

  def __hash__(self):
    return hash(self.namespace)

  def __repr__(self):
    template = 'AddressFamily(namespace={!r}, objects_by_name={!r})'
    return template.format(self.namespace, self.objects_by_name.keys())
class ResolveError(MappingError):
  """Indicates an error resolving targets.

  NOTE(review): not raised anywhere in this module's visible code;
  presumably raised by callers that resolve addresses against these maps.
  """
class AddressMapper(object):
  """Configuration to parse build files matching a filename pattern."""

  def __init__(self,
               parser,
               build_patterns=None,
               build_ignore_patterns=None,
               exclude_target_regexps=None,
               subproject_roots=None):
    """Create an AddressMapper.

    Both the set of files that define a mappable BUILD files and the parser used to parse those
    files can be customized. See the `pants.engine.parsers` module for example parsers.

    :param parser: The BUILD file parser to use.
    :type parser: An instance of :class:`pants.engine.parser.Parser`.
    :param tuple build_patterns: A tuple of fnmatch-compatible patterns for identifying BUILD files
      used to resolve addresses; defaults to (b'BUILD', b'BUILD.*').
    :param list build_ignore_patterns: A list of path ignore patterns used when searching for BUILD files.
    :param list exclude_target_regexps: A list of regular expressions for excluding targets.
    :param list subproject_roots: Optional list of subproject root paths; stored verbatim.
    """
    self.parser = parser
    self.build_patterns = tuple(build_patterns) if build_patterns else (b'BUILD', b'BUILD.*')
    self.build_ignore_patterns = tuple(build_ignore_patterns) if build_ignore_patterns else ()
    self._exclude_target_regexps = exclude_target_regexps if exclude_target_regexps else []
    self.exclude_patterns = [re.compile(regexp) for regexp in self._exclude_target_regexps]
    self.subproject_roots = subproject_roots if subproject_roots else []

  def __eq__(self, other):
    if other is self:
      return True
    if type(other) != type(self):
      return NotImplemented
    return (other.build_patterns == self.build_patterns and
            other.parser == self.parser)

  def __ne__(self, other):
    return not (self == other)

  def __hash__(self):
    # Compiled regexes are not hashable, so hash only on the parser.
    return hash(self.parser)

  def __repr__(self):
    return 'AddressMapper(parser={}, build_patterns={})'.format(self.parser, self.build_patterns)

  def __str__(self):
    return repr(self)
| {
"content_hash": "f73e9cd4f7d28f6709d1c50ffc00e5bc",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 106,
"avg_line_length": 40.19512195121951,
"alnum_prop": 0.6605582524271845,
"repo_name": "UnrememberMe/pants",
"id": "f5c406a3fcc3dc296c7b44504f561366e76cce89",
"size": "8387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/engine/mapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "48321"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5767085"
},
{
"name": "Rust",
"bytes": "427157"
},
{
"name": "Scala",
"bytes": "75938"
},
{
"name": "Shell",
"bytes": "75470"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import shutil
import tempfile
import unittest
from unittest import mock
from mobly import signals
from mobly.base_instrumentation_test import _InstrumentationBlock
from mobly.base_instrumentation_test import _InstrumentationKnownStatusKeys
from mobly.base_instrumentation_test import _InstrumentationStructurePrefixes
from tests.lib import mock_instrumentation_test
# A random prefix to test that prefixes are added properly.
MOCK_PREFIX = 'my_prefix'
# A mock name for the instrumentation test subclass.
MOCK_INSTRUMENTATION_TEST_CLASS_NAME = 'MockInstrumentationTest'
# Minimal instrumentation output for a run that discovered zero tests; used
# as the baseline "completed successfully, nothing to do" fixture.
MOCK_EMPTY_INSTRUMENTATION_TEST = """\
INSTRUMENTATION_RESULT: stream=
Time: 0.001
OK (0 tests)
INSTRUMENTATION_CODE: -1
"""
class InstrumentationResult:
  """Plain holder for the outcome of one mock instrumentation run.

  Attributes:
    error: the signals.TestError raised by the run, or None.
    completed_and_passed: whether the run completed and reported success.
    executed: recorded tests that executed.
    skipped: recorded tests that were skipped.
  """

  def __init__(self):
    self.error = None
    self.completed_and_passed = False
    # Distinct lists so executed and skipped never alias each other.
    self.executed, self.skipped = [], []
class BaseInstrumentationTestTest(unittest.TestCase):
  def setUp(self):
    # Fresh scratch directory per test case; removed again in tearDown.
    self.tmp_dir = tempfile.mkdtemp()
  def tearDown(self):
    # Clean up the per-test scratch directory created in setUp.
    shutil.rmtree(self.tmp_dir)
def assert_parse_instrumentation_options(self, user_params,
expected_instrumentation_options):
mit = mock_instrumentation_test.MockInstrumentationTest(
self.tmp_dir, user_params)
instrumentation_options = mit.parse_instrumentation_options(mit.user_params)
self.assertEqual(instrumentation_options, expected_instrumentation_options)
  def test_parse_instrumentation_options_with_no_user_params(self):
    # An empty config yields an empty instrumentation-option dict.
    self.assert_parse_instrumentation_options({}, {})
def test_parse_instrumentation_options_with_no_instrumentation_params(self):
self.assert_parse_instrumentation_options(
{
'param1': 'val1',
'param2': 'val2',
},
{},
)
def test_parse_instrumentation_options_with_only_instrumentation_params(self):
self.assert_parse_instrumentation_options(
{
'instrumentation_option_key1': 'value1',
'instrumentation_option_key2': 'value2',
},
{
'key1': 'value1',
'key2': 'value2'
},
)
def test_parse_instrumentation_options_with_mixed_user_params(self):
self.assert_parse_instrumentation_options(
{
'param1': 'val1',
'param2': 'val2',
'instrumentation_option_key1': 'value1',
'instrumentation_option_key2': 'value2',
},
{
'key1': 'value1',
'key2': 'value2'
},
)
def run_instrumentation_test(self, instrumentation_output, prefix=None):
mit = mock_instrumentation_test.MockInstrumentationTest(self.tmp_dir)
result = InstrumentationResult()
try:
result.completed_and_passed = mit.run_mock_instrumentation_test(
instrumentation_output, prefix=prefix)
except signals.TestError as e:
result.error = e
result.executed = mit.results.executed
result.skipped = mit.results.skipped
return result
def assert_equal_test(self, actual_test, expected_test):
(expected_test_name, expected_signal) = expected_test
self.assertEqual(actual_test.test_class,
MOCK_INSTRUMENTATION_TEST_CLASS_NAME)
self.assertEqual(actual_test.test_name, expected_test_name)
self.assertIsInstance(actual_test.termination_signal.exception,
expected_signal)
def assert_run_instrumentation_test(self,
instrumentation_output,
expected_executed=[],
expected_skipped=[],
expected_completed_and_passed=False,
expected_has_error=False,
prefix=None,
expected_executed_times=[]):
result = self.run_instrumentation_test(bytes(instrumentation_output,
'utf-8'),
prefix=prefix)
if expected_has_error:
self.assertIsInstance(result.error, signals.TestError)
else:
self.assertIsNone(result.error)
self.assertEqual(result.completed_and_passed,
expected_completed_and_passed)
self.assertEqual(len(result.executed), len(expected_executed))
for actual_test, expected_test in zip(result.executed, expected_executed):
self.assert_equal_test(actual_test, expected_test)
self.assertEqual(len(result.skipped), len(expected_skipped))
for actual_test, expected_test in zip(result.skipped, expected_skipped):
self.assert_equal_test(actual_test, expected_test)
if expected_executed_times:
for actual_test, expected_time in zip(result.executed,
expected_executed_times):
(expected_begin_time, expected_end_time) = expected_time
self.assertEqual(actual_test.begin_time, expected_begin_time)
self.assertEqual(actual_test.end_time, expected_end_time)
  def test_run_instrumentation_test_with_invalid_syntax(self):
    # `am` usage/help text (printed for a malformed command) contains no
    # INSTRUMENTATION_ markers, so parsing must surface an error.
    instrumentation_output = """\
usage: am [subcommand] [options]
usage: am start [-D] [-N] [-W] [-P <FILE>] [--start-profiler <FILE>]
                [--sampling INTERVAL] [-R COUNT] [-S]
am start: start an Activity. Options are:
    -D: enable debugging
am startservice: start a Service. Options are:
    --user <USER_ID> | current: Specify which user to run as; if not
      specified then run as the current user.
am task lock: bring <TASK_ID> to the front and don't allow other tasks to run.
<INTENT> specifications include these flags and arguments:
    [-a <ACTION>] [-d <DATA_URI>] [-t <MIME_TYPE>]
    [-c <CATEGORY> [-c <CATEGORY>] ...]
Error: Bad component name: /
"""
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_has_error=True)
  def test_run_instrumentation_test_with_no_output(self):
    # Completely empty output must be reported as an error, not success.
    instrumentation_output = """\
"""
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_has_error=True)
  def test_run_instrumentation_test_with_missing_test_package(self):
    # INSTRUMENTATION_FAILED: the target test package could not be found;
    # the run must end in an error.
    instrumentation_output = """\
android.util.AndroidException: INSTRUMENTATION_FAILED: com.my.package.test/com.my.package.test.runner.MyRunner
	at com.android.commands.am.Am.runInstrument(Am.java:897)
	at com.android.commands.am.Am.onRun(Am.java:405)
	at com.android.internal.os.BaseCommand.run(BaseCommand.java:51)
	at com.android.commands.am.Am.main(Am.java:124)
	at com.android.internal.os.RuntimeInit.nativeFinishInit(Native Method)
	at com.android.internal.os.RuntimeInit.main(RuntimeInit.java:262)
INSTRUMENTATION_STATUS: id=ActivityManagerService
INSTRUMENTATION_STATUS: Error=Unable to find instrumentation info for: ComponentInfo{com.my.package.test/com.my.package.test.runner.MyRunner}
INSTRUMENTATION_STATUS_CODE: -1"""
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_has_error=True)
  def test_run_instrumentation_test_with_missing_runner(self):
    # INSTRUMENTATION_FAILED with the status block *before* the stack trace;
    # the run must still end in an error.
    instrumentation_output = """\
android.util.AndroidException: INSTRUMENTATION_FAILED: com.my.package.test/com.my.package.test.runner
INSTRUMENTATION_STATUS: id=ActivityManagerService
INSTRUMENTATION_STATUS: Error=Unable to find instrumentation info for: ComponentInfo{com.my.package.test/com.my.package.test.runner}
INSTRUMENTATION_STATUS_CODE: -1
	at com.android.commands.am.Am.runInstrument(Am.java:897)
	at com.android.commands.am.Am.onRun(Am.java:405)
	at com.android.internal.os.BaseCommand.run(BaseCommand.java:51)
	at com.android.commands.am.Am.main(Am.java:124)
	at com.android.internal.os.RuntimeInit.nativeFinishInit(Native Method)
	at com.android.internal.os.RuntimeInit.main(RuntimeInit.java:262)"""
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_has_error=True)
def test_run_instrumentation_test_with_no_tests(self):
instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
self.assert_run_instrumentation_test(instrumentation_output,
expected_completed_and_passed=True)
@mock.patch('logging.info')
def test_run_instrumentation_test_logs_correctly(self, mock_info_logger):
instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
self.assert_run_instrumentation_test(instrumentation_output,
expected_completed_and_passed=True)
for mock_call in mock_info_logger.mock_calls:
logged_format = mock_call[1][0]
self.assertIsInstance(logged_format, str)
  @mock.patch('mobly.utils.get_current_epoch_time')
  def test_run_instrumentation_test_with_passing_test(self, mock_get_time):
    # Status code 1 = test started, 0 = test passed; one test total.
    instrumentation_output = """\
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=.
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 0
INSTRUMENTATION_RESULT: stream=
Time: 0.214
OK (1 test)
INSTRUMENTATION_CODE: -1
"""
    expected_executed = [
        ('com.my.package.test.BasicTest#basicTest', signals.TestPass),
    ]
    # begin_time / end_time sampled once at test start and once at test end.
    mock_get_time.side_effect = [13, 51]
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_executed=expected_executed,
                                         expected_completed_and_passed=True,
                                         expected_executed_times=[(13, 51)])
  def test_run_instrumentation_test_with_random_whitespace(self):
    # Exercises tolerance of stray whitespace around the markers.
    # NOTE(review): this literal currently appears identical to the plain
    # passing-test payload; confirm the whitespace variations survived
    # source formatting.
    instrumentation_output = """\
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=.
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 0
INSTRUMENTATION_RESULT: stream=
Time: 0.214
OK (1 test)
INSTRUMENTATION_CODE: -1
"""
    expected_executed = [
        ('com.my.package.test.BasicTest#basicTest', signals.TestPass),
    ]
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_executed=expected_executed,
                                         expected_completed_and_passed=True)
  def test_run_instrumentation_test_with_prefix_test(self):
    # When a prefix is supplied, recorded test names must carry it as
    # '<prefix>.<class>#<method>'.
    instrumentation_output = """\
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=.
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 0
INSTRUMENTATION_RESULT: stream=
Time: 0.214
OK (1 test)
INSTRUMENTATION_CODE: -1
"""
    expected_executed = [
        ('%s.com.my.package.test.BasicTest#basicTest' % MOCK_PREFIX,
         signals.TestPass),
    ]
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_executed=expected_executed,
                                         expected_completed_and_passed=True,
                                         prefix=MOCK_PREFIX)
  def test_run_instrumentation_test_with_failing_test(self):
    # Status code -2 = failure. The same stack trace appears three times:
    # in the stack= status key, in the per-test stream, and in the final
    # INSTRUMENTATION_RESULT stream. The parser must record one executed
    # test terminated by signals.TestFailure.
    instrumentation_output = """\
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=failingTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stack=java.lang.UnsupportedOperationException: dummy failing test
	at com.my.package.test.BasicTest.failingTest(BasicTest.java:38)
	at java.lang.reflect.Method.invoke(Native Method)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
	at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
	at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
	at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
	at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
	at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
	at org.junit.rules.RunRules.evaluate(RunRules.java:20)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
	at org.junit.runners.Suite.runChild(Suite.java:128)
	at org.junit.runners.Suite.runChild(Suite.java:27)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at org.junit.runners.Suite.runChild(Suite.java:128)
	at org.junit.runners.Suite.runChild(Suite.java:27)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
	at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
	at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
	at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
	at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
	at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
INSTRUMENTATION_STATUS: stream=
Error in failingTest(com.my.package.test.BasicTest):
java.lang.UnsupportedOperationException: dummy failing test
	at com.my.package.test.BasicTest.failingTest(BasicTest.java:38)
	at java.lang.reflect.Method.invoke(Native Method)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
	at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
	at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
	at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
	at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
	at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
	at org.junit.rules.RunRules.evaluate(RunRules.java:20)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
	at org.junit.runners.Suite.runChild(Suite.java:128)
	at org.junit.runners.Suite.runChild(Suite.java:27)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at org.junit.runners.Suite.runChild(Suite.java:128)
	at org.junit.runners.Suite.runChild(Suite.java:27)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
	at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
	at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
	at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
	at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
	at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
INSTRUMENTATION_STATUS: test=failingTest
INSTRUMENTATION_STATUS_CODE: -2
INSTRUMENTATION_RESULT: stream=
Time: 1.92
There was 1 failure:
1) failingTest(com.my.package.test.BasicTest)
java.lang.UnsupportedOperationException: dummy failing test
	at com.my.package.test.BasicTest.failingTest(BasicTest.java:38)
	at java.lang.reflect.Method.invoke(Native Method)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
	at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
	at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
	at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
	at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
	at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
	at org.junit.rules.RunRules.evaluate(RunRules.java:20)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
	at org.junit.runners.Suite.runChild(Suite.java:128)
	at org.junit.runners.Suite.runChild(Suite.java:27)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at org.junit.runners.Suite.runChild(Suite.java:128)
	at org.junit.runners.Suite.runChild(Suite.java:27)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
	at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
	at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
	at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
	at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
	at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
FAILURES!!!
Tests run: 1, Failures: 1
INSTRUMENTATION_CODE: -1"""
    expected_executed = [
        ('com.my.package.test.BasicTest#failingTest', signals.TestFailure),
    ]
    self.assert_run_instrumentation_test(instrumentation_output,
                                         expected_executed=expected_executed)
def test_run_instrumentation_test_with_assumption_failure_test(self):
    """Checks that a test ending with an AssumptionViolatedException
    (INSTRUMENTATION_STATUS_CODE: -4) is reported as skipped while the
    overall instrumentation run still completes and passes."""
    # Raw `am instrument` output captured for a single test whose
    # org.junit.Assume check fails.
    instrumentation_output = """\
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=assumptionFailureTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stack=org.junit.AssumptionViolatedException: Assumption failure reason
at org.junit.Assume.assumeTrue(Assume.java:59)
at org.junit.Assume.assumeFalse(Assume.java:66)
at com.my.package.test.BasicTest.assumptionFailureTest(BasicTest.java:63)
at java.lang.reflect.Method.invoke(Native Method)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
at com.my.package.test.MyBaseTest$3.evaluate(MyBaseTest.java:96)
at com.my.package.test.MyBaseTest$4.evaluate(MyBaseTest.java:109)
at com.my.package.test.MyBaseTest$2.evaluate(MyBaseTest.java:77)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.RunRules.evaluate(RunRules.java:20)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
at com.my.package.test.runner.BaseRunner.runTests(BaseRunner.java:344)
at com.my.package.test.runner.BaseRunner.onStart(BaseRunner.java:330)
at com.my.package.test.runner.BaseRunner.onStart(BaseRunner.java:253)
at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=assumptionFailureTest
INSTRUMENTATION_STATUS_CODE: -4
INSTRUMENTATION_RESULT: stream=
Time: 3.139
OK (1 test)
INSTRUMENTATION_CODE: -1"""
    # An assumption failure should surface as Mobly's TestSkip signal.
    expected_skipped = [
        ('com.my.package.test.BasicTest#assumptionFailureTest',
         signals.TestSkip),
    ]
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_skipped=expected_skipped,
        expected_completed_and_passed=True)
def test_run_instrumentation_test_with_ignored_test(self):
    """Checks that an @Ignore'd test (INSTRUMENTATION_STATUS_CODE: -3)
    is reported as skipped and the run still completes and passes."""
    instrumentation_output = """\
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=ignoredTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=ignoredTest
INSTRUMENTATION_STATUS_CODE: -3
INSTRUMENTATION_RESULT: stream=
Time: 0.007
OK (0 tests)
INSTRUMENTATION_CODE: -1"""
    # Ignored tests map to Mobly's TestSkip signal.
    expected_skipped = [
        ('com.my.package.test.BasicTest#ignoredTest', signals.TestSkip),
    ]
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_skipped=expected_skipped,
        expected_completed_and_passed=True)
@mock.patch('mobly.utils.get_current_epoch_time')
def test_run_instrumentation_test_with_crashed_test(self, mock_get_time):
    """Checks that a process crash during the only test produces a
    TestError signal, flags the run as errored, and records the
    begin/end timestamps taken from the mocked clock."""
    instrumentation_output = """\
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=crashTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_RESULT: shortMsg=Process crashed.
INSTRUMENTATION_CODE: 0"""
    expected_executed = [
        ('com.my.package.test.BasicTest#crashTest', signals.TestError),
    ]
    # First value = test begin time, second = test end time.
    mock_get_time.side_effect = [67, 942]
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_executed=expected_executed,
        expected_has_error=True,
        expected_executed_times=[(67, 942)])
@mock.patch('mobly.utils.get_current_epoch_time')
def test_run_instrumentation_test_with_crashing_test(self, mock_get_time):
    """Checks a run where two tests crash-and-recover: both surface as
    TestError, yet the instrumentation stream itself ends with OK."""
    instrumentation_output = """\
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=2
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=crashAndRecover1Test
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=2
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=2
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=crashAndRecover2Test
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_RESULT: stream=
Time: 6.342
OK (2 tests)
INSTRUMENTATION_CODE: -1"""
    expected_executed = [
        ('com.my.package.test.BasicTest#crashAndRecover1Test',
         signals.TestError),
        ('com.my.package.test.BasicTest#crashAndRecover2Test',
         signals.TestError),
    ]
    mock_get_time.side_effect = [16, 412, 4143, 6547]
    # TODO(winterfrosts): Fix this issue with overlapping timing
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_executed=expected_executed,
        expected_completed_and_passed=True,
        expected_executed_times=[(16, 4143),
                                 (412, 6547)])
def test_run_instrumentation_test_with_runner_setup_crash(self):
    """Checks that a crash before any test starts (no STATUS blocks at
    all) is reported as a run-level error."""
    instrumentation_output = """\
INSTRUMENTATION_RESULT: shortMsg=Process crashed.
INSTRUMENTATION_CODE: 0"""
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_has_error=True)
def test_run_instrumentation_test_with_runner_teardown_crash(self):
    """Checks that a crash after the only test has passed still records
    the TestPass signal while flagging the run as errored."""
    instrumentation_output = """\
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: numtests=1
INSTRUMENTATION_STATUS: stream=.
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: test=basicTest
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS_CODE: 0
INSTRUMENTATION_RESULT: shortMsg=Process crashed.
INSTRUMENTATION_CODE: 0
"""
    expected_executed = [
        ('com.my.package.test.BasicTest#basicTest', signals.TestPass),
    ]
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_executed=expected_executed,
        expected_has_error=True)
@mock.patch('mobly.utils.get_current_epoch_time')
def test_run_instrumentation_test_with_multiple_tests(self, mock_get_time):
    """Checks a mixed run (failing, assumption-failure, ignored and
    passing test) parses into the expected signals and timings."""
    instrumentation_output = """\
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stream=
com.my.package.test.BasicTest:
INSTRUMENTATION_STATUS: test=failingTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=1
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stack=java.lang.UnsupportedOperationException: dummy failing test
at com.my.package.test.BasicTest.failingTest(BasicTest.java:40)
at java.lang.reflect.Method.invoke(Native Method)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.RunRules.evaluate(RunRules.java:20)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
INSTRUMENTATION_STATUS: stream=
Error in failingTest(com.my.package.test.BasicTest):
java.lang.UnsupportedOperationException: dummy failing test
at com.my.package.test.BasicTest.failingTest(BasicTest.java:40)
at java.lang.reflect.Method.invoke(Native Method)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.RunRules.evaluate(RunRules.java:20)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
INSTRUMENTATION_STATUS: test=failingTest
INSTRUMENTATION_STATUS_CODE: -2
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=2
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stream=
INSTRUMENTATION_STATUS: test=assumptionFailureTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=2
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stack=org.junit.AssumptionViolatedException: Assumption failure reason
at org.junit.Assume.assumeTrue(Assume.java:59)
at com.my.package.test.BasicTest.assumptionFailureTest(BasicTest.java:61)
at java.lang.reflect.Method.invoke(Native Method)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.RunRules.evaluate(RunRules.java:20)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
INSTRUMENTATION_STATUS: stream=
INSTRUMENTATION_STATUS: test=assumptionFailureTest
INSTRUMENTATION_STATUS_CODE: -4
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=3
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stream=
INSTRUMENTATION_STATUS: test=ignoredTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=3
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stream=
INSTRUMENTATION_STATUS: test=ignoredTest
INSTRUMENTATION_STATUS_CODE: -3
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=4
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stream=
INSTRUMENTATION_STATUS: test=passingTest
INSTRUMENTATION_STATUS_CODE: 1
INSTRUMENTATION_STATUS: class=com.my.package.test.BasicTest
INSTRUMENTATION_STATUS: current=4
INSTRUMENTATION_STATUS: id=AndroidJUnitRunner
INSTRUMENTATION_STATUS: numtests=4
INSTRUMENTATION_STATUS: stream=.
INSTRUMENTATION_STATUS: test=passingTest
INSTRUMENTATION_STATUS_CODE: 0
INSTRUMENTATION_RESULT: stream=
Time: 4.131
There was 1 failure:
1) failingTest(com.my.package.test.BasicTest)
java.lang.UnsupportedOperationException: dummy failing test
at com.my.package.test.BasicTest.failingTest(BasicTest.java:40)
at java.lang.reflect.Method.invoke(Native Method)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:57)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:59)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at androidx.test.internal.runner.junit4.statement.RunBefores.evaluate(RunBefores.java:80)
at androidx.test.internal.runner.junit4.statement.RunAfters.evaluate(RunAfters.java:61)
at androidx.test.rule.ActivityTestRule$ActivityStatement.evaluate(ActivityTestRule.java:433)
at com.my.package.test.BaseTest$3.evaluate(BaseTest.java:96)
at com.my.package.test.BaseTest$4.evaluate(BaseTest.java:109)
at com.my.package.test.BaseTest$2.evaluate(BaseTest.java:77)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
at org.junit.rules.RunRules.evaluate(RunRules.java:20)
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:81)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:327)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:84)
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at androidx.test.runner.AndroidJUnit4.run(AndroidJUnit4.java:99)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runners.Suite.runChild(Suite.java:128)
at org.junit.runners.Suite.runChild(Suite.java:27)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:292)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:73)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:290)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:60)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:270)
at org.junit.runners.ParentRunner.run(ParentRunner.java:370)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at org.junit.runner.JUnitCore.run(JUnitCore.java:115)
at androidx.test.internal.runner.TestExecutor.execute(TestExecutor.java:56)
at com.my.package.test.BaseRunner.runTests(BaseRunner.java:344)
at com.my.package.test.BaseRunner.onStart(BaseRunner.java:330)
at com.my.package.test.runner.MyRunner.onStart(MyRunner.java:253)
at android.app.Instrumentation$InstrumentationThread.run(Instrumentation.java:2074)
FAILURES!!!
Tests run: 3, Failures: 1
INSTRUMENTATION_CODE: -1"""
    expected_executed = [
        ('com.my.package.test.BasicTest#failingTest', signals.TestFailure),
        ('com.my.package.test.BasicTest#passingTest', signals.TestPass),
    ]
    # Both the assumption failure and the ignored test map to TestSkip.
    expected_skipped = [
        ('com.my.package.test.BasicTest#assumptionFailureTest',
         signals.TestSkip),
        ('com.my.package.test.BasicTest#ignoredTest', signals.TestSkip),
    ]
    # NOTE(review): the -1 entries presumably correspond to the two
    # skipped tests, whose timestamps are not asserted below -- confirm
    # against assert_run_instrumentation_test.
    mock_get_time.side_effect = [54, 64, -1, -1, -1, -1, 89, 94]
    self.assert_run_instrumentation_test(
        instrumentation_output,
        expected_executed=expected_executed,
        expected_skipped=expected_skipped,
        expected_executed_times=[(54, 64),
                                 (89, 94)])
def test__Instrumentation_block_set_key_on_multiple_equals_sign(self):
    """A value containing '=' characters must be stored intact: only the
    first '=' after the key name separates key from value."""
    raw_value = "blah=blah, blah2=blah2, blah=2=1=2"
    status_line = "INSTRUMENTATION_STATUS: stack=%s" % raw_value
    block = _InstrumentationBlock()
    block.set_key(_InstrumentationStructurePrefixes.STATUS, status_line)
    stack_entries = block.known_keys[_InstrumentationKnownStatusKeys.STACK]
    self.assertIn(raw_value, stack_entries)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "1c7442e55db57306a2357cc5c4a70767",
"timestamp": "",
"source": "github",
"line_count": 1046,
"max_line_length": 141,
"avg_line_length": 49.68642447418738,
"alnum_prop": 0.7684522435157393,
"repo_name": "google/mobly",
"id": "c336b90d7b2a0bed1ffa2fd45ddd7ad069a267f4",
"size": "52548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/mobly/base_instrumentation_test_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1048454"
}
],
"symlink_target": ""
} |
"""
WSGI config for airquality project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "airquality.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "ed3af4168f554a3705176ec9f41671b5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.214285714285715,
"alnum_prop": 0.7772151898734178,
"repo_name": "clairityproject/backend",
"id": "1b9853314b309b9526c143044a1343c84000f9ba",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airquality/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31793"
},
{
"name": "HTML",
"bytes": "71696"
},
{
"name": "JavaScript",
"bytes": "235295"
},
{
"name": "Python",
"bytes": "128267"
}
],
"symlink_target": ""
} |
""":mod:`dodotable.helper` --- helper
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from .condition import SelectFilter
from .schema import Queryable, Renderable, Schema
from .util import camel_to_underscore
__all__ = '_Helper', 'Limit', 'Category',
class _Helper(Schema):
    # Marker base class shared by the helper widgets in this module.
    pass
class Limit(_Helper, Renderable, Queryable):
    """Provides page-size controls such as "show 100 rows" by
    manipulating the ``limit`` parameter in the querystring.
    """

    def __init__(self, table, request_args, identifier=None):
        # table: the dodotable table this limit widget belongs to.
        # request_args: the current request's query arguments.
        # identifier: optional suffix for the querystring key; defaults to
        # the table's model class name converted to snake_case.
        self.table = table
        self.request_args = request_args
        if not identifier:
            identifier = camel_to_underscore(table.cls.__name__)
        # Querystring key this widget reads/writes, e.g. ``limit_my_model``.
        self.arg_type_name = 'limit_{}'.format(identifier)

    def __query__(self):
        # The limit widget contributes nothing to the query itself; it is
        # purely a rendered control.
        pass

    def __html__(self):
        return self.render('limit.html', filter=self)
class Category(_Helper, SelectFilter):
    """A select-style filter that is rendered as a category list
    instead of a ``select`` tag.
    """

    def __html__(self):
        return self.render('category.html', filter=self)
def monkey_patch_environment(environ,
                             modules=('dodotable.schema',
                                      'dodotable.condition',
                                      'dodotable.helper')):
    """Install a shared template environment on every dodotable class.

    Instantiates *environ* once and assigns the result to the
    ``environment`` attribute of every attribute of the given modules
    that accepts attribute assignment.

    :param environ: zero-argument callable returning the environment
                    instance to install.
    :param modules: dotted module names to patch; defaults to the
                    dodotable modules (new optional parameter, kept
                    backward-compatible).
    """
    import importlib

    e = environ()
    for module_name in modules:
        # importlib.import_module replaces the Python 2-only
        # ``__import__(..., level=-1)`` call, which raises ValueError
        # on Python 3.
        module = importlib.import_module(module_name)
        for attr in dir(module):
            try:
                getattr(module, attr).environment = e
            except AttributeError:
                # Builtins such as strings/ints reject attribute
                # assignment; skip them, as the original did.
                continue
| {
"content_hash": "9833f9d2674821541adafbb015ede216",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 25.089285714285715,
"alnum_prop": 0.5893238434163701,
"repo_name": "spoqa/dodotable",
"id": "750a3d5617164c3a7a801a62c71e911d6bc74f98",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dodotable/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7800"
},
{
"name": "Python",
"bytes": "44554"
},
{
"name": "Shell",
"bytes": "468"
}
],
"symlink_target": ""
} |
import os
import logging
import sys
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
# Module-level logger; a handler is attached to it by
# auth_with_calendar_api based on the supplied config.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def auth_with_calendar_api(config):
    """Authenticate against the Google Calendar API and return a service.

    Loads cached OAuth credentials from ``config['CREDENTIAL_PATH']`` when
    present, refreshing them if expired; otherwise runs the interactive
    installed-app flow using ``config['CLIENT_SECRET_FILE']``. New or
    refreshed credentials are written back to the credential path.

    :param config: mapping with keys ``CREDENTIAL_PATH``,
        ``CLIENT_SECRET_FILE``, ``SCOPES`` (a single scope string -- it is
        wrapped in a list below) and optionally ``LOGFILE``.
    :return: an authorized ``googleapiclient`` service for Calendar v3.
    """
    # Only attach a handler the first time through: the original code
    # added a fresh handler on every call, duplicating each log line.
    if not logger.handlers:
        if config.get('LOGFILE', None):
            handler = logging.FileHandler(filename=config['LOGFILE'], mode='a')
        else:
            handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(
            logging.Formatter('%(asctime)s|[%(levelname)s] %(message)s'))
        logger.addHandler(handler)

    # This file stores the access and refresh tokens, and is created
    # automatically when the auth flow succeeds for the first time.
    creds = None
    if os.path.exists(config['CREDENTIAL_PATH']):
        try:
            logger.info('Loading cached credentials')
            creds = Credentials.from_authorized_user_file(
                config['CREDENTIAL_PATH'], [config['SCOPES']])
        except Exception as e:
            # A corrupt/stale token file is discarded so a fresh flow runs.
            logger.warning(f'Failed to load cached credentials, exception was "{str(e)}"')
            os.unlink(config['CREDENTIAL_PATH'])

    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            logger.info('Refreshing credentials')
            creds.refresh(Request())
        else:
            logger.info('Credentials need manually approved!')
            flow = InstalledAppFlow.from_client_secrets_file(
                config['CLIENT_SECRET_FILE'], [config['SCOPES']])
            creds = flow.run_local_server(port=0)
        # Save credentials if successful so the next run skips the flow.
        with open(config['CREDENTIAL_PATH'], 'w') as token:
            token.write(creds.to_json())

    service = build('calendar', 'v3', credentials=creds)
    return service
| {
"content_hash": "aa36a30e33c95df0b820216b5b6a84c0",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 104,
"avg_line_length": 38.734693877551024,
"alnum_prop": 0.6538461538461539,
"repo_name": "andrewramsay/ical_to_gcal_sync",
"id": "b15ddb11d104e90a02830c5bb4e59c2feb584303",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18889"
}
],
"symlink_target": ""
} |
from pythonwrap import logger as clog
from pythonwrap import log_level
TRACE = 0
DEBUG = 1
INFO = 2
WARNING = 3
ERROR = 4
CRITICAL = 5
class Logging(object):
    """printf-style logging facade that forwards to the core `clog` hook.

    Bug fixes versus the original:
    - info/warning/error/critical declared their first parameter as ``level``
      but passed an undefined name ``fmt`` to ``log`` (guaranteed NameError).
    - ``warning`` logged at ERROR and ``error`` logged at WARNING (swapped).
    """

    def __init__(self, section):
        # Section tag included with every message (e.g. 'python').
        self.section = section

    def log(self, level, fmt, *args):
        """Format *fmt* with *args* (%-style) and forward to clog.

        Messages below the core's current log_level() are dropped.
        """
        if level < log_level():
            return
        msg = fmt
        if args and len(args):
            msg = fmt % args
        clog(level, self.section, str(msg))

    def trace(self, fmt, *args):
        self.log(TRACE, fmt, *args)

    def debug(self, fmt, *args):
        self.log(DEBUG, fmt, *args)

    def info(self, fmt, *args):
        self.log(INFO, fmt, *args)

    def warning(self, fmt, *args):
        self.log(WARNING, fmt, *args)

    def error(self, fmt, *args):
        self.log(ERROR, fmt, *args)

    def critical(self, fmt, *args):
        self.log(CRITICAL, fmt, *args)
logger = Logging('python')
| {
"content_hash": "44fe5e929bac835d3c45d14523ece61b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 39,
"avg_line_length": 19.372093023255815,
"alnum_prop": 0.6218487394957983,
"repo_name": "oftc/oftc-ircd",
"id": "5962cabbfee48d410595ce19bf85d9f1ddffba7a",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "modules/python/ircd/logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12555"
},
{
"name": "C++",
"bytes": "229190"
},
{
"name": "Python",
"bytes": "27728"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import datetime
from functools import partial
from textwrap import dedent
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._libs.tslibs import Timedelta
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
Axis,
TimedeltaConvertibleTypes,
)
if TYPE_CHECKING:
from pandas import DataFrame, Series
from pandas.core.generic import NDFrame
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.dtypes.missing import isna
import pandas.core.common as common # noqa: PDF018
from pandas.core.indexers.objects import (
BaseIndexer,
ExponentialMovingWindowIndexer,
GroupbyIndexer,
)
from pandas.core.util.numba_ import maybe_use_numba
from pandas.core.window.common import zsqrt
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_header,
kwargs_compat,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
)
from pandas.core.window.numba_ import (
generate_ewma_numba_table_func,
generate_numba_ewma_func,
)
from pandas.core.window.online import (
EWMMeanState,
generate_online_numba_ewma_func,
)
from pandas.core.window.rolling import (
BaseWindow,
BaseWindowGroupby,
)
def get_center_of_mass(
    comass: float | None,
    span: float | None,
    halflife: float | None,
    alpha: float | None,
) -> float:
    """Convert whichever single decay argument was supplied into a center of mass.

    Exactly one of ``comass``/``span``/``halflife``/``alpha`` may be given;
    each is validated against its legal domain before conversion.

    Raises
    ------
    ValueError
        If more than one argument is given, none is given, or the given
        value is outside its domain.
    """
    if common.count_not_none(comass, span, halflife, alpha) > 1:
        raise ValueError("comass, span, halflife, and alpha are mutually exclusive")

    if comass is not None:
        if comass < 0:
            raise ValueError("comass must satisfy: comass >= 0")
        result = comass
    elif span is not None:
        if span < 1:
            raise ValueError("span must satisfy: span >= 1")
        # alpha = 2 / (span + 1)  <=>  com = (span - 1) / 2
        result = (span - 1) / 2
    elif halflife is not None:
        if halflife <= 0:
            raise ValueError("halflife must satisfy: halflife > 0")
        # decay per step implied by the half-life, then invert to a com.
        per_step_decay = 1 - np.exp(np.log(0.5) / halflife)
        result = 1 / per_step_decay - 1
    elif alpha is not None:
        if alpha <= 0 or alpha > 1:
            raise ValueError("alpha must satisfy: 0 < alpha <= 1")
        result = (1 - alpha) / alpha
    else:
        raise ValueError("Must pass one of comass, span, halflife, or alpha")

    return float(result)
def _calculate_deltas(
times: str | np.ndarray | NDFrame | None,
halflife: float | TimedeltaConvertibleTypes | None,
) -> np.ndarray:
"""
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.
Parameters
----------
times : str, np.ndarray, Series, default None
Times corresponding to the observations. Must be monotonically increasing
and ``datetime64[ns]`` dtype.
halflife : float, str, timedelta, optional
Half-life specifying the decay
Returns
-------
np.ndarray
Diff of the times divided by the half-life
"""
# error: Item "str" of "Union[str, ndarray, NDFrameT, None]" has no
# attribute "view"
# error: Item "None" of "Union[str, ndarray, NDFrameT, None]" has no
# attribute "view"
_times = np.asarray(
times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]
)
_halflife = float(Timedelta(halflife).value)
return np.diff(_times) / _halflife
class ExponentialMovingWindow(BaseWindow):
r"""
Provide exponential weighted (EW) functions.
Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``.
Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be
provided.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`.
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`.
halflife : float, str, timedelta, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for
:math:`halflife > 0`.
If ``times`` is specified, the time unit (str or timedelta) over which an
observation decays to half its value. Only applicable to ``mean()``
and halflife value will not apply to the other functions.
.. versionadded:: 1.1.0
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`.
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
adjust : bool, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average).
- When ``adjust=True`` (default), the EW function is calculated using weights
:math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series
[:math:`x_0, x_1, ..., x_t`] would be:
.. math::
y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 -
\alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t}
- When ``adjust=False``, the exponentially weighted function is calculated
recursively:
.. math::
\begin{split}
y_0 &= x_0\\
y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,
\end{split}
ignore_na : bool, default False
Ignore missing values when calculating weights; specify ``True`` to reproduce
pre-0.15.0 behavior.
- When ``ignore_na=False`` (default), weights are based on absolute positions.
For example, the weights of :math:`x_0` and :math:`x_2` used in calculating
the final weighted average of [:math:`x_0`, None, :math:`x_2`] are
:math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and
:math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.
- When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`
used in calculating the final weighted average of
[:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if
``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``.
axis : {0, 1}, default 0
The axis to use. The value 0 identifies the rows, and 1
identifies the columns.
times : str, np.ndarray, Series, default None
.. versionadded:: 1.1.0
Times corresponding to the observations. Must be monotonically increasing and
``datetime64[ns]`` dtype.
If str, the name of the column in the DataFrame representing the times.
If 1-D array like, a sequence with the same shape as the observations.
Only applicable to ``mean()``.
method : str {'single', 'table'}, default 'single'
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
Only applicable to ``mean()``
.. versionadded:: 1.4.0
Returns
-------
DataFrame
A Window sub-classed for the particular operation.
See Also
--------
rolling : Provides rolling window calculations.
expanding : Provides expanding transformations.
Notes
-----
More details can be found at:
:ref:`Exponentially weighted windows <window.exponentially_weighted>`.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Specifying ``times`` with a timedelta ``halflife`` when computing mean.
>>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
>>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
B
0 0.000000
1 0.585786
2 1.523889
3 1.523889
4 3.233686
"""
_attributes = [
"com",
"span",
"halflife",
"alpha",
"min_periods",
"adjust",
"ignore_na",
"axis",
"times",
"method",
]
    def __init__(
        self,
        obj: NDFrame,
        com: float | None = None,
        span: float | None = None,
        halflife: float | TimedeltaConvertibleTypes | None = None,
        alpha: float | None = None,
        min_periods: int | None = 0,
        adjust: bool = True,
        ignore_na: bool = False,
        axis: Axis = 0,
        times: str | np.ndarray | NDFrame | None = None,
        method: str = "single",
        *,
        selection=None,
    ):
        """Validate the decay arguments and precompute ``_com`` and ``_deltas``.

        Exactly one of com/span/halflife/alpha selects the decay; when
        ``times`` is given, ``halflife`` must instead be Timedelta-convertible
        and is consumed by ``_calculate_deltas``.
        """
        super().__init__(
            obj=obj,
            min_periods=1 if min_periods is None else max(int(min_periods), 1),
            on=None,
            center=False,
            closed=None,
            method=method,
            axis=axis,
            selection=selection,
        )
        self.com = com
        self.span = span
        self.halflife = halflife
        self.alpha = alpha
        self.adjust = adjust
        self.ignore_na = ignore_na
        self.times = times
        if self.times is not None:
            if not self.adjust:
                raise NotImplementedError("times is not supported with adjust=False.")
            # Deprecated path: resolve a column label to the actual column.
            if isinstance(self.times, str):
                warnings.warn(
                    (
                        "Specifying times as a string column label is deprecated "
                        "and will be removed in a future version. Pass the column "
                        "into times instead."
                    ),
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                self.times = self._selected_obj[self.times]
            if not is_datetime64_ns_dtype(self.times):
                raise ValueError("times must be datetime64[ns] dtype.")
            # error: Argument 1 to "len" has incompatible type "Union[str, ndarray,
            # NDFrameT, None]"; expected "Sized"
            if len(self.times) != len(obj):  # type: ignore[arg-type]
                raise ValueError("times must be the same length as the object.")
            if not isinstance(self.halflife, (str, datetime.timedelta)):
                raise ValueError(
                    "halflife must be a string or datetime.timedelta object"
                )
            # NaT would silently corrupt the int64 view taken below.
            if isna(self.times).any():
                raise ValueError("Cannot convert NaT values to integer")
            self._deltas = _calculate_deltas(self.times, self.halflife)
            # Halflife is no longer applicable when calculating COM
            # But allow COM to still be calculated if the user passes other decay args
            if common.count_not_none(self.com, self.span, self.alpha) > 0:
                self._com = get_center_of_mass(self.com, self.span, None, self.alpha)
            else:
                self._com = 1.0
        else:
            if self.halflife is not None and isinstance(
                self.halflife, (str, datetime.timedelta)
            ):
                raise ValueError(
                    "halflife can only be a timedelta convertible argument if "
                    "times is not None."
                )
            # Without times, points are equally spaced
            self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64)
            self._com = get_center_of_mass(
                # error: Argument 3 to "get_center_of_mass" has incompatible type
                # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]";
                # expected "Optional[float]"
                self.com,
                self.span,
                self.halflife,  # type: ignore[arg-type]
                self.alpha,
            )
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
return ExponentialMovingWindowIndexer()
    def online(self, engine="numba", engine_kwargs=None):
        """
        Return an ``OnlineExponentialMovingWindow`` object to calculate
        exponentially moving window aggregations in an online method.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        engine: str, default ``'numba'``
            Execution engine to calculate online aggregations.
            Applies to all supported aggregation methods.
        engine_kwargs : dict, default None
            Applies to all supported aggregation methods.

            * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
              and ``parallel`` dictionary keys. The values must either be ``True`` or
              ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
              ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
              applied to the function

        Returns
        -------
        OnlineExponentialMovingWindow
        """
        # Forward the full decay/adjust configuration so the online object
        # reproduces this window's behaviour incrementally.
        return OnlineExponentialMovingWindow(
            obj=self.obj,
            com=self.com,
            span=self.span,
            halflife=self.halflife,
            alpha=self.alpha,
            min_periods=self.min_periods,
            adjust=self.adjust,
            ignore_na=self.ignore_na,
            axis=self.axis,
            times=self.times,
            engine=engine,
            engine_kwargs=engine_kwargs,
            selection=self._selection,
        )
    @doc(
        _shared_docs["aggregate"],
        see_also=dedent(
            """
        See Also
        --------
        pandas.DataFrame.rolling.aggregate
        """
        ),
        examples=dedent(
            """
        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        >>> df
           A  B  C
        0  1  4  7
        1  2  5  8
        2  3  6  9

        >>> df.ewm(alpha=0.5).mean()
                  A         B         C
        0  1.000000  4.000000  7.000000
        1  1.666667  4.666667  7.666667
        2  2.428571  5.428571  8.428571
        """
        ),
        klass="Series/Dataframe",
        axis="",
    )
    def aggregate(self, func, *args, **kwargs):
        # Delegate to the shared window aggregation machinery; the @doc
        # decorator above supplies the full user-facing docstring.
        return super().aggregate(func, *args, **kwargs)

    # Common alias, matching the rest of the pandas API.
    agg = aggregate
    @doc(
        template_header,
        create_section_header("Parameters"),
        args_compat,
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes.replace("\n", "", 1),
        window_method="ewm",
        aggregation_description="(exponential weighted moment) mean",
        agg_method="mean",
    )
    def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
        if maybe_use_numba(engine):
            # Numba path: pick the per-column or whole-table kernel
            # depending on the ``method`` chosen at construction time.
            if self.method == "single":
                ewma_func = generate_numba_ewma_func(
                    engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
                )
                numba_cache_key = (lambda x: x, "ewma")
            else:
                ewma_func = generate_ewma_numba_table_func(
                    engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
                )
                numba_cache_key = (lambda x: x, "ewma_table")
            return self._apply(
                ewma_func,
                numba_cache_key=numba_cache_key,
            )
        elif engine in ("cython", None):
            if engine_kwargs is not None:
                raise ValueError("cython engine does not accept engine_kwargs")
            nv.validate_window_func("mean", args, kwargs)
            # Only pass time deltas when irregular times were supplied;
            # otherwise the cython kernel assumes equally spaced points.
            deltas = None if self.times is None else self._deltas
            window_func = partial(
                window_aggregations.ewma,
                com=self._com,
                adjust=self.adjust,
                ignore_na=self.ignore_na,
                deltas=deltas,
            )
            return self._apply(window_func)
        else:
            raise ValueError("engine must be either 'numba' or 'cython'")
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        bias : bool, default False
            Use a standard estimation bias correction.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) standard deviation",
        agg_method="std",
    )
    def std(self, bias: bool = False, *args, **kwargs):
        nv.validate_window_func("std", args, kwargs)
        # std is the (NaN-safe) square root of the EW variance.
        return zsqrt(self.var(bias=bias, **kwargs))
def vol(self, bias: bool = False, *args, **kwargs):
warnings.warn(
(
"vol is deprecated will be removed in a future version. "
"Use std instead."
),
FutureWarning,
stacklevel=2,
)
return self.std(bias, *args, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        bias : bool, default False
            Use a standard estimation bias correction.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) variance",
        agg_method="var",
    )
    def var(self, bias: bool = False, *args, **kwargs):
        nv.validate_window_func("var", args, kwargs)
        window_func = window_aggregations.ewmcov
        wfunc = partial(
            window_func,
            com=self._com,
            adjust=self.adjust,
            ignore_na=self.ignore_na,
            bias=bias,
        )

        # Variance is the covariance of the series with itself, so pass the
        # same values array as both operands of ewmcov.
        def var_func(values, begin, end, min_periods):
            return wfunc(values, begin, end, min_periods, values)

        return self._apply(var_func)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        other : Series or DataFrame , optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndex DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        bias : bool, default False
            Use a standard estimation bias correction.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) sample covariance",
        agg_method="cov",
    )
    def cov(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        bias: bool = False,
        **kwargs,
    ):
        from pandas import Series

        # Per-pair kernel handed to _apply_pairwise: computes the EW
        # covariance of two aligned 1-D Series.
        def cov_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
            )
            result = window_aggregations.ewmcov(
                x_array,
                start,
                end,
                # error: Argument 4 to "ewmcov" has incompatible type
                # "Optional[int]"; expected "int"
                self.min_periods,  # type: ignore[arg-type]
                y_array,
                self._com,
                self.adjust,
                self.ignore_na,
                bias,
            )
            return Series(result, index=x.index, name=x.name)

        return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        other : Series or DataFrame, optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndex DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) sample correlation",
        agg_method="corr",
    )
    def corr(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        **kwargs,
    ):
        from pandas import Series

        # Per-pair kernel: correlation = cov(x, y) / sqrt(var(x) * var(y)),
        # all computed with the same (biased) EW covariance routine.
        def cov_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
            )

            def _cov(X, Y):
                return window_aggregations.ewmcov(
                    X,
                    start,
                    end,
                    min_periods,
                    Y,
                    self._com,
                    self.adjust,
                    self.ignore_na,
                    True,
                )

            # Silence divide-by-zero / invalid warnings from degenerate pairs.
            with np.errstate(all="ignore"):
                cov = _cov(x_array, y_array)
                x_var = _cov(x_array, x_array)
                y_var = _cov(y_array, y_array)
                result = cov / zsqrt(x_var * y_var)
            return Series(result, index=x.index, name=x.name)

        return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):
    """
    Provide an exponential moving window groupby implementation.
    """

    _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes

    def __init__(self, obj, *args, _grouper=None, **kwargs):
        super().__init__(obj, *args, _grouper=_grouper, **kwargs)

        if not obj.empty and self.times is not None:
            # sort the times and recalculate the deltas according to the groups
            groupby_order = np.concatenate(list(self._grouper.indices.values()))
            self._deltas = _calculate_deltas(
                self.times.take(groupby_order),  # type: ignore[union-attr]
                self.halflife,
            )

    def _get_window_indexer(self) -> GroupbyIndexer:
        """
        Return an indexer class that will compute the window start and end bounds

        Returns
        -------
        GroupbyIndexer
        """
        # NOTE(review): 'groupby_indicies' (sic) must match GroupbyIndexer's
        # keyword spelling — confirm against pandas.core.indexers.objects.
        window_indexer = GroupbyIndexer(
            groupby_indicies=self._grouper.indices,
            window_indexer=ExponentialMovingWindowIndexer,
        )
        return window_indexer
class OnlineExponentialMovingWindow(ExponentialMovingWindow):
    """
    Online (incremental) exponentially weighted window.

    Only ``mean`` is supported.  EWM state is kept between calls so new
    observations can be folded in via ``mean(update=...)``.

    Bug fix versus the original: the unsupported aggregations previously
    *returned* the ``NotImplementedError`` class instead of raising it,
    silently handing callers an exception type as the "result".
    """

    def __init__(
        self,
        obj: NDFrame,
        com: float | None = None,
        span: float | None = None,
        halflife: float | TimedeltaConvertibleTypes | None = None,
        alpha: float | None = None,
        min_periods: int | None = 0,
        adjust: bool = True,
        ignore_na: bool = False,
        axis: Axis = 0,
        times: str | np.ndarray | NDFrame | None = None,
        engine: str = "numba",
        engine_kwargs: dict[str, bool] | None = None,
        *,
        selection=None,
    ):
        if times is not None:
            raise NotImplementedError(
                "times is not implemented with online operations."
            )
        super().__init__(
            obj=obj,
            com=com,
            span=span,
            halflife=halflife,
            alpha=alpha,
            min_periods=min_periods,
            adjust=adjust,
            ignore_na=ignore_na,
            axis=axis,
            times=times,
            selection=selection,
        )
        # Mutable EWM state carried across successive mean(update=...) calls.
        self._mean = EWMMeanState(
            self._com, self.adjust, self.ignore_na, self.axis, obj.shape
        )
        if maybe_use_numba(engine):
            self.engine = engine
            self.engine_kwargs = engine_kwargs
        else:
            raise ValueError("'numba' is the only supported engine")

    def reset(self):
        """
        Reset the state captured by `update` calls.
        """
        self._mean.reset()

    def aggregate(self, func, *args, **kwargs):
        # Fixed: raise (not return) NotImplementedError.
        raise NotImplementedError("aggregate is not implemented.")

    def std(self, bias: bool = False, *args, **kwargs):
        raise NotImplementedError("std is not implemented.")

    def corr(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        **kwargs,
    ):
        raise NotImplementedError("corr is not implemented.")

    def cov(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        bias: bool = False,
        **kwargs,
    ):
        raise NotImplementedError("cov is not implemented.")

    def var(self, bias: bool = False, *args, **kwargs):
        raise NotImplementedError("var is not implemented.")

    def mean(self, *args, update=None, update_times=None, **kwargs):
        """
        Calculate an online exponentially weighted mean.

        Parameters
        ----------
        update: DataFrame or Series, default None
            New values to continue calculating the
            exponentially weighted mean from the last values and weights.
            Values should be float64 dtype.

            ``update`` needs to be ``None`` the first time the
            exponentially weighted mean is calculated.

        update_times: Series or 1-D np.ndarray, default None
            New times to continue calculating the
            exponentially weighted mean from the last values and weights.
            If ``None``, values are assumed to be evenly spaced
            in time.
            This feature is currently unsupported.

        Returns
        -------
        DataFrame or Series

        Examples
        --------
        >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
        >>> online_ewm = df.head(2).ewm(0.5).online()
        >>> online_ewm.mean()
              a     b
        0  0.00  5.00
        1  0.75  5.75
        >>> online_ewm.mean(update=df.tail(3))
                  a         b
        2  1.615385  6.615385
        3  2.550000  7.550000
        4  3.520661  8.520661
        >>> online_ewm.reset()
        >>> online_ewm.mean()
              a     b
        0  0.00  5.00
        1  0.75  5.75
        """
        result_kwargs = {}
        is_frame = self._selected_obj.ndim == 2
        if update_times is not None:
            raise NotImplementedError("update_times is not implemented.")
        else:
            # Points are treated as equally spaced (delta = 1 per step).
            update_deltas = np.ones(
                max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64
            )
        if update is not None:
            if self._mean.last_ewm is None:
                raise ValueError(
                    "Must call mean with update=None first before passing update"
                )
            # Prepend the last EWM value so the kernel continues from the
            # previous state; drop that seed row from the result below.
            result_from = 1
            result_kwargs["index"] = update.index
            if is_frame:
                last_value = self._mean.last_ewm[np.newaxis, :]
                result_kwargs["columns"] = update.columns
            else:
                last_value = self._mean.last_ewm
                result_kwargs["name"] = update.name
            np_array = np.concatenate((last_value, update.to_numpy()))
        else:
            result_from = 0
            result_kwargs["index"] = self._selected_obj.index
            if is_frame:
                result_kwargs["columns"] = self._selected_obj.columns
            else:
                result_kwargs["name"] = self._selected_obj.name
            np_array = self._selected_obj.astype(np.float64).to_numpy()
        ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
        result = self._mean.run_ewm(
            np_array if is_frame else np_array[:, np.newaxis],
            update_deltas,
            self.min_periods,
            ewma_func,
        )
        if not is_frame:
            result = result.squeeze()
        result = result[result_from:]
        result = self._selected_obj._constructor(result, **result_kwargs)
        return result
| {
"content_hash": "6f5deb342a9562f798ccd158dbedeee8",
"timestamp": "",
"source": "github",
"line_count": 905,
"max_line_length": 88,
"avg_line_length": 33.92707182320442,
"alnum_prop": 0.5495375195414278,
"repo_name": "rs2/pandas",
"id": "79102c2bc82ee8de6791feaff1cb7d769cd3d47a",
"size": "30704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/window/ewm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360253"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1081551"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17393243"
},
{
"name": "Shell",
"bytes": "10872"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import io
import unittest
from unittest.mock import patch
from kattis import k_pseudoprime
###############################################################################
class SampleInput(unittest.TestCase):
    '''Sample inputs and outputs from the problem statement.'''

    def test_sample_input(self):
        '''Feed the sample input to main() and compare captured stdout.'''
        stdin_text = '\n'.join([
            '3 2',
            '10 3',
            '341 2',
            '341 3',
            '1105 2',
            '1105 3',
            '0 0',
        ]) + '\n'
        expected = '\n'.join(['no', 'no', 'yes', 'no', 'yes', 'yes']) + '\n'
        with patch('sys.stdin', io.StringIO(stdin_text)) as stdin,\
                patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_pseudoprime.main()
        self.assertEqual(stdout.getvalue(), expected)
        # main() must consume all of stdin, including the 0 0 sentinel.
        self.assertEqual(stdin.read(), '')
###############################################################################
# Allow running this test module directly with `python test_k_pseudoprime.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "0eed29356216c59990333dae1155f48f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 31.48780487804878,
"alnum_prop": 0.5042602633617351,
"repo_name": "ivanlyon/exercises",
"id": "806f743f8df9bf3f31037d491da8da7abb7ef3a3",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_k_pseudoprime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1283"
},
{
"name": "HTML",
"bytes": "9068"
},
{
"name": "Python",
"bytes": "96419"
}
],
"symlink_target": ""
} |
import os, sys

# Django management entry point: point Django at this project's settings,
# expose the bundled lib/ directory, then dispatch the CLI command.
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "drftest.settings")
    # Add the lib/ directory to the system path
    sys.path.append("lib")
    # Imported lazily so DJANGO_SETTINGS_MODULE is set first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "d4f914c54c3f8e367f963be2056875f7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 29.7,
"alnum_prop": 0.6902356902356902,
"repo_name": "jinto/drftest",
"id": "ee12edaa9590983bb5883fef5102cfbb44272228",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11300"
},
{
"name": "HTML",
"bytes": "16321"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Nginx",
"bytes": "4758"
},
{
"name": "Python",
"bytes": "18653"
},
{
"name": "Shell",
"bytes": "2253"
}
],
"symlink_target": ""
} |
import sys, argparse, re
# Stat-log line patterns.  Capture groups: (1) sender id, (2) flow id,
# (3) peer id, (4) value — connection time in microseconds / bytes sent.
connection_time_regex = re.compile(r"^DATM\t.*sender\t(\d+)\tconnection_time_(\d+)_(\d+)\t\d+\t\d+\t(\d+)")
bytes_sent_regex = re.compile(r"^DATM\t.*sender\t(\d+)\ttotal_bytes_sent_(\d+)_(\d+)\t(\d+)")
def build_nested_dict(data_dict, sender, flow):
    """Ensure data_dict[sender][flow] exists with empty 'sec'/'MB' slots.

    Mutates and returns data_dict; existing entries are left untouched.
    """
    sender_flows = data_dict.setdefault(sender, {})
    if flow not in sender_flows:
        sender_flows[flow] = {"sec": None, "MB": None}
    return data_dict
def get_peer_data(log_file):
    """Parse a stat log into per-(sender, peer, flow) transfer data.

    Returns a tuple (peer_flow_data, peer_max_times, peer_total_bytes):
      peer_flow_data[sender][peer][flow] -> {"sec": seconds, "MB": megabytes}
      peer_max_times[peer]  -> longest connection time seen for that peer
      peer_total_bytes[peer] -> total MB sent to that peer

    Fixes versus the original: removed the unused `flow_data` local and
    each regex is now matched once per line instead of twice.
    """
    peer_flow_data = {}
    with open(log_file, "r") as fp:
        for line in fp:
            line = line.strip()
            connection_match = connection_time_regex.match(line)
            bytes_match = bytes_sent_regex.match(line)
            if connection_match is not None:
                sender = int(connection_match.group(1))
                if sender not in peer_flow_data:
                    peer_flow_data[sender] = {}

                flow = int(connection_match.group(2))
                peer = int(connection_match.group(3))
                if peer not in peer_flow_data[sender]:
                    peer_flow_data[sender][peer] = {}

                connection_time = int(connection_match.group(4))
                # Convert microseconds to seconds
                connection_time /= 1000000.0

                if flow not in peer_flow_data[sender][peer]:
                    peer_flow_data[sender][peer][flow] = {}
                peer_flow_data[sender][peer][flow]["sec"] = connection_time
            elif bytes_match is not None:
                sender = int(bytes_match.group(1))
                if sender not in peer_flow_data:
                    peer_flow_data[sender] = {}

                flow = int(bytes_match.group(2))
                peer = int(bytes_match.group(3))
                if peer not in peer_flow_data[sender]:
                    peer_flow_data[sender][peer] = {}

                bytes_sent = int(bytes_match.group(4))
                # Convert bytes to MB (decimal)
                MB_sent = bytes_sent / 1000000.0

                if flow not in peer_flow_data[sender][peer]:
                    peer_flow_data[sender][peer][flow] = {}
                peer_flow_data[sender][peer][flow]["MB"] = MB_sent

    # Compute peer throughput totals.
    # NOTE(review): assumes every flow has both a connection_time and a
    # total_bytes_sent line — a log missing one of the pair KeyErrors here.
    peer_total_bytes = {}
    peer_max_times = {}
    for sender in peer_flow_data:
        for peer in peer_flow_data[sender]:
            if peer not in peer_total_bytes:
                peer_total_bytes[peer] = 0.0
                peer_max_times[peer] = 0.0

            for flow in peer_flow_data[sender][peer]:
                peer_total_bytes[peer] += \
                    peer_flow_data[sender][peer][flow]["MB"]
                peer_max_times[peer] = \
                    max(peer_max_times[peer],
                        peer_flow_data[sender][peer][flow]["sec"])

    return (peer_flow_data, peer_max_times, peer_total_bytes)
def network_flow_throughput(log_file):
    # Print per-flow and aggregate per-peer throughput figures parsed from
    # `log_file`.  Python 2 module (print statements).  Returns 0 (exit code).
    (peer_flow_data, peer_max_times, peer_total_bytes) = get_peer_data(log_file)

    # Display data.
    for sender in peer_flow_data:
        print "Sender %d:" % sender
        for peer in peer_flow_data[sender]:
            print "  Peer %d:" % peer
            for flow in peer_flow_data[sender][peer]:
                data = peer_flow_data[sender][peer][flow]
                # Throughput = megabytes transferred / connection seconds.
                throughput = data["MB"] / data["sec"]

                print "    Flow %d: %.2f MB/s (%.2f MB over %.2f seconds)" % (
                    flow, throughput, data["MB"], data["sec"])

    print ""
    print "Total peer throughputs:"
    for peer in peer_total_bytes:
        # Aggregate throughput uses the longest connection time for the peer.
        peer_throughput = peer_total_bytes[peer] / peer_max_times[peer]
        print "  Peer %d: %.2f MB/s (%.2f MB over %.2f seconds)" % (
            peer, peer_throughput, peer_total_bytes[peer], peer_max_times[peer])

    return 0
def main():
    """Parse the command line and run the throughput analysis."""
    arg_parser = argparse.ArgumentParser(
        description="Analyze network flow throughputs")
    arg_parser.add_argument("log_file", help="a log stat file to analyze")
    parsed = arg_parser.parse_args()
    return network_flow_throughput(log_file=parsed.log_file)
# Propagate main()'s return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
| {
"content_hash": "0c0435e0794811cf6cc23d667a97c425",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 107,
"avg_line_length": 36.14655172413793,
"alnum_prop": 0.5401860243262581,
"repo_name": "TritonNetworking/themis_tritonsort",
"id": "43169417026304c6ab79617ebd0f7bcce464c72f",
"size": "4216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/scripts/themis/metaprograms/network_flow_throughput/network_flow_throughput.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "84"
},
{
"name": "C",
"bytes": "186435"
},
{
"name": "C++",
"bytes": "1759436"
},
{
"name": "CMake",
"bytes": "19466"
},
{
"name": "HTML",
"bytes": "1125"
},
{
"name": "JavaScript",
"bytes": "33834"
},
{
"name": "Makefile",
"bytes": "1036"
},
{
"name": "Python",
"bytes": "504453"
},
{
"name": "Ruby",
"bytes": "3031"
},
{
"name": "Shell",
"bytes": "39208"
}
],
"symlink_target": ""
} |
import os
import ast
import logging
# Prefer Django's ImproperlyConfigured when Django is installed so callers
# can catch the same exception type they use elsewhere; otherwise define a
# minimal local stand-in with the same name.
try:
    from django.core.exceptions import ImproperlyConfigured
except ImportError:
    class ImproperlyConfigured(Exception):
        """Fallback used when Django is not available."""
        pass
def envar(key, ktype, default=None, verbose=False):
    """Look up the ENVIRONMENT VARIABLE named `key`;
    return its value after coercing it to type `ktype`.

    Raises `ImproperlyConfigured` if the environment does
    not have the variable name defined and `default`
    is set to None.

    If `verbose` is true, values loaded from the environment
    are echoed. This is useful to check which of the defaults
    are being overridden.

    Examples:
        envar('HELLO', str)   # export HELLO="WORLD"
        envar('COUNT', int)   # export COUNT=21
        envar('DEBUG', bool)  # export DEBUG="True"
    """
    value = os.getenv(key)
    # Hopeless situation: nothing in the environment and no fallback.
    if value is None and default is None:
        raise ImproperlyConfigured('Cannot load environment variable: %s' % (key))
    # Work with defaults, force through the typecast.
    if value is None:
        return convert_to_type(ktype, default)
    # Not using defaults: the environment value overrides the default.
    if verbose:
        # logging.warn is a deprecated alias for logging.warning.
        logging.warning("VARIABLE-OVERRIDE:{0}:{1} => {2}".format(key, default, value))
    return convert_to_type(ktype, value)
def convert_to_type(ktype, value):
    """Coerce `value` to type `ktype`, raising `ImproperlyConfigured` on failure.

    `value` is normally the string read from the environment, but it may
    also be a caller-supplied default of any type.
    """
    # Already-typed non-string values (e.g. a default like 21) need no
    # conversion; ast.literal_eval would raise TypeError on them.
    if not isinstance(value, str) and type(value) == ktype:
        return value
    try:
        evalue = ast.literal_eval(value)
        if ktype == type(evalue):
            return evalue
        else:
            raise ImproperlyConfigured('Value: {0} is not of type {1}'.format(evalue, ktype))
    except (ValueError, SyntaxError, TypeError):
        # Not a Python literal (e.g. a bare string such as "WORLD"), or a
        # non-string value that literal_eval cannot parse.
        if ktype == bool:
            # Accept "True"/"False" in any letter case.
            try:
                return {'true': True, 'false': False}[value.lower()]
            except (KeyError, AttributeError):
                # Narrow except clauses so this specific message propagates
                # instead of being swallowed by a catch-all below.
                raise ImproperlyConfigured('Boolean variable types must have values: "True" or "False", got "%s"' % value)
        try:
            return ktype(value)
        except (TypeError, ValueError):
            raise ImproperlyConfigured('Value: {0} is not of type {1}'.format(value, ktype))
| {
"content_hash": "9d9d78f8c5c9db6231e1690ebdc0faaa",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 126,
"avg_line_length": 32.74193548387097,
"alnum_prop": 0.6320197044334975,
"repo_name": "hiway/envar",
"id": "f86a86d2e1bda339ae28298a0437a1c282ead883",
"size": "2030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envar/envar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5245"
}
],
"symlink_target": ""
} |
"""
Functions for dealing with markup text
"""
import re
import six
from six import moves
from w3lib.util import str_to_unicode, unicode_to_str
from w3lib.url import safe_url_string
# Entity reference: named (&name;), decimal (&#nnnn;) or hex (&#xhhhh;).
# Group 1 is the optional '#', group 2 the optional 'x', group 3 the body.
_ent_re = re.compile(r'&(#?(x?))([^&;\s]+);')
# Any markup tag (element, comment or doctype opener), possibly multi-line.
_tag_re = re.compile(r'<[a-zA-Z\/!].*?>', re.DOTALL)
# <base href="..."> declaration; group 1 captures the URL.
_baseurl_re = re.compile(six.u(r'<base\s+href\s*=\s*[\"\']\s*([^\"\'\s]+)\s*[\"\']'), re.I)
# <meta http-equiv="refresh" content="N; url=..."> with named groups for the
# quote character, the (possibly fractional) delay, and the target URL.
_meta_refresh_re = re.compile(six.u(r'<meta[^>]*http-equiv[^>]*refresh[^>]*content\s*=\s*(?P<quote>["\'])(?P<int>(\d*\.)?\d+)\s*;\s*url=(?P<url>.*?)(?P=quote)'), re.DOTALL | re.IGNORECASE)
# CDATA section; named groups for the start/end delimiters and the data.
_cdata_re = re.compile(r'((?P<cdata_s><!\[CDATA\[)(?P<cdata_d>.*?)(?P<cdata_e>\]\]>))', re.DOTALL)
def remove_entities(text, keep=(), remove_illegal=True, encoding='utf-8'):
    """Remove entities from the given text by converting them to the
    corresponding unicode character.

    'text' can be a unicode string or a byte string encoded in the given
    `encoding` (which defaults to 'utf-8').

    If 'keep' is passed (with a list of entity names) those entities will
    be kept (they won't be removed).

    It supports both numeric (&#nnnn; and &#xhhhh;) and named (&nbsp; &gt;)
    entities.

    If remove_illegal is True, entities that can't be converted are removed.
    If remove_illegal is False, entities that can't be converted are kept "as
    is". For more information see the tests.

    Always returns a unicode string (with the entities removed).
    """

    def convert_entity(m):
        # m groups: 1 = optional '#', 2 = optional 'x', 3 = entity body.
        entity_body = m.group(3)
        if m.group(1):
            # Numeric reference: hex if the 'x' prefix was present.
            try:
                if m.group(2):
                    number = int(entity_body, 16)
                else:
                    number = int(entity_body, 10)
                # Numeric character references in the 80-9F range are typically
                # interpreted by browsers as representing the characters mapped
                # to bytes 80-9F in the Windows-1252 encoding. For more info
                # see: http://en.wikipedia.org/wiki/Character_encodings_in_HTML
                if 0x80 <= number <= 0x9f:
                    return six.int2byte(number).decode('cp1252')
            except ValueError:
                # Body was not a valid number in the expected base.
                number = None
        else:
            # Named reference: honour `keep`, else look up its codepoint.
            if entity_body in keep:
                return m.group(0)
            else:
                number = moves.html_entities.name2codepoint.get(entity_body)
        if number is not None:
            try:
                return six.unichr(number)
            except ValueError:
                # Codepoint outside the valid unichr range; fall through to
                # the remove_illegal policy below.
                pass

        return u'' if remove_illegal else m.group(0)

    return _ent_re.sub(convert_entity, str_to_unicode(text, encoding))
def has_entities(text, encoding=None):
    """Return True if `text` contains at least one HTML entity reference."""
    return _ent_re.search(str_to_unicode(text, encoding)) is not None
def replace_tags(text, token='', encoding=None):
    """Replace every markup tag found in `text` with `token`.

    The default token is the empty string, so by default all tags are
    simply removed. 'text' can be a unicode string or a regular string
    encoded as 'utf-8'. Always returns a unicode string.
    """
    unicode_text = str_to_unicode(text, encoding)
    return _tag_re.sub(token, unicode_text)
_REMOVECOMMENTS_RE = re.compile(u'<!--.*?-->', re.DOTALL)


def remove_comments(text, encoding=None):
    """Strip HTML comments (<!-- ... -->) from `text`; returns unicode."""
    return _REMOVECOMMENTS_RE.sub(u'', str_to_unicode(text, encoding))
def remove_tags(text, which_ones=(), keep=(), encoding=None):
    """Remove HTML tags only (tag content is preserved).

    `which_ones` and `keep` are tag-name tuples; at most one may be
    non-empty:

        which_ones set, keep empty -> remove only the listed tags
        which_ones empty, keep set -> remove every tag except those listed
        both empty                 -> remove all tags
        both set                   -> not allowed
    """
    assert not (which_ones and keep), 'which_ones and keep can not be given at the same time'

    def _replacement(match):
        tag_name = match.group(1)
        if which_ones:
            doomed = tag_name in which_ones
        else:
            doomed = tag_name not in keep
        return u'' if doomed else match.group(0)

    tag_pattern = re.compile('</?([^ >/]+).*?>', re.DOTALL | re.IGNORECASE)
    return tag_pattern.sub(_replacement, str_to_unicode(text, encoding))
def remove_tags_with_content(text, which_ones=(), encoding=None):
    """Remove the listed tags together with everything they contain.

    `which_ones` is a tuple of tag names; when it is empty, the text is
    returned unchanged (apart from unicode conversion).
    """
    unicode_text = str_to_unicode(text, encoding)
    if not which_ones:
        return unicode_text
    # One alternative per tag: a full open/close pair, or a self-closed tag.
    alternatives = [r'<%s.*?</%s>|<%s\s*/>' % (tag, tag, tag) for tag in which_ones]
    pattern = re.compile('|'.join(alternatives), re.DOTALL | re.IGNORECASE)
    return pattern.sub(u'', unicode_text)
def replace_escape_chars(text, which_ones=('\n', '\t', '\r'), replace_by=u'',
                         encoding=None):
    """Replace escape chars with `replace_by`. Default chars: \\n, \\t, \\r.

    which_ones -- tuple of escape chars to replace (defaults to \n, \t, \r).
    replace_by -- replacement text; defaults to '', i.e. plain removal.
    """
    result = str_to_unicode(text, encoding)
    replacement = str_to_unicode(replace_by, encoding)
    for escape_char in which_ones:
        result = result.replace(escape_char, replacement)
    return result
def unquote_markup(text, keep=(), remove_illegal=True, encoding=None):
    """
    Receive markup as text (always a unicode string or a utf-8 encoded
    string) and:
        - remove entities (except the ones in 'keep') from every part of it
          that is not inside a CDATA section
        - extract the text of any CDATA sections without modifying it,
          dropping the CDATA wrappers themselves
    """
    unicode_text = str_to_unicode(text, encoding)
    pieces = []
    offset = 0
    for match in _cdata_re.finditer(unicode_text):
        cdata_start, cdata_end = match.span(1)
        # Everything before this CDATA has its entities processed.
        pieces.append(remove_entities(unicode_text[offset:cdata_start],
                                      keep=keep, remove_illegal=remove_illegal))
        # The CDATA body itself is copied through verbatim.
        pieces.append(match.group('cdata_d'))
        offset = cdata_end
    # Tail after the last CDATA (or the whole text if none was found).
    pieces.append(remove_entities(unicode_text[offset:],
                                  keep=keep, remove_illegal=remove_illegal))
    return u''.join(pieces)
def get_base_url(text, baseurl='', encoding='utf-8'):
    """Return the base url if declared in the given html text, relative to the
    given base url. If no base url is found, the given base url is returned
    """
    text = str_to_unicode(text, encoding)
    # NOTE(review): under Python 2 unicode_to_str yields a byte string and
    # urljoin receives two byte strings; under Python 3 this would mix str
    # and bytes (m.group(1).encode) in urljoin -- verify the py3 path.
    baseurl = unicode_to_str(baseurl, encoding)
    m = _baseurl_re.search(text)
    if m:
        baseurl = moves.urllib.parse.urljoin(baseurl, m.group(1).encode(encoding))
    return safe_url_string(baseurl)
def get_meta_refresh(text, baseurl='', encoding='utf-8'):
    """Return the http-equiv refresh parameters of the HTML meta element from
    the given HTML text as a tuple (interval, url), where interval is the
    delay in seconds (as a float -- the markup may specify a fractional
    value) and url is a string with the absolute url to redirect to.

    If no meta redirect is found, (None, None) is returned.
    """
    if six.PY2:
        baseurl = unicode_to_str(baseurl, encoding)
    # A UnicodeDecodeError here used to be caught only to print() the whole
    # document before re-raising -- leftover debug output, now removed; the
    # exception simply propagates.
    text = str_to_unicode(text, encoding)
    # Strip comments and entities so the regex cannot match commented-out
    # or entity-obfuscated meta tags.
    text = remove_comments(remove_entities(text))
    m = _meta_refresh_re.search(text)
    if m:
        interval = float(m.group('int'))
        url = safe_url_string(m.group('url').strip(' "\''), encoding)
        url = moves.urllib.parse.urljoin(baseurl, url)
        return interval, url
    else:
        return None, None
| {
"content_hash": "0dd28f002caa541ce262b215e20679e8",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 188,
"avg_line_length": 37.50239234449761,
"alnum_prop": 0.6074253636131666,
"repo_name": "nzavagli/UnrealPy",
"id": "f347b4be90c217d5d65b89be5d5c151fc9bd725f",
"size": "7838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/w3lib-1.5/w3lib/html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
"""Synthesize a federated dataset from MNIST-like datasets."""
import functools
from typing import Mapping, Optional, Tuple
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_federated as tff
from generalization.synthesization import dirichlet
from generalization.synthesization import gmm_embedding
def load_mnist_dataset_by_name(base_dataset_name: str, include_train: bool,
                               include_test: bool):
  """Load a centralized MNIST-like dataset by name.

  Args:
    base_dataset_name: One of 'mnist', 'fashion_mnist', 'emnist10' or
      'emnist62'.
    include_train: Whether to include the training split.
    include_test: Whether to include the test split. At least one of
      `include_train` and `include_test` must be True.

  Returns:
    A `tf.data.Dataset` whose elements are mappings with 'pixels' and
    'label' keys.

  Raises:
    ValueError: If both `include_train` and `include_test` are False, or if
      `base_dataset_name` is not recognized.
  """
  if not include_train and not include_test:
    raise ValueError('At least one of the `include_train` and '
                     '`include_test` must be True.')
  if base_dataset_name in ('mnist', 'fashion_mnist'):
    total_ds_dict = tfds.load(base_dataset_name)
    if include_train and not include_test:
      ds = total_ds_dict['train']
    elif include_test and not include_train:
      ds = total_ds_dict['test']
    else:  # Both splits requested; the guard above rules out neither.
      ds = total_ds_dict['train'].concatenate(total_ds_dict['test'])

    def emnist_consistency_preprocessor(
        elem: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
      """Preprocess to keep consistency with the TFF official EMNIST dataset."""
      return {'pixels': 1 - elem['image'] / 255, 'label': elem['label']}

    return ds.map(emnist_consistency_preprocessor)

  elif base_dataset_name in ('emnist10', 'emnist62'):
    # 'emnist10' restricts to the 10 digit classes; 'emnist62' keeps all 62.
    train_cd, val_cd = tff.simulation.datasets.emnist.load_data(
        only_digits=(base_dataset_name == 'emnist10'))
    if include_train and not include_test:
      return train_cd.create_tf_dataset_from_all_clients()
    elif include_test and not include_train:
      return val_cd.create_tf_dataset_from_all_clients()
    else:
      train_ds = train_cd.create_tf_dataset_from_all_clients()
      val_ds = val_cd.create_tf_dataset_from_all_clients()
      return train_ds.concatenate(val_ds)
  else:
    raise ValueError(f'Unknown base_dataset_name {base_dataset_name}.')
def _load_mnist_pretrained_model(efficient_net_b: int = 7) -> tf.keras.Model:
  """Build an embedding model for 28x28 grayscale MNIST-like images.

  Wraps an ImageNet-pretrained EfficientNetB{efficient_net_b} backbone:
  pads the input to 32x32, converts grayscale to RGB, runs the backbone
  in inference mode, and flattens its output into an embedding vector.
  """
  backbone_cls = getattr(tf.keras.applications.efficientnet,
                         f'EfficientNetB{efficient_net_b}')
  backbone = backbone_cls(
      include_top=False,
      weights='imagenet',
      input_shape=(32, 32, 3),
  )

  image_in = tf.keras.Input(shape=(28, 28))
  # Pad 2 pixels on each side (constant value 1) to reach 32x32.
  padded = tf.pad(
      image_in, [[0, 0], [2, 2], [2, 2]], mode='CONSTANT', constant_values=1)
  channeled = tf.expand_dims(padded, axis=3)        # (None, 32, 32, 1)
  rgb = tf.image.grayscale_to_rgb(channeled) * 255  # (None, 32, 32, 3)
  features = backbone(rgb, training=False)
  embedding = tf.keras.layers.Flatten()(features)
  return tf.keras.Model(inputs=image_in, outputs=embedding)
def synthesize_mnist_by_gmm_embedding(
    base_dataset_name: str, num_clients: int, efficient_net_b: int,
    pca_components: Optional[int], use_progressive_matching: bool,
    kl_pairwise_batch_size: int, gmm_init_params: str, include_train: bool,
    include_test: bool,
    seed: Optional[int]) -> Tuple[tff.simulation.datasets.ClientData, str]:
  """Synthesize a federated dataset from an MNIST-like dataset via GMM over embedding.

  Args:
    base_dataset_name: Name of the base MNIST-like dataset; one of 'mnist',
      'emnist10', 'emnist62' or 'fashion_mnist'.
    num_clients: Number of clients to construct.
    efficient_net_b: Size (0--7) of the EfficientNet pretrained model.
    pca_components: Optional number of PCA components to extract from the
      embedding arrays for GMM; if None, the full embedding array is used.
    use_progressive_matching: If True, the clusters of each unmatched label
      are progressively matched with a matched label by computing the
      optimal bipartite matching under pairwise KL divergence; if False,
      clusters are matched randomly across labels.
    kl_pairwise_batch_size: Optional batch size used when computing pairwise
      KL divergence; if None, the full cost matrix is computed in one batch,
      which can be memory-expensive.
    gmm_init_params: GMM initialization mode, either 'random' or 'kmeans'.
    include_train: Whether to include the training split of the original
      dataset.
    include_test: Whether to include the test split of the original dataset.
      At least one of include_train and include_test should be True.
    seed: Optional random seed for all random procedures; if None, no random
      seed is used.

  Returns:
    A tuple of the ClientData instance holding the resulting federated
    dataset and a str naming the synthesized dataset.
  """
  dataset = load_mnist_dataset_by_name(
      base_dataset_name, include_train=include_train, include_test=include_test)

  ds_name = base_dataset_name
  if include_train and not include_test:
    ds_name += '_train_only'
  elif include_test and not include_train:
    ds_name += '_test_only'

  matching_tag = 'progressive_optimal' if use_progressive_matching else 'random'
  name = ','.join([
      ds_name,
      'gmm_embedding',
      f'clients={num_clients}',
      f'model=b{efficient_net_b}',
      f'pca={pca_components}',
      'matching=' + matching_tag,
      f'gmm_init={gmm_init_params}',
      f'seed={seed}',
  ])

  cd = gmm_embedding.synthesize_by_gmm_over_pretrained_embedding(
      dataset=dataset,
      pretrained_model_builder=functools.partial(
          _load_mnist_pretrained_model, efficient_net_b=efficient_net_b),
      num_clients=num_clients,
      pca_components=pca_components,
      input_name='pixels',
      label_name='label',
      use_progressive_matching=use_progressive_matching,
      kl_pairwise_batch_size=kl_pairwise_batch_size,
      gmm_init_params=gmm_init_params,
      seed=seed)
  return cd, name
def synthesize_mnist_by_dirichlet_over_labels(
    base_dataset_name: str,
    num_clients: int,
    concentration_factor: float,
    use_rotate_draw: bool,
    include_train: bool,
    include_test: bool,
    seed: Optional[int],
) -> Tuple[tff.simulation.datasets.ClientData, str]:
  """Synthesize a federated dataset from an MNIST-like dataset via Dirichlet over labels.

  Args:
    base_dataset_name: Name of the base MNIST-like dataset; one of 'mnist',
      'emnist10', 'emnist62' or 'fashion_mnist'.
    num_clients: Number of clients to construct.
    concentration_factor: Parameter of the Dirichlet distribution. Each
      client samples from Dirichlet(concentration_factor *
      label_relative_popularity) to obtain a multinomial distribution over
      labels, controlling data heterogeneity: values approaching 0 give each
      client data from a single label, while values approaching infinity
      approach the overall label popularity.
    use_rotate_draw: Whether to rotate the drawing clients. If True, each
      client draws only one sample at once before rotating to the next
      random client, preventing the last clients from deviating from their
      desired distributions. If False, each client draws all its samples at
      once before moving to the next client.
    include_train: Whether to include the training split of the original
      dataset.
    include_test: Whether to include the test split of the original dataset.
      At least one of include_train and include_test should be True.
    seed: Optional random seed for all random procedures; if None, no random
      seed is used.

  Returns:
    A tuple of the ClientData instance holding the resulting federated
    dataset and a str naming the synthesized dataset.
  """
  dataset = load_mnist_dataset_by_name(
      base_dataset_name, include_train=include_train, include_test=include_test)

  ds_name = base_dataset_name
  if include_train and not include_test:
    ds_name += '_train_only'
  elif include_test and not include_train:
    ds_name += '_test_only'

  name = ','.join([
      ds_name,
      'dirichlet',
      f'clients={num_clients}',
      f'concentration_factor={concentration_factor}',
      f'rotate={use_rotate_draw}',
      f'seed={seed}',
  ])

  cd = dirichlet.synthesize_by_dirichlet_over_labels(
      dataset=dataset,
      num_clients=num_clients,
      concentration_factor=concentration_factor,
      use_rotate_draw=use_rotate_draw,
      seed=seed)
  return cd, name
| {
"content_hash": "43c0d1b56f3dda498b4ac631ffb1ed6b",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 88,
"avg_line_length": 42.856459330143544,
"alnum_prop": 0.6976666294518253,
"repo_name": "google-research/federated",
"id": "af65a2b5b78a882700964123964b2219ae82cc38",
"size": "9534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generalization/synthesization/mnist_synthesis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76424"
},
{
"name": "Python",
"bytes": "4122952"
},
{
"name": "Shell",
"bytes": "7089"
},
{
"name": "Starlark",
"bytes": "97189"
}
],
"symlink_target": ""
} |
from autobahn.twisted import wamp
class IVRouterSession(wamp.RouterSession):
    """WAMP router session for this server; currently inherits all
    behavior from autobahn's wamp.RouterSession unchanged."""
    pass
| {
"content_hash": "55c48c8a6fa0b8dd3a031eac74ee01d4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 21.5,
"alnum_prop": 0.813953488372093,
"repo_name": "donghaoren/iVisDesigner",
"id": "bdacc34cb1a34633ddfad5a6146e793e953fc118",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/websocket/session.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47560"
},
{
"name": "HTML",
"bytes": "258278"
},
{
"name": "JavaScript",
"bytes": "1193537"
},
{
"name": "Python",
"bytes": "104636"
}
],
"symlink_target": ""
} |
""" Test the drop_from_u_dir functionality. """
import os
import sys
import time
import unittest
from binascii import hexlify
import hashlib
from rnglib import SimpleRNG
from nlhtree import NLHTree, NLHLeaf
from xlattice import HashTypes
from xlu import UDir, DirStruc
# hashlib gained native SHA-3 support in Python 3.6; on older interpreters
# the third-party `sha3` package patches it into hashlib at import time.
if sys.version_info < (3, 6):
    # pylint: disable=unused-import
    import sha3  # monkey-patches hashlib
    assert sha3  # prevent flakes warning
class TestDropFromU(unittest.TestCase):
    """ Test the drop_from_u_dir functionality. """

    def setUp(self):
        """Seed a fresh pseudo-random generator for each test."""
        self.rng = SimpleRNG(time.time())

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def populate_tree(self, tree, data_path, u_dir, hashtype):
        """
        Generate nnn unique random values, where nnn is at least 16.

        Each value is written as a file under data_path, inserted as a leaf
        into tree, and stored in u_dir under its hash.  Returns the parallel
        lists (values, hashes), where each hash is the binary digest of the
        corresponding value under `hashtype`.
        """
        nnn = 16 + self.rng.next_int16(16)
        # DEBUG
        # print("nnn = %d" % nnn)
        # EnnnD
        values = []
        hashes = []
        for count in range(nnn):
            # generate datum ------------------------------
            # 32..63 random bytes per datum
            datum = self.rng.some_bytes(32 + self.rng.next_int16(32))
            values.append(datum)
            # generate hash = bin_key ----------------------
            if hashtype == HashTypes.SHA1:
                sha = hashlib.sha1()
            elif hashtype == HashTypes.SHA2:
                sha = hashlib.sha256()
            elif hashtype == HashTypes.SHA3:
                sha = hashlib.sha3_256()
            elif hashtype == HashTypes.BLAKE2B:
                sha = hashlib.blake2b(digest_size=32)
            else:
                raise NotImplementedError
            sha.update(datum)
            bin_key = sha.digest()
            hex_key = sha.hexdigest()
            hashes.append(bin_key)
            # write data file -----------------------------
            file_name = 'value%04d' % count
            path_to_file = os.path.join(data_path, file_name)
            with open(path_to_file, 'wb') as file:
                # DEBUG
                # print("writing %s to %s" % (hex_key, path_to_file))
                # END
                file.write(datum)
            # insert leaf into tree -----------------------
            # path_from_top = os.path.join(top_name, file_name)
            leaf = NLHLeaf(file_name, bin_key, hashtype)
            tree.insert(leaf)
            # DEBUG
            # print("  inserting <%s %s>" % (leaf.name, leaf.hex_hash))
            # END
            # write data into uDir ------------------------
            u_dir.put_data(datum, hex_key)
        return values, hashes

    def generate_udt(self, struc, hashtype):
        """
        Generate under ./tmp a data directory with random content,
        a uDir containing the same data, and an NLHTree that matches.

        uDir has the directory structure (DIR_FLAT, DIR16x16, DIR256x256,
        etc requested.  Hashes are SHA1 if using SHA1 is True, SHA256
        otherwise.

        values is a list of binary values, each the content of a file
        under dataDir.  Each value contains a non-zero number of bytes.

        hashes is a list of the SHA hashes of the values.  Each hash
        is a binary value.  If using SHA1 it consists of 20 bytes.

        return uPath, data_path, tree, hashes, values
        """
        # make a unique U directory under ./tmp/
        os.makedirs('tmp', mode=0o755, exist_ok=True)
        u_root_name = self.rng.next_file_name(8)
        u_path = os.path.join('tmp', u_root_name)
        # retry until the random name does not collide
        while os.path.exists(u_path):
            u_root_name = self.rng.next_file_name(8)
            u_path = os.path.join('tmp', u_root_name)
        # DEBUG
        # print("u_root_name = %s" % u_root_name)
        # END

        # create uDir and the NLHTree
        u_dir = UDir(u_path, struc, hashtype)
        self.assertTrue(os.path.exists(u_path))

        # make a unique data directory under tmp/
        data_tmp = self.rng.next_file_name(8)
        tmp_path = os.path.join('tmp', data_tmp)
        while os.path.exists(tmp_path):
            data_tmp = self.rng.next_file_name(8)
            tmp_path = os.path.join('tmp', data_tmp)

        # dataDir must have same base name as NLHTree
        top_name = self.rng.next_file_name(8)
        data_path = os.path.join(tmp_path, top_name)
        os.makedirs(data_path, mode=0o755)

        # DEBUG
        # print("data_tmp = %s" % data_tmp)
        # print("top_name = %s" % top_name)
        # print('data_path = %s' % data_path)
        # END

        tree = NLHTree(top_name, hashtype)
        values, hashes = self.populate_tree(tree, data_path, u_dir, hashtype)
        return u_path, data_path, tree, hashes, values

    # ---------------------------------------------------------------

    def do_test_with_ephemeral_tree(self, struc, hashtype):
        """
        Generate a tmp/ subdirectory containing a quasi-random data
        directory and corresponding uDir and NLHTree serialization.

        We use the directory strucure (struc) and hash type (hashtype)
        indicated, running various consistency tests on the three.
        """
        u_path, data_path, tree, hashes, values = self.generate_udt(
            struc, hashtype)
        # DEBUG
        # print("TREE:\n%s" % tree)
        # END
        # verify that the dataDir matches the nlhTree
        tree2 = NLHTree.create_from_file_system(data_path, hashtype)
        # DEBUG
        # print("TREE2:\n%s" % tree2)
        # END
        self.assertEqual(tree2, tree)

        nnn = len(values)  # number of values present
        hex_hashes = []
        for count in range(nnn):
            hex_hashes.append(hexlify(hashes[count]).decode('ascii'))

        ndxes = [ndx for ndx in range(nnn)]  # indexes into lists
        self.rng.shuffle(ndxes)  # shuffled
        kkk = self.rng.next_int16(nnn)  # we will drop this many indexes
        # DEBUG
        # print("dropping %d from %d elements" % (kkk, nnn))
        # END
        drop_me = ndxes[0:kkk]  # indexes of values to drop
        keep_me = ndxes[kkk:]  # of those which should still be present

        # construct an NLHTree containing values to be dropped from uDir
        clone = tree.clone()
        for count in keep_me:
            name = 'value%04d' % count
            clone.delete(name)  # the parameter is a glob !

        # these values should be absent from clone: they won't be dropped
        # from uDir
        for count in keep_me:
            name = 'value%04d' % count
            xxx = clone.find(name)
            self.assertEqual(len(xxx), 0)

        # these values shd still be present in clone: they'll be dropped from
        # UDir
        for count in drop_me:
            name = 'value%04d' % count
            xxx = clone.find(name)
            self.assertEqual(len(xxx), 1)

        # the clone subtree contains those elements which will be dropped
        # from uDir
        unmatched = clone.drop_from_u_dir(u_path)  # was unmatched
        # DEBUG
        # for x in unmatched:  # (relPath, hash)
        #     print("unmatched: %s %s" % (x[0], x[1]))
        # END
        self.assertEqual(len(unmatched), 0)

        u_dir = UDir(u_path, struc, hashtype)
        self.assertTrue(os.path.exists(u_path))

        # these values should still be present in uDir
        for count in keep_me:
            hex_hash = hex_hashes[count]
            self.assertTrue(u_dir.exists(hex_hash))

        # these values should NOT be present in UDir
        for count in drop_me:
            hex_hash = hex_hashes[count]
            self.assertFalse(u_dir.exists(hex_hash))

    def test_with_ephemeral_tree(self):
        """
        Generate tmp/ subdirectories containing a quasi-random data
        directory and corresponding uDir and NLHTree serialization,
        using various directory structures and hash types.
        """
        # exercise every (directory structure, hash type) combination
        for struc in DirStruc:
            for hashtype in HashTypes:
                self.do_test_with_ephemeral_tree(struc, hashtype)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "b1c75322dfeecef5d966e5b2067b4059",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 34.340425531914896,
"alnum_prop": 0.5548946716232962,
"repo_name": "jddixon/nlhtree_py",
"id": "3325725d74ad1761653e25183d4c8f505eab7ea1",
"size": "8124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_drop_from_u.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "95277"
},
{
"name": "Shell",
"bytes": "1859"
}
],
"symlink_target": ""
} |
"""
Created on Dec 24, 2016
@author: john papa
Copyright 2016 John Papa. All rights reserved.
This work is licensed under the MIT License.
"""
import logging
from .hand import Hand
from .bet import Bet
class BjHand(Hand):
    """ This is a class for Hands used in the game of Blackjack.
        Hands from this class should work for any standard game of blackjack or twenty-one.
    """
    def __init__(self, bet):
        """ Initialize the hand as an empty list of cards and a bet

        Arguments:
            bet - The amount to bet before receiving cards for the hand
        """
        super().__init__()
        self._bet = Bet(bet)
        # Tri-state blackjack cache: None = not yet evaluated, else True/False.
        self._blackjack = None
        self._logger = logging.getLogger('bj')

    @property
    def bet(self):
        """ Returns the bet associated with this hand """
        return self._bet

    @property
    def can_split(self):
        """ True if the hand can be split: exactly two cards of equal value. """
        return len(self) == 2 and self.cards[0].value == self.cards[1].value

    @property
    def is_blackjack(self):
        """ Checks to see if the hand is a Natural Blackjack.

        The result is computed once and cached.  Bug fix: the original
        tested `if self._blackjack:` which treats a cached False the same
        as "not yet computed", re-evaluating on every access; we now test
        against None so both True and False are cached.
        """
        if self._blackjack is None:
            self._blackjack = (len(self) == 2 and self.value == 21)
        return self._blackjack

    @property
    def value(self):
        """ Return the value of the hand.

        Aces counted as 11 are demoted to 1 one at a time until the total
        no longer exceeds 21 (or no high aces remain).
        """
        value = sum(card.value for card in self.cards)
        if value > 21:
            for card in self.cards:
                if card.rank == 'A' and card.value == 11:
                    card.set_ace_low()
                    value = sum(card.value for card in self.cards)
                    if value <= 21:
                        break
        return value
| {
"content_hash": "27acdcb62c169e7dfdab7853f190185b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 87,
"avg_line_length": 27.92753623188406,
"alnum_prop": 0.5526725480020758,
"repo_name": "johnpapa2/twenty-one",
"id": "c458cf44c64dcd057c56532eec102c4207d33e11",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "players/bjhand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40303"
},
{
"name": "Shell",
"bytes": "827"
}
],
"symlink_target": ""
} |
import argparse
def common_parser(description='untitled'):
    """Build the ArgumentParser shared by the command-line tools.

    Arguments:
        description - help text shown at the top of --help output

    Returns an argparse.ArgumentParser exposing -a/--auto (boolean) and
    -r/--region (string).
    """
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-a', '--auto', action='store_true',
                        help='auto-detect')
    parser.add_argument('-r', '--region', help='ec2 region')
    return parser
import boto.utils
def common_args(args):
    """Post-process parsed common arguments, resolving args.region in place.

    Region resolution order:
      1. an explicit --region always wins
      2. with --auto, fall back to the region from EC2 instance metadata
      3. otherwise default to 'us-east-1'
    """
    detected = None
    if args.auto:
        # NOTE(review): assumes get_instance_identity returns a dict missing
        # 'document'/'region' on failure -- confirm boto behavior.
        identity = boto.utils.get_instance_identity(timeout=1, num_retries=5)
        try:
            detected = identity['document']['region']
        except KeyError:
            pass
    if not args.region:
        args.region = detected
    if not args.region:
        args.region = 'us-east-1'
import signal
import sys
def cli_signal_handler(signum, frame):
    """Signal handler that aborts the CLI with exit status 1.

    Arguments:
        signum - the delivered signal number (was named `signal`, which
                 shadowed the stdlib `signal` module)
        frame - the interrupted stack frame (unused)
    """
    # Exit non-zero so shell callers can detect the interruption.
    sys.exit(1)
def catch_sigint():
    """Install cli_signal_handler for SIGINT so Ctrl-C exits with status 1."""
    signal.signal(signal.SIGINT, cli_signal_handler)
| {
"content_hash": "df71c04deba792a3b966fc2e039546c5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 24.387755102040817,
"alnum_prop": 0.5882845188284519,
"repo_name": "WrathOfChris/elb-rotate-certs",
"id": "2f645725990dbd2a5e84c999b64444326ae68931",
"size": "1195",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "elb_rotate_certs/util.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6819"
}
],
"symlink_target": ""
} |
# Package exports for mpxapi.adapter: only the Checksum adapter is public.
from .checksum import Checksum
__all__ = ["Checksum"]
| {
"content_hash": "80cd2dd32a6328543dc722d5bf890a0f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.6727272727272727,
"repo_name": "blockbuster/mpxapi",
"id": "dcbc0d1e20ace76c6878493ddafdf03775a3a756",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpxapi/adapter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18273"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
} |
"""
IO utilities for the files associated with the BPA co-funded project:
TIP 304: Predicting the Hydrologic Response of the Columbia River System to
Climate Change
Import and export file formats as required by the project. Provide the data to
the rest of bmorph as pandas.Series.
"""
import pandas as pd
import xarray as xr
from ..version import version
def construct_file_name(file_info, file_template):
    """Fill a path-name template from a dictionary of components.

    Parameters
    ----------
    file_info : dict
        Dictionary whose keys match the named fields in `file_template`
    file_template : str
        Template for constructing path names for files

    Returns
    -------
    str
        Pathname for file
    """
    pathname = file_template.format(**file_info)
    return pathname
def get_metadata(infilename, comment='#'):
    '''Collect the commented-out header section of an ascii file as metadata.

    Lines are consumed until the first line that does not start with
    `comment`.  Along the way the bias-correction name/version entries are
    rewritten to identify bmorph, and the short/long name entries are
    rewritten to describe bias-corrected streamflow.

    Parameters
    ----------
    infilename : str
        Pathname for file
    comment : str, optional
        Comment indicator at the start of the line. Default value is '#'.

    Returns
    -------
    metadata : str
        The header section of the file as a single string (with line breaks)
    '''
    header_lines = []
    with open(infilename, 'r') as fobj:
        for raw in fobj:
            if not raw.startswith(comment):
                break
            # the old name entry is dropped; it is re-emitted alongside the
            # rewritten version entry below
            if 'models_streamflow_bias_correction_name' in raw:
                continue
            if 'models_streamflow_bias_correction_version' in raw:
                raw = '{} models_streamflow_bias_correction_name : bmorph\n{} models_streamflow_bias_correction_version : {}\n'.format(comment, comment, version)
            if 'short_name : ' in raw:
                raw = '{} short_name : biascorrected_streamflow\n'.format(comment)
            if 'long_name : ' in raw:
                raw = '{} long_name : Bias-corrected streamflow at outlet grid cell\n'.format(comment)
            header_lines.append(raw)
    return ''.join(header_lines)
def get_model_ts(infilename, na_values='-9999', comment='#',
                 rename_columns=None, column='streamflow'):
    '''Retrieve a modeled time series from an ASCII file.

    Parameters
    ----------
    infilename : str
        Pathname for file
    na_values : str, optional
        Values that should be converted to `NA`. Default value is '-9999'
    comment : str, optional
        Comment indicator at the start of the line. Default value is '#'
    rename_columns : dict or None, optional
        When truthy, the data column is renamed to `column`.
        NOTE(review): the mapping itself is ignored; the argument acts only
        as a flag -- confirm callers rely on truthiness alone.
    column : str, optional
        Name of the column that will be returned. Default value is
        'streamflow'

    Returns
    -------
    pandas.Series
        Column from file as a pandas.Series
    '''
    frame = pd.read_csv(infilename, comment=comment, na_values=na_values,
                        index_col=0, parse_dates=True)
    # Renaming keeps every returned Series under the same name, which makes
    # them easy to combine downstream.
    if rename_columns:
        frame.columns = [column]
    return pd.Series(frame[column])
def get_nrni_ts_nc(site_index, nrni_file,
                   rename_columns={'Streamflow': 'streamflow'},
                   column='streamflow'):
    '''Retrieve NRNI streamflow from NetCDF file by site index

    Parameters
    ----------
    site_index : str
        Site index for NRNI site
    nrni_file : str
        Pathname for NRNI file
    rename_columns: dict or None, optional
        Dictionary to rename columns. Default value is
        `{'Streamflow': 'streamflow'}`
    column = str, optional
        Name of the column that will be returned. Default value is 'streamflow'

    Returns
    -------
    pandas.Series
        Column from file as a pandas.Series
    '''
    # NOTE(review): mutable default argument (dict); harmless here since it
    # is never mutated, but a None default would be safer.
    nrni = xr.open_dataset(nrni_file)
    # Promote the 'Time' variable to the coordinate used for alignment.
    nrni.coords['time'] = nrni.Time
    # Select the single site whose IndexNames entry matches site_index;
    # assumes exactly one match -- TODO confirm against the NRNI file schema.
    nrni = nrni.Streamflow[nrni.IndexNames == site_index, :]
    # NOTE(review): DataArray.drop is deprecated in newer xarray (drop_vars
    # is the modern spelling) -- confirm the pinned xarray version.
    nrni = nrni[0, ].drop('index')
    nrni = nrni.to_dataframe()
    # renaming of columns may seem superfluous if we are converting to a Series
    # anyway, but it allows all the Series to have the same name
    if rename_columns:
        nrni = nrni.rename(columns=rename_columns)
    return pd.Series(nrni[column])
def get_nrni_ts_csv(site_index, nrni_file, date_column=1,
                    series_name='streamflow'):
    '''Retrieve NRNI streamflow from ASCII file by site index'''
    frame = pd.read_csv(nrni_file, skiprows=list(range(1, 7)))
    # Rebuild a daily index spanning the first and last date cells.
    start = parse_csv_date(frame.iloc[0, date_column])
    end = parse_csv_date(frame.iloc[-1, date_column])
    frame.index = pd.date_range(start=start, end=end)
    # Strip site-name suffixes so columns can be addressed by bare site index.
    for suffix in ['5N', '_QD', '_QN', '_QM']:
        frame.columns = frame.columns.str.replace(suffix, '')
    series = pd.Series(frame[site_index], dtype='float32')
    if series_name:
        series.name = series_name
    return series
def parse_csv_date(date_string):
    '''Fix the time stamp for the 2-digit years in the NRNI file'''
    day, month, year = date_string.split('-')
    # Two-digit year windowing: 00-09 -> 2000s, 10-99 -> 1900s.
    century = 2000 if int(year) < 10 else 1900
    full_year = int(year) + century
    return pd.to_datetime('{}-{}-{}'.format(day, month, full_year),
                          format="%d-%b-%Y")
def put_bmorph_ts(outfilename, ts, metadata=''):
    '''Write a bias-corrected series to file, preceded by its metadata.

    Parameters
    ----------
    outfilename : str
        Pathname for output file
    ts : pandas.Series
        Series to be written to file
    metadata : str or None, optional
        Metadata written verbatim at the start of the file (one long string
        with line breaks). Default value is ''

    Returns
    -------
    nothing
    '''
    # Missing values are serialized as the sentinel -9999.
    with open(outfilename, 'w') as fobj:
        fobj.write(metadata + ts.to_csv(na_rep='-9999'))
    return
| {
"content_hash": "9abc5f62ccf5d205d55512837145e33c",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 166,
"avg_line_length": 32.57222222222222,
"alnum_prop": 0.6135084427767354,
"repo_name": "UW-Hydro/bmorph",
"id": "300a42076416eff1cdd0bde29faa2b5cfeb6e81e",
"size": "5863",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bmorph/io/tip304.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "41657"
},
{
"name": "Python",
"bytes": "312494"
}
],
"symlink_target": ""
} |
# match.py - filename matching
#
# Copyright 2008, 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function
import bisect
import copy
import itertools
import os
import re
from .i18n import _
from .pycompat import open
from . import (
encoding,
error,
pathutil,
policy,
pycompat,
util,
)
from .utils import stringutil
# Optional Rust implementation of dirstate helpers (None when unavailable).
rustmod = policy.importrust('dirstate')
# Every pattern kind understood by _patsplit / _donormalize.
allpatternkinds = (
    b're',
    b'glob',
    b'path',
    b'relglob',
    b'relpath',
    b'relre',
    b'rootglob',
    b'listfile',
    b'listfile0',
    b'set',
    b'include',
    b'subinclude',
    b'rootfilesin',
)
# Pattern kinds whose paths are resolved relative to the cwd.
cwdrelativepatternkinds = (b'relpath', b'glob')
# Local alias for the cached-property decorator used throughout this module.
propertycache = util.propertycache
def _rematcher(regex):
    """compile the regexp with the best available regexp engine and return a
    matcher function"""
    compiled = util.re.compile(regex)
    # test_match is slightly faster, provided by facebook's re2 bindings;
    # fall back to the standard match method when it is absent.
    fast = getattr(compiled, 'test_match', None)
    return fast if fast is not None else compiled.match
def _expandsets(cwd, kindpats, ctx=None, listsubrepos=False, badfn=None):
    '''Split kindpats: 'set' patterns become matchers, the rest pass through.'''
    matchers = []
    other = []
    for kind, pat, source in kindpats:
        if kind != b'set':
            other.append((kind, pat, source))
            continue
        # fileset expressions need a changecontext to evaluate against
        if ctx is None:
            raise error.ProgrammingError(
                b"fileset expression with no context"
            )
        matchers.append(ctx.matchfileset(cwd, pat, badfn=badfn))
        if listsubrepos:
            # also evaluate the fileset inside each subrepository, prefixed
            # with the subrepo path
            for subpath in ctx.substate:
                sm = ctx.sub(subpath).matchfileset(cwd, pat, badfn=badfn)
                matchers.append(prefixdirmatcher(subpath, sm, badfn=badfn))
    return matchers, other
def _expandsubinclude(kindpats, root):
    """Split out 'subinclude' patterns, returning their matcher arguments
    (paired with their path prefix) alongside the remaining kindpats."""
    relmatchers = []
    other = []
    for kind, pat, source in kindpats:
        if kind != b'subinclude':
            other.append((kind, pat, source))
            continue
        # resolve the subinclude file relative to its source pattern file
        sourceroot = pathutil.dirname(util.normpath(source))
        path = pathutil.join(sourceroot, util.pconvert(pat))
        newroot = pathutil.dirname(path)
        matcherargs = (newroot, b'', [], [b'include:%s' % path])
        prefix = pathutil.canonpath(root, root, newroot)
        if prefix:
            prefix += b'/'
        relmatchers.append((prefix, matcherargs))
    return relmatchers, other
def _kindpatsalwaysmatch(kindpats):
"""Checks whether the kindspats match everything, as e.g.
'relpath:.' does.
"""
for kind, pat, source in kindpats:
if pat != b'' or kind not in [b'relpath', b'glob']:
return False
return True
def _buildkindpatsmatcher(
    matchercls,
    root,
    cwd,
    kindpats,
    ctx=None,
    listsubrepos=False,
    badfn=None,
):
    """Assemble a matcher from kindpats, expanding fileset ('set') patterns
    and unioning the resulting matchers when more than one is produced."""
    fms, kindpats = _expandsets(
        cwd,
        kindpats,
        ctx=ctx,
        listsubrepos=listsubrepos,
        badfn=badfn,
    )
    matchers = []
    if kindpats:
        matchers.append(matchercls(root, kindpats, badfn=badfn))
    matchers.extend(fms)
    if not matchers:
        return nevermatcher(badfn=badfn)
    if len(matchers) == 1:
        return matchers[0]
    return unionmatcher(matchers)
def match(
    root,
    cwd,
    patterns=None,
    include=None,
    exclude=None,
    default=b'glob',
    auditor=None,
    ctx=None,
    listsubrepos=False,
    warn=None,
    badfn=None,
    icasefs=False,
):
    r"""build an object to match a set of file patterns

    arguments:
    root - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    patterns - patterns to find
    include - patterns to include (unless they are excluded)
    exclude - patterns to exclude (even if they are included)
    default - if a pattern in patterns has no explicit type, assume this one
    auditor - optional path auditor
    ctx - optional changecontext
    listsubrepos - if True, recurse into subrepositories
    warn - optional function used for printing warnings
    badfn - optional bad() callback for this matcher instead of the default
    icasefs - make a matcher for wdir on case insensitive filesystems, which
        normalizes the given patterns to the case in the filesystem

    a pattern is one of:
    'glob:<glob>' - a glob relative to cwd
    're:<regexp>' - a regular expression
    'path:<path>' - a path relative to repository root, which is matched
        recursively
    'rootfilesin:<path>' - a path relative to repository root, which is
        matched non-recursively (will not match subdirectories)
    'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
    'relpath:<path>' - a path relative to cwd
    'relre:<regexp>' - a regexp that needn't match the start of a name
    'set:<fileset>' - a fileset expression
    'include:<path>' - a file of patterns to read and include
    'subinclude:<path>' - a file of patterns to match against files under
        the same directory
    '<something>' - a pattern of the specified default type

    >>> def _match(root, *args, **kwargs):
    ...     return match(util.localpath(root), *args, **kwargs)

    Usually a patternmatcher is returned:
    >>> _match(b'/foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
    <patternmatcher patterns='.*\\.c$|foo/a(?:/|$)|[^/]*\\.py$'>

    Combining 'patterns' with 'include' (resp. 'exclude') gives an
    intersectionmatcher (resp. a differencematcher):
    >>> type(_match(b'/foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
    <class 'mercurial.match.intersectionmatcher'>
    >>> type(_match(b'/foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
    <class 'mercurial.match.differencematcher'>

    Notice that, if 'patterns' is empty, an alwaysmatcher is returned:
    >>> _match(b'/foo', b'.', [])
    <alwaysmatcher>

    The 'default' argument determines which kind of pattern is assumed if a
    pattern has no prefix:
    >>> _match(b'/foo', b'.', [b'.*\.c$'], default=b're')
    <patternmatcher patterns='.*\\.c$'>
    >>> _match(b'/foo', b'.', [b'main.py'], default=b'relpath')
    <patternmatcher patterns='main\\.py(?:/|$)'>
    >>> _match(b'/foo', b'.', [b'main.py'], default=b're')
    <patternmatcher patterns='main.py'>

    The primary use of matchers is to check whether a value (usually a file
    name) matches against one of the patterns given at initialization. There
    are two ways of doing this check.

    >>> m = _match(b'/foo', b'', [b're:.*\.c$', b'relpath:a'])

    1. Calling the matcher with a file name returns True if any pattern
    matches that file name:
    >>> m(b'a')
    True
    >>> m(b'main.c')
    True
    >>> m(b'test.py')
    False

    2. Using the exact() method only returns True if the file name matches one
    of the exact patterns (i.e. not re: or glob: patterns):
    >>> m.exact(b'a')
    True
    >>> m.exact(b'main.c')
    False
    """
    assert os.path.isabs(root)
    cwd = os.path.join(root, util.localpath(cwd))
    normalize = _donormalize
    if icasefs:
        # On case-insensitive filesystems, fold patterns to the case stored
        # in the dirstate before building matchers.
        dirstate = ctx.repo().dirstate
        dsnormalize = dirstate.normalize

        def normalize(patterns, default, root, cwd, auditor, warn):
            kp = _donormalize(patterns, default, root, cwd, auditor, warn)
            kindpats = []
            for kind, pats, source in kp:
                if kind not in (b're', b'relre'):  # regex can't be normalized
                    p = pats
                    pats = dsnormalize(pats)
                    # Preserve the original to handle a case only rename.
                    if p != pats and p in dirstate:
                        kindpats.append((kind, p, source))
                kindpats.append((kind, pats, source))
            return kindpats

    if patterns:
        kindpats = normalize(patterns, default, root, cwd, auditor, warn)
        if _kindpatsalwaysmatch(kindpats):
            m = alwaysmatcher(badfn)
        else:
            m = _buildkindpatsmatcher(
                patternmatcher,
                root,
                cwd,
                kindpats,
                ctx=ctx,
                listsubrepos=listsubrepos,
                badfn=badfn,
            )
    else:
        # It's a little strange that no patterns means to match everything.
        # Consider changing this to match nothing (probably using nevermatcher).
        m = alwaysmatcher(badfn)
    if include:
        # Intersect with the include patterns: a file must also be included.
        kindpats = normalize(include, b'glob', root, cwd, auditor, warn)
        im = _buildkindpatsmatcher(
            includematcher,
            root,
            cwd,
            kindpats,
            ctx=ctx,
            listsubrepos=listsubrepos,
            badfn=None,
        )
        m = intersectmatchers(m, im)
    if exclude:
        # Subtract the exclude patterns: exclusion wins over inclusion.
        kindpats = normalize(exclude, b'glob', root, cwd, auditor, warn)
        em = _buildkindpatsmatcher(
            includematcher,
            root,
            cwd,
            kindpats,
            ctx=ctx,
            listsubrepos=listsubrepos,
            badfn=None,
        )
        m = differencematcher(m, em)
    return m
def exact(files, badfn=None):
    """Return a matcher that matches exactly the listed files (no patterns)."""
    return exactmatcher(files, badfn=badfn)
def always(badfn=None):
    """Return a matcher that matches everything."""
    return alwaysmatcher(badfn)
def never(badfn=None):
    """Return a matcher that matches nothing."""
    return nevermatcher(badfn)
def badmatch(match, badfn):
    """Return a shallow copy of `match` whose bad() callback is `badfn`.

    The original matcher is left untouched.
    """
    clone = copy.copy(match)
    clone.bad = badfn
    return clone
def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
    """Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded."""
    kindpats = []
    for kind, pat in [_patsplit(p, default) for p in patterns]:
        if kind in cwdrelativepatternkinds:
            # cwd-relative kinds are canonicalized against root/cwd
            pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
        elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
            pat = util.normpath(pat)
        elif kind in (b'listfile', b'listfile0'):
            # expand a file of patterns (one per line, or '\0'-separated for
            # listfile0) by recursing on its contents
            try:
                files = util.readfile(pat)
                if kind == b'listfile0':
                    files = files.split(b'\0')
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise error.Abort(_(b"unable to read file list (%s)") % pat)
            for k, p, source in _donormalize(
                files, default, root, cwd, auditor, warn
            ):
                kindpats.append((k, p, pat))
            continue
        elif kind == b'include':
            # expand an include file; unreadable files are skipped with a
            # warning rather than aborting
            try:
                fullpath = os.path.join(root, util.localpath(pat))
                includepats = readpatternfile(fullpath, warn)
                for k, p, source in _donormalize(
                    includepats, default, root, cwd, auditor, warn
                ):
                    kindpats.append((k, p, source or pat))
            except error.Abort as inst:
                raise error.Abort(
                    b'%s: %s'
                    % (
                        pat,
                        inst.message,
                    )  # pytype: disable=unsupported-operands
                )
            except IOError as inst:
                if warn:
                    warn(
                        _(b"skipping unreadable pattern file '%s': %s\n")
                        % (pat, stringutil.forcebytestr(inst.strerror))
                    )
                continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat, b''))
    return kindpats
class basematcher(object):
    """Base class for matcher objects.

    Subclasses override matchfn() and the visitdir()/visitchildrenset()
    traversal hints; callers use a matcher as a callable on file names.
    """

    def __init__(self, badfn=None):
        if badfn is not None:
            self.bad = badfn

    def __call__(self, fn):
        return self.matchfn(fn)

    # Callbacks related to how the matcher is used by dirstate.walk.
    # Subscribers to these events must monkeypatch the matcher object.
    def bad(self, f, msg):
        """Callback from dirstate.walk for each explicit file that can't be
        found/accessed, with an error message."""

    # If a traversedir callback is set, it will be called when a directory
    # discovered by recursive traversal is visited.
    traversedir = None

    @propertycache
    def _files(self):
        return []

    def files(self):
        """Explicitly listed files or patterns or roots:
        if no patterns or .always(): empty list,
        if exact: list exact files,
        if not .anypats(): list all files and dirs,
        else: optimal roots"""
        return self._files

    @propertycache
    def _fileset(self):
        # set form of _files, cached for O(1) membership tests in exact()
        return set(self._files)

    def exact(self, f):
        '''Returns True if f is in .files().'''
        return f in self._fileset

    def matchfn(self, f):
        return False

    def visitdir(self, dir):
        """Decides whether a directory should be visited based on whether it
        has potential matches in it or one of its subdirectories. This is
        based on the match's primary, included, and excluded patterns.

        Returns the string 'all' if the given directory and all subdirectories
        should be visited. Otherwise returns True or False indicating whether
        the given directory should be visited.
        """
        return True

    def visitchildrenset(self, dir):
        """Decides whether a directory should be visited based on whether it
        has potential matches in it or one of its subdirectories, and
        potentially lists which subdirectories of that directory should be
        visited. This is based on the match's primary, included, and excluded
        patterns.

        This function is very similar to 'visitdir', and the following mapping
        can be applied:

        visitdir | visitchildrenlist
        ----------+-------------------
        False | set()
        'all' | 'all'
        True | 'this' OR non-empty set of subdirs -or files- to visit

        Example:
        Assume matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
        the following values (assuming the implementation of visitchildrenset
        is capable of recognizing this; some implementations are not).

        '' -> {'foo', 'qux'}
        'baz' -> set()
        'foo' -> {'bar'}
        # Ideally this would be 'all', but since the prefix nature of matchers
        # is applied to the entire matcher, we have to downgrade this to
        # 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
        # in.
        'foo/bar' -> 'this'
        'qux' -> 'this'

        Important:
        Most matchers do not know if they're representing files or
        directories. They see ['path:dir/f'] and don't know whether 'f' is a
        file or a directory, so visitchildrenset('dir') for most matchers will
        return {'f'}, but if the matcher knows it's a file (like exactmatcher
        does), it may return 'this'. Do not rely on the return being a set
        indicating that there are no files in this dir to investigate (or
        equivalently that if there are files to investigate in 'dir' that it
        will always return 'this').
        """
        return b'this'

    def always(self):
        """Matcher will match everything and .files() will be empty --
        optimization might be possible."""
        return False

    def isexact(self):
        """Matcher will match exactly the list of files in .files() --
        optimization might be possible."""
        return False

    def prefix(self):
        """Matcher will match the paths in .files() recursively --
        optimization might be possible."""
        return False

    def anypats(self):
        """None of .always(), .isexact(), and .prefix() is true --
        optimizations will be difficult."""
        return not self.always() and not self.isexact() and not self.prefix()
class alwaysmatcher(basematcher):
    """Matcher that accepts every file and every directory."""

    def __init__(self, badfn=None):
        super(alwaysmatcher, self).__init__(badfn)

    def matchfn(self, f):
        return True

    def always(self):
        return True

    def visitchildrenset(self, dir):
        return b'all'

    def visitdir(self, dir):
        return b'all'

    def __repr__(self):
        return r'<alwaysmatcher>'
class nevermatcher(basematcher):
    """Matcher that rejects every file."""

    def __init__(self, badfn=None):
        super(nevermatcher, self).__init__(badfn)

    # Claiming to be both an exact and a prefix matcher lets callers take
    # their fast paths safely: files() is empty, so iterating exact matches
    # or prefixes does no work and stays correct.
    def prefix(self):
        return True

    def isexact(self):
        return True

    def visitchildrenset(self, dir):
        return set()

    def visitdir(self, dir):
        return False

    def __repr__(self):
        return r'<nevermatcher>'
class predicatematcher(basematcher):
    """A matcher adapter for a simple boolean function"""

    def __init__(self, predfn, predrepr=None, badfn=None):
        super(predicatematcher, self).__init__(badfn)
        self.matchfn = predfn
        self._predrepr = predrepr

    @encoding.strmethod
    def __repr__(self):
        desc = stringutil.buildrepr(self._predrepr)
        if not desc:
            desc = pycompat.byterepr(self.matchfn)
        return b'<predicatenmatcher pred=%s>' % desc
def path_or_parents_in_set(path, prefix_set):
    """Returns True if `path` (or any parent of `path`) is in `prefix_set`."""
    count = len(prefix_set)
    if count == 0:
        return False
    if path in prefix_set:
        return True
    # With more than 5 prefixes it's *probably* cheaper to walk up the
    # (usually shallow) directory hierarchy doing hash lookups than to scan
    # every prefix.
    if count > 5:
        return any(
            parentdir in prefix_set for parentdir in pathutil.finddirs(path)
        )
    # FIXME: Ideally we'd never get to this point if this is the case - we'd
    # recognize ourselves as an 'always' matcher and skip this.
    if b'' in prefix_set:
        return True
    sl = ord(b'/') if pycompat.ispy3 else '/'
    # path is not exactly in prefix_set (checked above), so for any prefix
    # pf that path startswith, path[len(pf)] cannot raise IndexError.
    return any(path.startswith(pf) and path[len(pf)] == sl for pf in prefix_set)
class patternmatcher(basematcher):
    r"""Matches a set of (kind, pat, source) against a 'root' directory.

    >>> kindpats = [
    ...     (b're', br'.*\.c$', b''),
    ...     (b'path', b'foo/a', b''),
    ...     (b'relpath', b'b', b''),
    ...     (b'glob', b'*.h', b''),
    ... ]
    >>> m = patternmatcher(b'foo', kindpats)
    >>> m(b'main.c')  # matches re:.*\.c$
    True
    >>> m(b'b.txt')
    False
    >>> m(b'foo/a')  # matches path:foo/a
    True
    >>> m(b'a')  # does not match path:b, since 'root' is 'foo'
    False
    >>> m(b'b')  # matches relpath:b, since 'root' is 'foo'
    True
    >>> m(b'lib.h')  # matches glob:*.h
    True
    >>> m.files()
    ['', 'foo/a', 'b', '']
    >>> m.exact(b'foo/a')
    True
    >>> m.exact(b'b')
    True
    >>> m.exact(b'lib.h')  # exact matches are for (rel)path kinds
    False
    """

    def __init__(self, root, kindpats, badfn=None):
        super(patternmatcher, self).__init__(badfn)
        # files named explicitly by path-like kinds
        self._files = _explicitfiles(kindpats)
        # True when every pattern is a recursive (prefix) kind
        self._prefix = _prefix(kindpats)
        self._pats, self.matchfn = _buildmatch(kindpats, b'$', root)

    @propertycache
    def _dirs(self):
        return set(pathutil.dirs(self._fileset))

    def visitdir(self, dir):
        # a prefix matcher hitting a listed root matches everything below it
        if self._prefix and dir in self._fileset:
            return b'all'
        return dir in self._dirs or path_or_parents_in_set(dir, self._fileset)

    def visitchildrenset(self, dir):
        ret = self.visitdir(dir)
        if ret is True:
            return b'this'
        elif not ret:
            return set()
        assert ret == b'all'
        return b'all'

    def prefix(self):
        return self._prefix

    @encoding.strmethod
    def __repr__(self):
        return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
# This is basically a reimplementation of pathutil.dirs that stores the
# children instead of just a count of them, plus a small optional optimization
# to avoid some directories we don't need.
class _dirchildren(object):
def __init__(self, paths, onlyinclude=None):
self._dirs = {}
self._onlyinclude = onlyinclude or []
addpath = self.addpath
for f in paths:
addpath(f)
def addpath(self, path):
if path == b'':
return
dirs = self._dirs
findsplitdirs = _dirchildren._findsplitdirs
for d, b in findsplitdirs(path):
if d not in self._onlyinclude:
continue
dirs.setdefault(d, set()).add(b)
@staticmethod
def _findsplitdirs(path):
# yields (dirname, basename) tuples, walking back to the root. This is
# very similar to pathutil.finddirs, except:
# - produces a (dirname, basename) tuple, not just 'dirname'
# Unlike manifest._splittopdir, this does not suffix `dirname` with a
# slash.
oldpos = len(path)
pos = path.rfind(b'/')
while pos != -1:
yield path[:pos], path[pos + 1 : oldpos]
oldpos = pos
pos = path.rfind(b'/', 0, pos)
yield b'', path[:oldpos]
def get(self, path):
return self._dirs.get(path, set())
class includematcher(basematcher):
    """Matcher for 'include'-style patterns rooted at `root`."""

    def __init__(self, root, kindpats, badfn=None):
        super(includematcher, self).__init__(badfn)
        if rustmod is not None:
            # We need to pass the patterns to Rust because they can contain
            # patterns from the user interface
            self._kindpats = kindpats
        self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root)
        self._prefix = _prefix(kindpats)
        roots, dirs, parents = _rootsdirsandparents(kindpats)
        # roots are directories which are recursively included.
        self._roots = set(roots)
        # dirs are directories which are non-recursively included.
        self._dirs = set(dirs)
        # parents are directories which are non-recursively included because
        # they are needed to get to items in _dirs or _roots.
        self._parents = parents

    def visitdir(self, dir):
        # a prefix matcher hitting a recursive root includes everything below
        if self._prefix and dir in self._roots:
            return b'all'
        return (
            dir in self._dirs
            or dir in self._parents
            or path_or_parents_in_set(dir, self._roots)
        )

    @propertycache
    def _allparentschildren(self):
        # It may seem odd that we add dirs, roots, and parents, and then
        # restrict to only parents. This is to catch the case of:
        # dirs = ['foo/bar']
        # parents = ['foo']
        # if we asked for the children of 'foo', but had only added
        # self._parents, we wouldn't be able to respond ['bar'].
        return _dirchildren(
            itertools.chain(self._dirs, self._roots, self._parents),
            onlyinclude=self._parents,
        )

    def visitchildrenset(self, dir):
        if self._prefix and dir in self._roots:
            return b'all'
        # Note: this does *not* include the 'dir in self._parents' case from
        # visitdir, that's handled below.
        if (
            b'' in self._roots
            or dir in self._dirs
            or path_or_parents_in_set(dir, self._roots)
        ):
            return b'this'
        if dir in self._parents:
            return self._allparentschildren.get(dir) or set()
        return set()

    @encoding.strmethod
    def __repr__(self):
        return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)
class exactmatcher(basematcher):
    r"""Matches the input files exactly. They are interpreted as paths, not
    patterns (so no kind-prefixes).

    >>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
    >>> m(b'a.txt')
    True
    >>> m(b'b.txt')
    False

    Input files that would be matched are exactly those returned by .files()
    >>> m.files()
    ['a.txt', 're:.*\\.c$']

    So pattern 're:.*\.c$' is not considered as a regex, but as a file name
    >>> m(b'main.c')
    False
    >>> m(br're:.*\.c$')
    True
    """

    def __init__(self, files, badfn=None):
        super(exactmatcher, self).__init__(badfn)
        # accept any iterable but keep a list so _files has a stable order
        if isinstance(files, list):
            self._files = files
        else:
            self._files = list(files)

    # exact matching is simply membership in the listed file set
    matchfn = basematcher.exact

    @propertycache
    def _dirs(self):
        return set(pathutil.dirs(self._fileset))

    def visitdir(self, dir):
        return dir in self._dirs

    @propertycache
    def _visitchildrenset_candidates(self):
        """A memoized set of candidates for visitchildrenset."""
        return self._fileset | self._dirs - {b''}

    @propertycache
    def _sorted_visitchildrenset_candidates(self):
        """A memoized sorted list of candidates for visitchildrenset."""
        return sorted(self._visitchildrenset_candidates)

    def visitchildrenset(self, dir):
        if not self._fileset or dir not in self._dirs:
            return set()

        if dir == b'':
            candidates = self._visitchildrenset_candidates
        else:
            candidates = self._sorted_visitchildrenset_candidates
            d = dir + b'/'
            # Use bisect to find the first element potentially starting with d
            # (i.e. >= d). This should always find at least one element (we'll
            # assert later if this is not the case).
            first = bisect.bisect_left(candidates, d)
            # We need a representation of the first element that is > d that
            # does not start with d, so since we added a `/` on the end of dir,
            # we'll add whatever comes after slash (we could probably assume
            # that `0` is after `/`, but let's not) to the end of dir instead.
            dnext = dir + encoding.strtolocal(chr(ord(b'/') + 1))
            # Use bisect to find the first element >= d_next
            last = bisect.bisect_left(candidates, dnext, lo=first)
            dlen = len(d)
            candidates = {c[dlen:] for c in candidates[first:last]}
        # self._dirs includes all of the directories, recursively, so if
        # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
        # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
        # '/' in it, indicating a it's for a subdir-of-a-subdir; the
        # immediate subdir will be in there without a slash.
        ret = {c for c in candidates if b'/' not in c}
        # We really do not expect ret to be empty, since that would imply that
        # there's something in _dirs that didn't have a file in _fileset.
        assert ret
        return ret

    def isexact(self):
        return True

    @encoding.strmethod
    def __repr__(self):
        return b'<exactmatcher files=%r>' % self._files
class differencematcher(basematcher):
    """Composes two matchers by matching if the first matches and the second
    does not.

    The second matcher's non-matching-attributes (bad, traversedir) are ignored.
    """

    def __init__(self, m1, m2):
        super(differencematcher, self).__init__()
        # m1 selects, m2 excludes.
        self._m1 = m1
        self._m2 = m2
        # Non-matching callbacks come from m1 only.
        self.bad = m1.bad
        self.traversedir = m1.traversedir

    def matchfn(self, f):
        # A file matches iff m1 matches it and m2 does not.
        return self._m1(f) and not self._m2(f)

    @propertycache
    def _files(self):
        if self.isexact():
            return [f for f in self._m1.files() if self(f)]
        # If m1 is not an exact matcher, we can't easily figure out the set of
        # files, because its files() are not always files. For example, if
        # m1 is "path:dir" and m2 is "rootfileins:.", we don't
        # want to remove "dir" from the set even though it would match m2,
        # because the "dir" in m1 may not be a file.
        return self._m1.files()

    def visitdir(self, dir):
        """Whether ``dir`` is worth visiting; never returns b'all' when m2
        could still exclude something beneath it."""
        if self._m2.visitdir(dir) == b'all':
            # Everything under dir is excluded by m2.
            return False
        elif not self._m2.visitdir(dir):
            # m2 does not match dir, we can return 'all' here if possible
            return self._m1.visitdir(dir)
        return bool(self._m1.visitdir(dir))

    def visitchildrenset(self, dir):
        """Children of ``dir`` worth visiting, as a set, b'this' or b'all'."""
        m2_set = self._m2.visitchildrenset(dir)
        if m2_set == b'all':
            return set()
        m1_set = self._m1.visitchildrenset(dir)
        # Possible values for m1: 'all', 'this', set(...), set()
        # Possible values for m2: 'this', set(...), set()
        # If m2 has nothing under here that we care about, return m1, even if
        # it's 'all'. This is a change in behavior from visitdir, which would
        # return True, not 'all', for some reason.
        if not m2_set:
            return m1_set
        if m1_set in [b'all', b'this']:
            # Never return 'all' here if m2_set is any kind of non-empty (either
            # 'this' or set(foo)), since m2 might return set() for a
            # subdirectory.
            return b'this'
        # Possible values for m1: set(...), set()
        # Possible values for m2: 'this', set(...)
        # We ignore m2's set results. They're possibly incorrect:
        #  m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''):
        #  m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
        #  return set(), which is *not* correct, we still need to visit 'dir'!
        return m1_set

    def isexact(self):
        return self._m1.isexact()

    @encoding.strmethod
    def __repr__(self):
        return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)
def intersectmatchers(m1, m2):
    """Compose two matchers into one matching only what both match.

    The second matcher's non-matching-attributes (bad, traversedir) are
    ignored.  If either argument is None, the other one is returned
    unchanged.
    """
    if m1 is None or m2 is None:
        return m1 or m2
    if m1.always():
        # m2 alone decides what matches; carry m1's callbacks over.
        # TODO: Consider encapsulating these things in a class so there's only
        # one thing to copy from m1.
        result = copy.copy(m2)
        result.bad = m1.bad
        result.traversedir = m1.traversedir
        return result
    if m2.always():
        return copy.copy(m1)
    return intersectionmatcher(m1, m2)
class intersectionmatcher(basematcher):
    """Composes two matchers by matching only when both of them match.

    The second matcher's non-matching-attributes (bad, traversedir) are
    ignored; m1's are used.
    """

    def __init__(self, m1, m2):
        super(intersectionmatcher, self).__init__()
        self._m1 = m1
        self._m2 = m2
        # Non-matching callbacks are delegated to m1.
        self.bad = m1.bad
        self.traversedir = m1.traversedir

    @propertycache
    def _files(self):
        if self.isexact():
            m1, m2 = self._m1, self._m2
            if not m1.isexact():
                # Ensure m1 is the exact one so its file list can be
                # filtered through the other matcher.
                m1, m2 = m2, m1
            return [f for f in m1.files() if m2(f)]
        # It neither m1 nor m2 is an exact matcher, we can't easily intersect
        # the set of files, because their files() are not always files. For
        # example, if intersecting a matcher "-I glob:foo.txt" with matcher of
        # "path:dir2", we don't want to remove "dir2" from the set.
        return self._m1.files() + self._m2.files()

    def matchfn(self, f):
        # A file must satisfy both matchers.
        return self._m1(f) and self._m2(f)

    def visitdir(self, dir):
        visit1 = self._m1.visitdir(dir)
        if visit1 == b'all':
            return self._m2.visitdir(dir)
        # bool() because visit1=True + visit2='all' should not be 'all'
        return bool(visit1 and self._m2.visitdir(dir))

    def visitchildrenset(self, dir):
        """Intersection of the two child sets (set, b'this' or b'all')."""
        m1_set = self._m1.visitchildrenset(dir)
        if not m1_set:
            return set()
        m2_set = self._m2.visitchildrenset(dir)
        if not m2_set:
            return set()
        if m1_set == b'all':
            return m2_set
        elif m2_set == b'all':
            return m1_set
        if m1_set == b'this' or m2_set == b'this':
            return b'this'
        assert isinstance(m1_set, set) and isinstance(m2_set, set)
        return m1_set.intersection(m2_set)

    def always(self):
        return self._m1.always() and self._m2.always()

    def isexact(self):
        return self._m1.isexact() or self._m2.isexact()

    @encoding.strmethod
    def __repr__(self):
        return b'<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)
class subdirmatcher(basematcher):
    """Adapt a matcher to work on a subdirectory only.

    The paths are remapped to remove/insert the path as needed:

    >>> from . import pycompat
    >>> m1 = match(util.localpath(b'/root'), b'', [b'a.txt', b'sub/b.txt'], auditor=lambda name: None)
    >>> m2 = subdirmatcher(b'sub', m1)
    >>> m2(b'a.txt')
    False
    >>> m2(b'b.txt')
    True
    >>> m2.matchfn(b'a.txt')
    False
    >>> m2.matchfn(b'b.txt')
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact(b'b.txt')
    True
    >>> def bad(f, msg):
    ...     print(pycompat.sysstr(b"%s: %s" % (f, msg)))
    >>> m1.bad = bad
    >>> m2.bad(b'x.txt', b'No such file')
    sub/x.txt: No such file
    """

    def __init__(self, path, matcher):
        super(subdirmatcher, self).__init__()
        self._path = path
        self._matcher = matcher
        self._always = matcher.always()

        # Keep only files under path, stripped of the 'path/' prefix.
        self._files = [
            f[len(path) + 1 :]
            for f in matcher._files
            if f.startswith(path + b"/")
        ]

        # If the parent repo had a path to this subrepo and the matcher is
        # a prefix matcher, this submatcher always matches.
        if matcher.prefix():
            self._always = any(f == path for f in matcher._files)

    def bad(self, f, msg):
        # Report errors using the full (prefixed) path.
        self._matcher.bad(self._path + b"/" + f, msg)

    def matchfn(self, f):
        # Some information is lost in the superclass's constructor, so we
        # can not accurately create the matching function for the subdirectory
        # from the inputs. Instead, we override matchfn() and visitdir() to
        # call the original matcher with the subdirectory path prepended.
        return self._matcher.matchfn(self._path + b"/" + f)

    def visitdir(self, dir):
        # Remap dir into the parent matcher's namespace.
        if dir == b'':
            dir = self._path
        else:
            dir = self._path + b"/" + dir
        return self._matcher.visitdir(dir)

    def visitchildrenset(self, dir):
        # Remap dir into the parent matcher's namespace.
        if dir == b'':
            dir = self._path
        else:
            dir = self._path + b"/" + dir
        return self._matcher.visitchildrenset(dir)

    def always(self):
        return self._always

    def prefix(self):
        return self._matcher.prefix() and not self._always

    @encoding.strmethod
    def __repr__(self):
        return b'<subdirmatcher path=%r, matcher=%r>' % (
            self._path,
            self._matcher,
        )
class prefixdirmatcher(basematcher):
    """Adapt a matcher to work on a parent directory.

    The matcher's non-matching-attributes (bad, traversedir) are ignored.

    The prefix path should usually be the relative path from the root of
    this matcher to the root of the wrapped matcher.

    >>> m1 = match(util.localpath(b'/root/d/e'), b'f', [b'../a.txt', b'b.txt'], auditor=lambda name: None)
    >>> m2 = prefixdirmatcher(b'd/e', m1)
    >>> m2(b'a.txt')
    False
    >>> m2(b'd/e/a.txt')
    True
    >>> m2(b'd/e/b.txt')
    False
    >>> m2.files()
    ['d/e/a.txt', 'd/e/f/b.txt']
    >>> m2.exact(b'd/e/a.txt')
    True
    >>> m2.visitdir(b'd')
    True
    >>> m2.visitdir(b'd/e')
    True
    >>> m2.visitdir(b'd/e/f')
    True
    >>> m2.visitdir(b'd/e/g')
    False
    >>> m2.visitdir(b'd/ef')
    False
    """

    def __init__(self, path, matcher, badfn=None):
        super(prefixdirmatcher, self).__init__(badfn)
        if not path:
            raise error.ProgrammingError(b'prefix path must not be empty')
        self._path = path
        # Precomputed 'path/' prefix used for stripping/prepending.
        self._pathprefix = path + b'/'
        self._matcher = matcher

    @propertycache
    def _files(self):
        # The wrapped matcher's files, re-rooted under the prefix.
        return [self._pathprefix + f for f in self._matcher._files]

    def matchfn(self, f):
        # Only paths under the prefix can match; strip it before delegating.
        if not f.startswith(self._pathprefix):
            return False
        return self._matcher.matchfn(f[len(self._pathprefix) :])

    @propertycache
    def _pathdirs(self):
        # Ancestor directories of the prefix itself.
        return set(pathutil.finddirs(self._path))

    def visitdir(self, dir):
        if dir == self._path:
            return self._matcher.visitdir(b'')
        if dir.startswith(self._pathprefix):
            return self._matcher.visitdir(dir[len(self._pathprefix) :])
        # Outside the prefix: only worth visiting if it leads to the prefix.
        return dir in self._pathdirs

    def visitchildrenset(self, dir):
        if dir == self._path:
            return self._matcher.visitchildrenset(b'')
        if dir.startswith(self._pathprefix):
            return self._matcher.visitchildrenset(dir[len(self._pathprefix) :])
        if dir in self._pathdirs:
            return b'this'
        return set()

    def isexact(self):
        return self._matcher.isexact()

    def prefix(self):
        return self._matcher.prefix()

    @encoding.strmethod
    def __repr__(self):
        return b'<prefixdirmatcher path=%r, matcher=%r>' % (
            pycompat.bytestr(self._path),
            self._matcher,
        )
class unionmatcher(basematcher):
    """A matcher that is the union of several matchers.

    The non-matching-attributes (bad, traversedir) are taken from the first
    matcher.
    """

    def __init__(self, matchers):
        first = matchers[0]
        super(unionmatcher, self).__init__()
        self.traversedir = first.traversedir
        self._matchers = matchers

    def matchfn(self, f):
        """True if any of the component matchers matches ``f``."""
        return any(m(f) for m in self._matchers)

    def visitdir(self, dir):
        """Union of the component answers; b'all' short-circuits."""
        result = False
        for matcher in self._matchers:
            answer = matcher.visitdir(dir)
            if answer == b'all':
                return answer
            result |= answer
        return result

    def visitchildrenset(self, dir):
        """Union of the component child sets.

        b'all' wins outright.  b'this' from any matcher collapses the final
        result to b'this', but the scan continues in case a later matcher
        answers b'all'.
        """
        children = set()
        saw_this = False
        for matcher in self._matchers:
            answer = matcher.visitchildrenset(dir)
            if not answer:
                continue
            if answer == b'all':
                return answer
            if saw_this or answer == b'this':
                saw_this = True
                # don't break, we might have an 'all' in here.
                continue
            assert isinstance(answer, set)
            children = children.union(answer)
        if saw_this:
            return b'this'
        return children

    @encoding.strmethod
    def __repr__(self):
        return b'<unionmatcher matchers=%r>' % self._matchers
def patkind(pattern, default=None):
    r"""If pattern is 'kind:pat' with a known kind, return kind.

    >>> patkind(br're:.*\.c$')
    're'
    >>> patkind(b'glob:*.c')
    'glob'
    >>> patkind(b'relpath:test.py')
    'relpath'
    >>> patkind(b'main.py')
    >>> patkind(b'main.py', default=b're')
    're'
    """
    kind, _pat = _patsplit(pattern, default)
    return kind
def _patsplit(pattern, default):
    """Split a string into the optional pattern kind prefix and the actual
    pattern.

    A ``kind:pat`` prefix is honored only when ``kind`` is one of the known
    pattern kinds; otherwise ``(default, pattern)`` is returned unchanged.
    """
    kind, sep, rest = pattern.partition(b':')
    if sep and kind in allpatternkinds:
        return kind, rest
    return default, pattern
def _globre(pat):
    r"""Convert an extended glob string to a regexp string.

    >>> from . import pycompat
    >>> def bprint(s):
    ...     print(pycompat.sysstr(s))
    >>> bprint(_globre(br'?'))
    .
    >>> bprint(_globre(br'*'))
    [^/]*
    >>> bprint(_globre(br'**'))
    .*
    >>> bprint(_globre(br'**/a'))
    (?:.*/)?a
    >>> bprint(_globre(br'a/**/b'))
    a/(?:.*/)?b
    >>> bprint(_globre(br'[a*?!^][^b][!c]'))
    [a*?!^][\^b][^c]
    >>> bprint(_globre(br'{a,b}'))
    (?:a|b)
    >>> bprint(_globre(br'.\*\?'))
    \.\*\?
    """
    i, n = 0, len(pat)
    res = b''
    # Depth of currently open {...} alternation groups.
    group = 0
    escape = util.stringutil.regexbytesescapemap.get

    def peek():
        # One-byte lookahead at the current position (False at end).
        return i < n and pat[i : i + 1]

    while i < n:
        c = pat[i : i + 1]
        i += 1
        if c not in b'*?[{},\\':
            # Ordinary byte: escape it if it is a regex metacharacter.
            res += escape(c, c)
        elif c == b'*':
            if peek() == b'*':
                # '**' matches across path separators; '**/' optionally so.
                i += 1
                if peek() == b'/':
                    i += 1
                    res += b'(?:.*/)?'
                else:
                    res += b'.*'
            else:
                # Single '*' never crosses a '/'.
                res += b'[^/]*'
        elif c == b'?':
            res += b'.'
        elif c == b'[':
            # Scan ahead for the closing ']' of a character class; a
            # leading '!' or ']' is part of the class body.
            j = i
            if j < n and pat[j : j + 1] in b'!]':
                j += 1
            while j < n and pat[j : j + 1] != b']':
                j += 1
            if j >= n:
                # Unterminated class: treat '[' as a literal.
                res += b'\\['
            else:
                stuff = pat[i:j].replace(b'\\', b'\\\\')
                i = j + 1
                if stuff[0:1] == b'!':
                    # Glob negation '[!...]' becomes regex '[^...]'.
                    stuff = b'^' + stuff[1:]
                elif stuff[0:1] == b'^':
                    # Literal leading '^' must be escaped in regex.
                    stuff = b'\\' + stuff
                res = b'%s[%s]' % (res, stuff)
        elif c == b'{':
            group += 1
            res += b'(?:'
        elif c == b'}' and group:
            res += b')'
            group -= 1
        elif c == b',' and group:
            # ',' separates alternatives only inside a {...} group.
            res += b'|'
        elif c == b'\\':
            # Backslash escapes the next byte (if any).
            p = peek()
            if p:
                i += 1
                res += escape(p, p)
            else:
                res += escape(c, c)
        else:
            # '}' or ',' outside a group: literal.
            res += escape(c, c)
    return res
def _regex(kind, pat, globsuffix):
    """Convert a (normalized) pattern of any kind into a
    regular expression.

    globsuffix is appended to the regexp of globs.

    Raises ProgrammingError for kinds that have no regex form.
    """
    if not pat and kind in (b'glob', b'relpath'):
        return b''
    if kind == b're':
        return pat
    if kind in (b'path', b'relpath'):
        if pat == b'.':
            return b''
        # Match the path itself or anything beneath it.
        return util.stringutil.reescape(pat) + b'(?:/|$)'
    if kind == b'rootfilesin':
        if pat == b'.':
            escaped = b''
        else:
            # Pattern is a directory name.
            escaped = util.stringutil.reescape(pat) + b'/'
        # Anything after the pattern must be a non-directory.
        return escaped + b'[^/]+$'
    if kind == b'relglob':
        globre = _globre(pat)
        if globre.startswith(b'[^/]*'):
            # When pat has the form *XYZ (common), make the returned regex more
            # legible by returning the regex for **XYZ instead of **/*XYZ.
            return b'.*' + globre[len(b'[^/]*') :] + globsuffix
        return b'(?:|.*/)' + globre + globsuffix
    if kind == b'relre':
        if pat.startswith(b'^'):
            return pat
        # Unanchored relre patterns may match anywhere in the path.
        return b'.*' + pat
    if kind in (b'glob', b'rootglob'):
        return _globre(pat) + globsuffix
    raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))
def _buildmatch(kindpats, globsuffix, root):
    """Return regexp string and a matcher function for kindpats.

    globsuffix is appended to the regexp of globs.

    Subinclude patterns are expanded and matched via lazily-built
    per-prefix submatchers; the remaining patterns are matched either by
    a fast directory-set check (all-rootfilesin case) or via a compiled
    regex.
    """
    matchfuncs = []

    subincludes, kindpats = _expandsubinclude(kindpats, root)
    if subincludes:
        submatchers = {}

        def matchsubinclude(f):
            # Build (and cache) a matcher per subinclude prefix on demand.
            for prefix, matcherargs in subincludes:
                if f.startswith(prefix):
                    mf = submatchers.get(prefix)
                    if mf is None:
                        mf = match(*matcherargs)
                        submatchers[prefix] = mf

                    if mf(f[len(prefix) :]):
                        return True
            return False

        matchfuncs.append(matchsubinclude)

    regex = b''
    if kindpats:
        if all(k == b'rootfilesin' for k, p, s in kindpats):
            # Fast path: only directory-membership checks are needed.
            dirs = {p for k, p, s in kindpats}

            def mf(f):
                # A file matches if its immediate parent is a listed dir.
                i = f.rfind(b'/')
                if i >= 0:
                    dir = f[:i]
                else:
                    dir = b'.'
                return dir in dirs

            regex = b'rootfilesin: %s' % stringutil.pprint(list(sorted(dirs)))
            matchfuncs.append(mf)
        else:
            regex, mf = _buildregexmatch(kindpats, globsuffix)
            matchfuncs.append(mf)

    if len(matchfuncs) == 1:
        return regex, matchfuncs[0]
    else:
        return regex, lambda f: any(mf(f) for mf in matchfuncs)
MAX_RE_SIZE = 20000
def _joinregexes(regexps):
"""gather multiple regular expressions into a single one"""
return b'|'.join(regexps)
def _buildregexmatch(kindpats, globsuffix):
    """Build a match function from a list of kinds and kindpats,
    return regexp string and a matcher function.

    When the joined pattern exceeds MAX_RE_SIZE, the patterns are split
    into several groups, each compiled separately, and the matcher tests
    them in turn.

    Test too large input
    >>> _buildregexmatch([
    ...     (b'relglob', b'?' * MAX_RE_SIZE, b'')
    ... ], b'$')
    Traceback (most recent call last):
    ...
    Abort: matcher pattern is too long (20009 bytes)
    """
    try:
        allgroups = []
        regexps = [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
        fullregexp = _joinregexes(regexps)

        startidx = 0
        groupsize = 0
        for idx, r in enumerate(regexps):
            piecesize = len(r)
            if piecesize > MAX_RE_SIZE:
                # A single pattern that cannot fit in any group.
                msg = _(b"matcher pattern is too long (%d bytes)") % piecesize
                raise error.Abort(msg)
            elif (groupsize + piecesize) > MAX_RE_SIZE:
                # Close the current group and start a new one.
                group = regexps[startidx:idx]
                allgroups.append(_joinregexes(group))
                startidx = idx
                groupsize = 0
            # +1 accounts for the b'|' separator added when joining.
            groupsize += piecesize + 1

        if startidx == 0:
            # Everything fit into a single regex.
            matcher = _rematcher(fullregexp)
            func = lambda s: bool(matcher(s))
        else:
            group = regexps[startidx:]
            allgroups.append(_joinregexes(group))
            allmatchers = [_rematcher(g) for g in allgroups]
            func = lambda s: any(m(s) for m in allmatchers)
        return fullregexp, func
    except re.error:
        # Recompile each pattern individually to pinpoint the broken one
        # and produce a helpful error message.
        for k, p, s in kindpats:
            try:
                _rematcher(_regex(k, p, globsuffix))
            except re.error:
                if s:
                    raise error.Abort(
                        _(b"%s: invalid pattern (%s): %s") % (s, k, p)
                    )
                else:
                    raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p))
        raise error.Abort(_(b"invalid pattern"))
def _patternrootsanddirs(kindpats):
"""Returns roots and directories corresponding to each pattern.
This calculates the roots and directories exactly matching the patterns and
returns a tuple of (roots, dirs) for each. It does not return other
directories which may also need to be considered, like the parent
directories.
"""
r = []
d = []
for kind, pat, source in kindpats:
if kind in (b'glob', b'rootglob'): # find the non-glob prefix
root = []
for p in pat.split(b'/'):
if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
break
root.append(p)
r.append(b'/'.join(root))
elif kind in (b'relpath', b'path'):
if pat == b'.':
pat = b''
r.append(pat)
elif kind in (b'rootfilesin',):
if pat == b'.':
pat = b''
d.append(pat)
else: # relglob, re, relre
r.append(b'')
return r, d
def _roots(kindpats):
    '''Returns root directories to match recursively from the given patterns.'''
    # Only the roots half of the (roots, dirs) pair is needed here.
    return _patternrootsanddirs(kindpats)[0]
def _rootsdirsandparents(kindpats):
    """Returns roots and exact directories from patterns.

    `roots` are directories to match recursively, `dirs` should
    be matched non-recursively, and `parents` are the implicitly required
    directories to walk to items in either roots or dirs.

    Returns a tuple of (roots, dirs, parents).

    >>> r = _rootsdirsandparents(
    ...     [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
    ...      (b'glob', b'g*', b'')])
    >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
    (['g/h', 'g/h', ''], []) ['', 'g']
    >>> r = _rootsdirsandparents(
    ...     [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
    >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
    ([], ['g/h', '']) ['', 'g']
    >>> r = _rootsdirsandparents(
    ...     [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
    ...      (b'path', b'', b'')])
    >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
    (['r', 'p/p', ''], []) ['', 'p']
    >>> r = _rootsdirsandparents(
    ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
    ...      (b'relre', b'rr', b'')])
    >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
    (['', '', ''], []) ['']
    """
    r, d = _patternrootsanddirs(kindpats)

    p = set()
    # Add the parents as non-recursive/exact directories, since they must be
    # scanned to get to either the roots or the other exact directories.
    p.update(pathutil.dirs(d))
    p.update(pathutil.dirs(r))

    # FIXME: all uses of this function convert these to sets, do so before
    # returning.
    # FIXME: all uses of this function do not need anything in 'roots' and
    # 'dirs' to also be in 'parents', consider removing them before returning.
    return r, d, p
def _explicitfiles(kindpats):
    """Returns the potential explicit filenames from the patterns.

    >>> _explicitfiles([(b'path', b'foo/bar', b'')])
    ['foo/bar']
    >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
    []
    """
    # 'rootfilesin' names a directory, never a file, so drop those before
    # extracting the roots.
    filable = [kp for kp in kindpats if kp[0] != b'rootfilesin']
    return _roots(filable)
def _prefix(kindpats):
'''Whether all the patterns match a prefix (i.e. recursively)'''
for kind, pat, source in kindpats:
if kind not in (b'path', b'relpath'):
return False
return True
_commentre = None
def readpatternfile(filepath, warn, sourceinfo=False):
    """parse a pattern file, returning a list of
    patterns. These patterns should be given to compile()
    to be validated and converted into a match function.

    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.

    lines can be of the following formats:

    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob   # defaults following lines to non-rooted globs
    re:pattern     # non-rooted regular expression
    glob:pattern   # non-rooted glob
    rootglob:pat   # rooted glob (same root as ^ in regexps)
    pattern        # pattern of the current default type

    if sourceinfo is set, returns a list of tuples:
    (pattern, lineno, originalline).
    This is useful to debug ignore patterns.
    """
    # Maps a 'syntax: X' name to the internal kind prefix applied to the
    # following lines.
    syntaxes = {
        b're': b'relre:',
        b'regexp': b'relre:',
        b'glob': b'relglob:',
        b'rootglob': b'rootglob:',
        b'include': b'include',
        b'subinclude': b'subinclude',
    }
    syntax = b'relre:'
    patterns = []

    fp = open(filepath, b'rb')
    for lineno, line in enumerate(util.iterfile(fp), start=1):
        if b"#" in line:
            global _commentre
            if not _commentre:
                # Lazily-compiled, cached across calls.
                _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
            # remove comments prefixed by an even number of escapes
            m = _commentre.search(line)
            if m:
                line = line[: m.end(1)]
            # fixup properly escaped comments that survived the above
            line = line.replace(b"\\#", b"#")
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'syntax:'):
            # Switch the default syntax for subsequent lines.
            s = line[7:].strip()
            try:
                syntax = syntaxes[s]
            except KeyError:
                if warn:
                    warn(
                        _(b"%s: ignoring invalid syntax '%s'\n") % (filepath, s)
                    )
            continue

        linesyntax = syntax
        # A per-line 'kind:' prefix (internal or user-facing spelling)
        # overrides the current default syntax.
        for s, rels in pycompat.iteritems(syntaxes):
            if line.startswith(rels):
                linesyntax = rels
                line = line[len(rels) :]
                break
            elif line.startswith(s + b':'):
                linesyntax = rels
                line = line[len(s) + 1 :]
                break
        if sourceinfo:
            patterns.append((linesyntax + line, lineno, line))
        else:
            patterns.append(linesyntax + line)
    fp.close()
    return patterns
| {
"content_hash": "b3beabd9a306d285bff6cef91e0f44ef",
"timestamp": "",
"source": "github",
"line_count": 1659,
"max_line_length": 106,
"avg_line_length": 32.13562386980109,
"alnum_prop": 0.5584003901487442,
"repo_name": "ingokegel/intellij-community",
"id": "66fc95381ef314d3e49ca566bbaa90ec28b85bd9",
"size": "53313",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/mercurial/match.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust the choices for Export.export_format and export_segment_size."""

    dependencies = [
        ('ui', '0018_auto_20171005_1213'),
    ]

    operations = [
        migrations.AlterField(
            model_name='export',
            name='export_format',
            field=models.CharField(default=b'xlsx', max_length=10, choices=[(b'xlsx', b'Excel (XLSX)'), (b'csv', b'Comma separated values (CSV)'), (b'tsv', b'Tab separated values (TSV)'), (b'json_full', b'Full JSON'), (b'json', b'JSON of limited fields'), (b'dehydrate', b'Text file of identifiers (dehydrate)')]),
        ),
        migrations.AlterField(
            model_name='export',
            name='export_segment_size',
            # BUG FIX: the b'1,000,000' label was previously paired with the
            # value 100000 (duplicating the b'100,000' choice); it must be
            # 1000000 to match its label. `choices` is validation metadata
            # only, so this does not alter the database schema.
            field=models.BigIntegerField(default=250000, null=True, blank=True, choices=[(100000, b'100,000'), (250000, b'250,000'), (500000, b'500,000'), (1000000, b'1,000,000'), (None, b'Single file')]),
        ),
    ]
| {
"content_hash": "4baf5ef108e0c8a929d2648a4ba09217",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 314,
"avg_line_length": 41.91304347826087,
"alnum_prop": 0.6047717842323651,
"repo_name": "gwu-libraries/sfm-ui",
"id": "8ebec60e99251af3b9fa81181e34c74651dde6e9",
"size": "988",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sfm/ui/migrations/0019_auto_20180530_1831.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6127"
},
{
"name": "Dockerfile",
"bytes": "1446"
},
{
"name": "HTML",
"bytes": "123789"
},
{
"name": "JavaScript",
"bytes": "130102"
},
{
"name": "Python",
"bytes": "523118"
},
{
"name": "Shell",
"bytes": "2227"
}
],
"symlink_target": ""
} |
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of a X'Y.

    Returns (x_weights, y_weights, n_iterations).
    """
    # Start from the first column of Y as the initial Y score.
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)  # compute once pinv(X)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        ## 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
        ## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        # Convergence test: squared change in x_weights below tol, or a
        # univariate Y (a single column converges in one pass).
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: Y = X coef_ + Err
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
    @abstractmethod
    def __init__(self, n_components=2, scale=True, deflation_mode="regression",
                 mode="A", algorithm="nipals", norm_y_weights=False,
                 max_iter=500, tol=1e-06, copy=True):
        """Store the estimator configuration; all computation happens in
        fit().  See the class docstring for parameter meanings."""
        self.n_components = n_components
        self.deflation_mode = deflation_mode
        self.mode = mode
        self.norm_y_weights = norm_y_weights
        self.scale = scale
        self.algorithm = algorithm
        self.max_iter = max_iter
        self.tol = tol
        self.copy = copy
    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples in the number of samples and
            n_features is the number of predictors.

        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples in the number of samples and
            n_targets is the number of response variables.

        Returns
        -------
        self
        """
        # copy since this will contains the residuals (deflated) matrices
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float, copy=self.copy)
        Y = check_array(Y, dtype=np.float, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            # Promote a 1d response to a single-column 2d array.
            Y = Y[:, None]

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        # Validate the configuration before doing any work.
        if self.n_components < 1 or self.n_components > p:
            raise ValueError('invalid number of components')
        if self.algorithm not in ("svd", "nipals"):
            raise ValueError("Got algorithm %s when only 'svd' "
                             "and 'nipals' are known" % self.algorithm)
        if self.algorithm == "svd" and self.mode == "B":
            raise ValueError('Incompatible configuration: mode B is not '
                             'implemented with svd algorithm')
        if not self.deflation_mode in ["canonical", "regression"]:
            raise ValueError('The deflation mode is unknown')
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
            = _center_scale_xy(X, Y, self.scale)
        # Residuals (deflated) matrices
        Xk = X
        Yk = Y
        # Results matrices
        self.x_scores_ = np.zeros((n, self.n_components))
        self.y_scores_ = np.zeros((n, self.n_components))
        self.x_weights_ = np.zeros((p, self.n_components))
        self.y_weights_ = np.zeros((q, self.n_components))
        self.x_loadings_ = np.zeros((p, self.n_components))
        self.y_loadings_ = np.zeros((q, self.n_components))
        self.n_iter_ = []

        # NIPALS algo: outer loop, over components
        for k in range(self.n_components):
            #1) weights estimation (inner loop)
            # -----------------------------------
            if self.algorithm == "nipals":
                x_weights, y_weights, n_iter_ = \
                    _nipals_twoblocks_inner_loop(
                        X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
                        tol=self.tol, norm_y_weights=self.norm_y_weights)
                self.n_iter_.append(n_iter_)
            elif self.algorithm == "svd":
                x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
            # compute scores
            x_scores = np.dot(Xk, x_weights)
            if self.norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights.T, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss
            # test for null variance
            if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
                warnings.warn('X scores are null at iteration %s' % k)
            #2) Deflation (in place)
            # ----------------------
            # Possible memory footprint reduction may done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted to Xk, we suggest
            # to perform a column-wise deflation.
            #
            # - regress Xk's on x_score
            x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
            # - subtract rank-one approximations to obtain remainder matrix
            Xk -= np.dot(x_scores, x_loadings.T)
            if self.deflation_mode == "canonical":
                # - regress Yk's on y_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, y_scores)
                              / np.dot(y_scores.T, y_scores))
                Yk -= np.dot(y_scores, y_loadings.T)
            if self.deflation_mode == "regression":
                # - regress Yk's on x_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, x_scores)
                              / np.dot(x_scores.T, x_scores))
                Yk -= np.dot(x_scores, y_loadings.T)
            # 3) Store weights, scores and loadings # Notation:
            self.x_scores_[:, k] = x_scores.ravel()  # T
            self.y_scores_[:, k] = y_scores.ravel()  # U
            self.x_weights_[:, k] = x_weights.ravel()  # W
            self.y_weights_[:, k] = y_weights.ravel()  # C
            self.x_loadings_[:, k] = x_loadings.ravel()  # P
            self.y_loadings_[:, k] = y_loadings.ravel()  # Q
        # Such that: X = TP' + Err and Y = UQ' + Err

        # 4) rotations from input space to transformed space (scores)
        # T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
        self.x_rotations_ = np.dot(
            self.x_weights_,
            linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
        if Y.shape[1] > 1:
            self.y_rotations_ = np.dot(
                self.y_weights_,
                linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
        else:
            self.y_rotations_ = np.ones(1)

        # NOTE(review): the 'True or' below makes this branch unconditional,
        # so coef_ is computed for every deflation mode, not only
        # "regression" — confirm this is intentional before simplifying.
        if True or self.deflation_mode == "regression":
            # Estimate regression coefficient
            # Regress Y on T
            # Y = TQ' + Err,
            # Then express in function of X
            # Y = X W(P'W)^-1Q' + Err = XB + Err
            # => B = W*Q' (p x q)
            self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
            # Undo the X scaling and restore the Y scale so coef_ applies to
            # centered-but-unscaled inputs (see predict).
            self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
                          self.y_std_)
        return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
if Y is not None:
Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
else:
X = np.asarray(X)
Xc -= self.x_mean_
Xc /= self.x_std_
if Y is not None:
Y = np.asarray(Y)
Yc -= self.y_mean_
Yc /= self.y_std_
# Apply rotation
x_scores = np.dot(Xc, self.x_rotations_)
if Y is not None:
y_scores = np.dot(Yc, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_)
else:
X = np.asarray(X)
Xc -= self.x_mean_
Xc /= self.x_std_
Ypred = np.dot(Xc, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
    """PLS regression
    PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in case of one dimensional response.
    This class inherits from _PLS with mode="A", deflation_mode="regression",
    norm_y_weights=False and algorithm="nipals".
    Parameters
    ----------
    X : array-like of predictors, shape = [n_samples, p]
        Training vectors, where n_samples in the number of samples and
        p is the number of predictors.
    Y : array-like of response, shape = [n_samples, q] or [n_samples]
        Training vectors, where n_samples in the number of samples and
        q is the number of response variables.
    n_components : int, (default 2)
        Number of components to keep.
    scale : boolean, (default True)
        whether to scale the data
    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals")
    tol : non-negative real
        Tolerance used in the iterative algorithm default 1e-06.
    copy : boolean, default True
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effect
    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.
    x_rotations_ : array, [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.
    coef_: array, [p, q]
        The coefficients of the linear model: Y = X coef_ + Err
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.
    Notes
    -----
    For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1``
    Note that it maximizes both the correlations between the scores and the
    intra-block variances.
    The residual matrix of X (Xk+1) block is obtained by the deflation on
    the current X score: x_score.
    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current X score. This performs the PLS regression known as PLS2. This
    mode is prediction oriented.
    This implementation provides the same results that 3 PLS packages
    provided in the R language (R-project):
    - "mixOmics" with function pls(X, Y, mode = "regression")
    - "plspm " with function plsreg2(X, Y)
    - "pls" with function oscorespls.fit(X, Y)
    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
            tol=1e-06)
    >>> Y_pred = pls2.predict(X)
    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.
    In french but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.
    """

    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        # Thin configuration wrapper: all the work happens in _PLS.
        _PLS.__init__(self, n_components=n_components, scale=scale,
                      deflation_mode="regression", mode="A",
                      norm_y_weights=False, max_iter=max_iter, tol=tol,
                      copy=copy)

    @property
    def coefs(self):
        """Deprecated alias for ``coef_``."""
        # BUGFIX: the old code merely *instantiated* a DeprecationWarning
        # and discarded it, so callers never saw any warning.  Emit it
        # properly via warnings.warn.
        import warnings  # local import: module header is outside this chunk
        warnings.warn("'coefs' attribute has been deprecated and will be "
                      "removed in version 0.17. Use 'coef_' instead",
                      DeprecationWarning)
        return self.coef_
class PLSCanonical(_PLS):
    """2-block canonical PLS (Wold's original algorithm).

    Implements the symmetric canonical PLS of [Tenenhaus 1998] p.204,
    referred to as PLS-C2A in [Wegelin 2000].  This is a configuration
    wrapper around ``_PLS`` with mode="A", deflation_mode="canonical" and
    norm_y_weights=True; the "nipals" and "svd" algorithms should agree
    up to numerical error.

    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
    scale : boolean, scale data? (default True)
    algorithm : string, "nipals" or "svd"
        The algorithm used to estimate the weights. It will be called
        n_components times, i.e. once for each iteration of the outer loop.
    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals")
    tol : non-negative real, default 1e-06
        the tolerance used in the iterative algorithm
    copy : boolean, default True
        Whether the deflation should be done on a copy. Keep the default
        unless side effects on the inputs are acceptable.

    Attributes
    ----------
    x_weights_ : array, shape = [p, n_components]
        X block weights vectors.
    y_weights_ : array, shape = [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, shape = [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, shape = [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, shape = [n_samples, n_components]
        X scores.
    y_scores_ : array, shape = [n_samples, n_components]
        Y scores.
    x_rotations_ : array, shape = [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, shape = [q, n_components]
        Y block to latents rotations.
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each component
        (not useful if the algorithm provided is "svd").

    Notes
    -----
    For each component k, weights u, v are found that maximize
    ``corr(Xk u, Yk v) * var(Xk u) var(Yk u)`` such that ``|u| = |v| = 1``,
    i.e. both the score correlations and the intra-block variances.
    Each block's residual matrix is deflated on its *own* score (X on
    x_score, Y on y_score), giving a canonical, modeling-oriented variant
    of PLS that is close to, but distinct from, CCA.

    Matches the R "plspm" package's ``plsca(X, Y)``; results are equal or
    collinear with ``pls(..., mode = "canonical")`` from "mixOmics", whose
    implementation differs by not normalizing y_weights to one.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
                 scale=True, tol=1e-06)
    >>> X_c, Y_c = plsca.transform(X, Y)

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    CCA
    PLSSVD
    """

    def __init__(self, n_components=2, scale=True, algorithm="nipals",
                 max_iter=500, tol=1e-06, copy=True):
        # Pin the canonical-mode configuration; everything else is _PLS.
        super(PLSCanonical, self).__init__(
            n_components=n_components, scale=scale,
            deflation_mode="canonical", mode="A", norm_y_weights=True,
            algorithm=algorithm, max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
    """Partial Least Square SVD
    Simply perform a svd on the crosscovariance matrix: X'Y
    There are no iterative deflation here.
    Parameters
    ----------
    X : array-like of predictors, shape = [n_samples, p]
        Training vector, where n_samples is the number of samples and
        p is the number of predictors. X will be centered before any analysis.
    Y : array-like of response, shape = [n_samples, q]
        Training vector, where n_samples is the number of samples and
        q is the number of response variables. X will be centered before any
        analysis.
    n_components : int, (default 2).
        number of components to keep.
    scale : boolean, (default True)
        whether to scale X and Y.
    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.
    See also
    --------
    PLSCanonical
    CCA
    """

    def __init__(self, n_components=2, scale=True, copy=True):
        self.n_components = n_components
        self.scale = scale
        self.copy = copy

    def fit(self, X, Y):
        """Fit the model by an SVD of the cross-covariance matrix X'Y."""
        # copy since this will contains the centered data
        check_consistent_length(X, Y)
        # BUGFIX: np.float was a deprecated alias of the builtin float and
        # was removed in NumPy 1.24; np.float64 is the equivalent dtype.
        X = check_array(X, dtype=np.float64, copy=self.copy)
        Y = check_array(Y, dtype=np.float64, copy=self.copy)
        p = X.shape[1]
        if self.n_components < 1 or self.n_components > p:
            raise ValueError('invalid number of components')
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
            _center_scale_xy(X, Y, self.scale)
        # svd(X'Y)
        C = np.dot(X.T, Y)
        # The arpack svds solver only works if the number of extracted
        # components is smaller than rank(X) - 1. Hence, if we want to extract
        # all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpacks to compute only the interesting components.
        if self.n_components == C.shape[1]:
            U, s, V = linalg.svd(C, full_matrices=False)
        else:
            U, s, V = arpack.svds(C, k=self.n_components)
        V = V.T
        self.x_scores_ = np.dot(X, U)
        self.y_scores_ = np.dot(Y, V)
        self.x_weights_ = U
        self.y_weights_ = V
        return self

    def transform(self, X, Y=None):
        """Apply the dimension reduction learned on the train data."""
        Xr = (X - self.x_mean_) / self.x_std_
        x_scores = np.dot(Xr, self.x_weights_)
        if Y is not None:
            Yr = (Y - self.y_mean_) / self.y_std_
            y_scores = np.dot(Yr, self.y_weights_)
            return x_scores, y_scores
        return x_scores

    def fit_transform(self, X, y=None, **fit_params):
        """Learn and apply the dimension reduction on the train data.
        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.
        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        return self.fit(X, y, **fit_params).transform(X, y)
| {
"content_hash": "02fa3d095d9213841403217363836c8b",
"timestamp": "",
"source": "github",
"line_count": 801,
"max_line_length": 79,
"avg_line_length": 35.96629213483146,
"alnum_prop": 0.5836023464889444,
"repo_name": "ankurankan/scikit-learn",
"id": "34859d666bb40806cedd2c942bc1bb02903919cb",
"size": "28809",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/cross_decomposition/pls_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18557197"
},
{
"name": "C++",
"bytes": "1810938"
},
{
"name": "CSS",
"bytes": "1503"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Makefile",
"bytes": "4897"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5708697"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
} |
import os.path
import sys
from requestbuilder import Arg
from requestbuilder.command import BaseCommand
from requestbuilder.mixins import (FileTransferProgressBarMixin,
RegionConfigurableMixin)
from euca2ools.bundle.pipes.core import (create_unbundle_pipeline,
copy_with_progressbar)
from euca2ools.bundle.util import open_pipe_fileobjs
from euca2ools.commands import Euca2ools
from euca2ools.commands.argtypes import filesize
class UnbundleStream(BaseCommand, FileTransferProgressBarMixin,
                     RegionConfigurableMixin):
    DESCRIPTION = ('Recreate an image solely from its combined bundled parts '
                   'without using a manifest\n\nUsually one would want to use '
                   'euca-unbundle instead.')
    SUITE = Euca2ools
    ARGS = [Arg('-i', dest='source', metavar='FILE',
                help='file to read the bundle from (default: stdin)'),
            Arg('-o', dest='dest', metavar='FILE',
                help='file to write the unbundled image to (default: stdout)'),
            Arg('--enc-key', metavar='HEX', required=True, help='''the
                symmetric key used to encrypt the bundle (required)'''),
            Arg('--enc-iv', metavar='HEX', required=True,
                help='''the initialization vector used to encrypt the bundle
                (required)'''),
            Arg('--image-size', metavar='BYTES', type=filesize,
                help='verify the unbundled image is a certain size'),
            Arg('--sha1-digest', metavar='HEX', help='''verify the image's
                contents against a SHA1 digest from its manifest file''')]

    # noinspection PyExceptionInherit
    def configure(self):
        """Resolve 'source'/'dest' args into file objects.

        '-' or a missing value maps to stdin/stdout; strings are treated
        as paths to open; anything else is assumed to already be a file
        object.
        """
        BaseCommand.configure(self)
        self.update_config_view()
        if not self.args.get('source') or self.args['source'] == '-':
            # We dup stdin because the multiprocessing lib closes it
            self.args['source'] = os.fdopen(os.dup(sys.stdin.fileno()))
        elif isinstance(self.args['source'], basestring):
            self.args['source'] = open(self.args['source'])
        # Otherwise, assume it is already a file object
        if not self.args.get('dest') or self.args['dest'] == '-':
            self.args['dest'] = sys.stdout
            # A progress bar would be interleaved with the image data on
            # stdout, so suppress it.
            self.args['show_progress'] = False
        elif isinstance(self.args['dest'], basestring):
            self.args['dest'] = open(self.args['dest'], 'w')
        # Otherwise, assume it is already a file object

    def main(self):
        """Unbundle the stream and verify it against the optional
        --sha1-digest and --image-size values.

        Returns (actual_sha1, actual_size); raises RuntimeError when a
        supplied digest or size does not match.
        """
        pbar = self.get_progressbar(maxval=self.args.get('image_size'))
        unbundle_out_r, unbundle_out_w = open_pipe_fileobjs()
        unbundle_sha1_r = create_unbundle_pipeline(
            self.args['source'], unbundle_out_w, self.args['enc_key'],
            self.args['enc_iv'], debug=self.debug)
        # The write end now belongs to the pipeline subprocess.
        unbundle_out_w.close()
        actual_size = copy_with_progressbar(unbundle_out_r, self.args['dest'],
                                            progressbar=pbar)
        actual_sha1 = unbundle_sha1_r.recv()
        unbundle_sha1_r.close()
        expected_sha1 = (self.args.get('sha1_digest') or '').lower()
        # BUGFIX: the old ``.strip('0x')`` removed *any* run of '0'/'x'
        # characters from both ends, corrupting digests that begin or end
        # with '0'.  Only a literal '0x' prefix should be dropped.
        if expected_sha1.startswith('0x'):
            expected_sha1 = expected_sha1[2:]
        if expected_sha1 and expected_sha1 != actual_sha1:
            self.log.error('rejecting unbundle due to SHA1 mismatch '
                           '(expected SHA1: %s, actual: %s)',
                           expected_sha1, actual_sha1)
            raise RuntimeError('bundle appears to be corrupt (expected SHA1: '
                               '{0}, actual: {1})'
                               .format(expected_sha1, actual_sha1))
        expected_size = self.args.get('image_size')
        if expected_size and expected_size != actual_size:
            self.log.error('rejecting unbundle due to size mismatch '
                           '(expected: %i, actual: %i)',
                           expected_size, actual_size)
            raise RuntimeError('bundle appears to be corrupt (expected size: '
                               '{0}, actual: {1})'
                               .format(expected_size, actual_size))
        return actual_sha1, actual_size
| {
"content_hash": "44fe9bc4dbb4ab23e86405b0a76f9984",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 50.2,
"alnum_prop": 0.5819076634637919,
"repo_name": "vasiliykochergin/euca2ools",
"id": "7481bc849fef93e7665d25a2b1ddfb9a1e3ba095",
"size": "5614",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "euca2ools/commands/bundle/unbundlestream.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1220919"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the integer choice field
    # `surf_level` (1=Beginner .. 4=PRO Surfer, default Beginner) to the
    # Profile model, building on the 0002 location migration.

    dependencies = [
        ('profiles', '0002_profile_location'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='surf_level',
            field=models.IntegerField(choices=[(1, 'Beginner'), (2, 'Intermediate'), (3, 'Advanced'), (4, 'PRO Surfer')], default=1),
        ),
    ]
| {
"content_hash": "9938cf851f4ed08d4c718c82e411f117",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 133,
"avg_line_length": 25.72222222222222,
"alnum_prop": 0.5896328293736501,
"repo_name": "Dude1983/surfapp",
"id": "265d005813fd3f67aedd5fd45e40a73479474415",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/profiles/migrations/0003_profile_surf_level.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3377"
},
{
"name": "HTML",
"bytes": "26188"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "36765"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the EnergiStream API Client documentation.
# Executed by sphinx-build; every module-level name below is a Sphinx setting.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'EnergiStream API Client'
copyright = u'2015, MelRok LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
# NOTE: sphinx_rtd_theme is a build-time dependency of the docs.
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EnergiScoreWebDocumentationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'EnergiScoreWebDocumentation.tex', u'EnergiScore Web Documentation Documentation',
     u'Harrison Fross', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'energiscorewebdocumentation', u'EnergiScore Web Documentation Documentation',
     [u'Harrison Fross'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'EnergiScoreWebDocumentation', u'EnergiScore Web Documentation Documentation',
     u'Harrison Fross', 'EnergiScoreWebDocumentation', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "4fa52117320d86557e823eeaec47fb32",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 94,
"avg_line_length": 32.03875968992248,
"alnum_prop": 0.7113476893297846,
"repo_name": "Melrok/energistream-py",
"id": "fd5865a5bbba4396f8f00181ed73ddf611452c7a",
"size": "8708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47981"
},
{
"name": "Shell",
"bytes": "2888"
}
],
"symlink_target": ""
} |
# Points Django (pre-3.2 convention) at the app's AppConfig subclass so it
# is picked up automatically when 'customers' is listed in INSTALLED_APPS.
default_app_config = 'customers.apps.CustomersConfig'
| {
"content_hash": "3e2381c08c595d9a304d69e2a8839166",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 53,
"avg_line_length": 54,
"alnum_prop": 0.8148148148148148,
"repo_name": "tomturner/django-tenants",
"id": "3417f161cc5adbd457def57525e3beb4bf403a77",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tenant_tutorial/customers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "166"
},
{
"name": "Dockerfile",
"bytes": "176"
},
{
"name": "HTML",
"bytes": "4964"
},
{
"name": "Python",
"bytes": "155369"
},
{
"name": "Shell",
"bytes": "853"
}
],
"symlink_target": ""
} |
import os
def write_to_config(username, host):
    """Write `username` and `host` to ~/.my.conf in INI format,
    overwriting any existing file."""
    config_path = os.path.join(os.path.expanduser("~"), '.my.conf')
    contents = '[DEFAULT]\nusername=%s\nhost=%s\n' % (username, host)
    with open(config_path, 'w') as config_file:
        config_file.write(contents)
| {
"content_hash": "45edde2777e9ab663d6437fa822a2b78",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 77,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.6133333333333333,
"repo_name": "esikachev/my-dev-client",
"id": "2202452ec14e8b0d1f0de667f8db6b43093c10db",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_dev/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12160"
}
],
"symlink_target": ""
} |
"""
"""
from .basic import BASIC
class ULONG(BASIC):
    """Unsigned-long parameter template type; all behavior is
    inherited unchanged from BASIC."""
def template():
    """Build and return the ULONG parameter template.

    'GPLong' is the name passed through to the BASIC constructor
    (presumably the ArcGIS geoprocessing data-type name -- confirm
    against the registry that consumes these templates).
    """
    parameter = ULONG('GPLong')
    return parameter
"content_hash": "027b1294ffb573ad96f8c35025d4b1f4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 26,
"avg_line_length": 9.545454545454545,
"alnum_prop": 0.638095238095238,
"repo_name": "geospatial-services-framework/gsfpyarc",
"id": "4de9dea0cdc064c1686bfe4bf8b797a8ae070048",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gsfarc/gptool/parameter/templates/ulong.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "IDL",
"bytes": "26862"
},
{
"name": "Python",
"bytes": "82842"
}
],
"symlink_target": ""
} |
from base import *
from rekening import Rekening
from rekening_hukum import DasarHukum
from urusan import Urusan
from fungsi import Fungsi
from fungsi_urusan import FungsiUrusan
from unit import Unit
from program import Program
from kegiatan import Kegiatan
if __name__ == '__main__':
    # Import every reference table, preserving the original ordering
    # (presumably dependency order: e.g. FungsiUrusan after Fungsi and
    # Urusan -- confirm before reordering).
    reference_models = (Rekening, DasarHukum, Urusan, Fungsi,
                        FungsiUrusan, Unit, Program, Kegiatan)
    for reference_model in reference_models:
        reference_model.import_data()
| {
"content_hash": "2f2036edb96705cd7f44500c54d88f5a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 38,
"avg_line_length": 25.2,
"alnum_prop": 0.7341269841269841,
"repo_name": "aagusti/o-sipkd",
"id": "8d12431347f75074ef2a428649a8ba4c549a5f4c",
"size": "504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/ref/referensi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "107334"
},
{
"name": "HTML",
"bytes": "1317001"
},
{
"name": "JavaScript",
"bytes": "983058"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "PLpgSQL",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "615248"
},
{
"name": "Shell",
"bytes": "241"
},
{
"name": "Smarty",
"bytes": "2003"
}
],
"symlink_target": ""
} |
class TestWsgiApp(object):
    """WSGI application that renders the process environment and the
    per-request WSGI environment as two HTML tables -- a debugging aid
    for OpenShift deployments.
    """

    def __init__(self, environment):
        # Process-level environment mapping (typically os.environ),
        # captured once at construction time.
        self.environment = environment

    def __call__(self, env, start_response):
        # NOTE(review): keys/values are interpolated into HTML unescaped;
        # fine for trusted environment variables, but consider html.escape
        # for anything attacker-influenced.
        # BUGFIX: the original emitted a stray </html> between </head> and
        # <body>, producing malformed HTML; the document now closes once,
        # at the end.
        lines = [
            """<!DOCTYPE html>
<html>
<head><title>Openshift WSGI Info</title></head>
<body>
<table style="width:100%">
<thead><tr><th colspan="2">Main Environment</th></tr>
<tr><th>Key</th><th>Value</th></tr>
</thead>
<tbody>
"""
        ]
        for k, v in self.environment.items():
            lines.append("<tr><td>{0}</td><td>{1}</td></tr>\n".format(k, v))
        lines.append("</tbody></table>")
        lines.append("""
<table style="width:100%">
<thead><tr><th colspan="2">Request Environment</th></tr>
<tr><th>Key</th><th>Value</th></tr>
</thead>
<tbody>
""")
        for k, v in env.items():
            lines.append("<tr><td>{0}</td><td>{1}</td></tr>\n".format(k, v))
        lines.append("</tbody></table>")
        lines.append("</body></html>")
        # Declare the content type; the original passed an empty header
        # list, leaving clients to guess.
        start_response('200 OK',
                       [('Content-Type', 'text/html; charset=utf-8')])
        return [bytes(l, 'utf-8') for l in lines]
import os
# WSGI callable picked up by the hosting server ("application" is the
# PEP 3333 / mod_wsgi naming convention -- presumably what OpenShift
# expects here; confirm against the deployment config).  os.environ is
# passed by reference, so the page reflects the live process environment.
application = TestWsgiApp(os.environ)
| {
"content_hash": "12b806402ef4483f664a51b9d880be13",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 28.625,
"alnum_prop": 0.5213973799126638,
"repo_name": "pduval/wsgitestoshift",
"id": "f4eff16b8b39d3475c4465c3d7ac8be0af75c8f9",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1245"
}
],
"symlink_target": ""
} |
import hashlib
import logging
import os
import codecs
from shutil import copyfileobj
import simplejson as json
from time import sleep
from nose.tools import nottest
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model.meta import Session
from subprocess import call
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.model import MorphologicalParser, MorphologicalParserBackup
from sqlalchemy.sql import desc
log = logging.getLogger(__name__)
def pretty_parses(parses):
    """Render rich morphological parses in a human-readable form.

    Each parse is a string of morphemes joined by ``-``, where every
    morpheme is its form, gloss (and possibly category) joined by
    U+2980 (the "rare delimiter").  Returns a list of strings shaped
    ``'form-form gloss-gloss'``.

    NOTE: assumes no morpheme form/gloss itself contains ``-``.
    """
    result = []
    for parse in parses:
        # One [form, gloss, ...] list per hyphen-delimited morpheme.
        morphemes = [morpheme.split(u'\u2980')
                     for morpheme in parse.split('-')]
        # Transpose so column 0 holds all forms and column 1 all glosses.
        # BUG FIX: the zip result must be materialized with list() -- on
        # Python 3, zip returns a non-subscriptable iterator.
        columns = list(zip(*morphemes))
        result.append(u'%s %s' % (u'-'.join(columns[0]),
                                  u'-'.join(columns[1])))
    return result
class TestMorphologicalparsersController(TestController):
"""Tests the morphologicalparsers controller. WARNING: the tests herein are pretty messy. The higher
ordered tests will fail if the previous tests have not been run.
"""
def __init__(self, *args, **kwargs):
TestController.__init__(self, *args, **kwargs)
self.blackfoot_phonology_script = h.normalize(
codecs.open(self.test_phonology_script_path, 'r', 'utf8').read())
    def tearDown(self):
        # Deliberate no-op: per the class docstring, the higher-ordered
        # tests depend on state created by earlier tests, so the inherited
        # TestController teardown (which presumably resets that state --
        # confirm in TestController) must not run between tests.
        pass
def create_form(self, tr, mb, mg, tl, cat):
params = self.form_create_params.copy()
params.update({'transcription': tr, 'morpheme_break': mb, 'morpheme_gloss': mg,
'translations': [{'transcription': tl, 'grammaticality': u''}], 'syntactic_category': cat})
params = json.dumps(params)
self.app.post(url('forms'), params, self.json_headers, self.extra_environ_admin)
def human_readable_seconds(self, seconds):
return u'%02dm%02ds' % (seconds / 60, seconds % 60)
@nottest
def test_a_general(self):
"""General purpose test for morphological parsers.
This is a lengthy, linear test. Here is an overview:
1. create application settings
2. create forms
3. create 2 morphologies, one with impoverished morpheme representations
4. create a phonology
5. create 2 language models, one categorial
6. create 4 parsers -- all combinations of +-impoverished and +-categorial
TODO: test servecompiled
"""
# Create the default application settings -- note that we have only one morpheme delimiter.
# This is relevant to the morphemic language model.
application_settings = h.generate_default_application_settings()
application_settings.morpheme_delimiters = u'-'
Session.add(application_settings)
Session.commit()
# Create some syntactic categories
cats = {
'N': model.SyntacticCategory(name=u'N'),
'V': model.SyntacticCategory(name=u'V'),
'AGR': model.SyntacticCategory(name=u'AGR'),
'Agr': model.SyntacticCategory(name=u'Agr'),
'PHI': model.SyntacticCategory(name=u'PHI'),
'S': model.SyntacticCategory(name=u'S'),
'D': model.SyntacticCategory(name=u'D')
}
Session.add_all(cats.values())
Session.commit()
cats = dict([(k, v.id) for k, v in cats.iteritems()])
dataset = (
('chien', 'chien', 'dog', 'dog', cats['N']),
('chat', 'chat', 'cat', 'cat', cats['N']),
('oiseau', 'oiseau', 'bird', 'bird', cats['N']),
('cheval', 'cheval', 'horse', 'horse', cats['N']),
('vache', 'vache', 'cow', 'cow', cats['N']),
('grenouille', 'grenouille', 'frog', 'frog', cats['N']),
('tortue', 'tortue', 'turtle', 'turtle', cats['N']),
('fourmi', 'fourmi', 'ant', 'ant', cats['N']),
('poule!t', 'poule!t', 'chicken', 'chicken', cats['N']), # note the ! which is a foma reserved symbol
(u'be\u0301casse', u'be\u0301casse', 'woodcock', 'woodcock', cats['N']),
('parle', 'parle', 'speak', 'speak', cats['V']),
('grimpe', 'grimpe', 'climb', 'climb', cats['V']),
('nage', 'nage', 'swim', 'swim', cats['V']),
('tombe', 'tombe', 'fall', 'fall', cats['V']),
('le', 'le', 'the', 'the', cats['D']),
('la', 'la', 'the', 'the', cats['D']),
('s', 's', 'PL', 'plural', cats['PHI']),
('ait', 'ait', '3SG.IMPV', 'third person singular imperfective', cats['AGR']),
('ait', 'ait', '3IMP', 'third person imparfait', cats['Agr']),
('aient', 'aient', '3PL.IMPV', 'third person plural imperfective', cats['AGR']),
('Les chats nageaient.', 'le-s chat-s nage-aient', 'the-PL cat-PL swim-3PL.IMPV',
'The cats were swimming.', cats['S']),
('La tortue parlait', 'la tortue parle-ait', 'the turtle speak-3SG.IMPV',
'The turtle was speaking.', cats['S']),
('La tortue tombait', 'la tortue tombe-ait', 'the turtle fall-3SG.IMPV',
'The turtle was falling.', cats['S']),
('Le chien parlait', 'le chien parle-ait', 'the dog speak-3IMP',
'The dog was speaking.', cats['S'])
)
for tuple_ in dataset:
self.create_form(*map(unicode, tuple_))
# Create a form search model that returns lexical items (will be used to create the lexicon corpus)
query = {'filter': ['Form', 'syntactic_category', 'name', 'in', [u'N', u'V', u'AGR', u'PHI', u'D', u'Agr']]}
params = self.form_search_create_params.copy()
params.update({
'name': u'Find morphemes',
'search': query
})
params = json.dumps(params)
response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)
lexicon_form_search_id = json.loads(response.body)['id']
# Create the lexicon corpus
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of lexical items',
'form_search': lexicon_form_search_id
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
lexicon_corpus_id = json.loads(response.body)['id']
# Create a form search model that returns sentences (will be used to create the rules corpus)
query = {'filter': ['Form', 'syntactic_category', 'name', '=', u'S']}
params = self.form_search_create_params.copy()
params.update({
'name': u'Find sentences',
'description': u'Returns all sentential forms',
'search': query
})
params = json.dumps(params)
response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)
rules_form_search_id = json.loads(response.body)['id']
# Create the rules corpus
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of sentences',
'form_search': rules_form_search_id
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
rules_corpus_id = json.loads(response.body)['id']
# Create a morphology using the lexicon and rules corpora
name = u'Morphology of a very small subset of french'
morphology_params = self.morphology_create_params.copy()
morphology_params.update({
'name': name,
'lexicon_corpus': lexicon_corpus_id,
'rules_corpus': rules_corpus_id,
'script_type': 'regex'
})
morphology_params = json.dumps(morphology_params)
response = self.app.post(url('morphologies'), morphology_params,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morphology_id = resp['id']
assert resp['name'] == name
assert resp['script_type'] == u'regex'
# If foma is not installed, make sure the error message is being returned
# and exit the test.
if not h.foma_installed(force_check=True):
response = self.app.put(url(controller='morphologies', action='generate_and_compile',
id=morphology_id), headers=self.json_headers,
extra_environ=self.extra_environ_contrib, status=400)
resp = json.loads(response.body)
assert resp['error'] == u'Foma and flookup are not installed.'
return
# Compile the morphology's script
response = self.app.put(url(controller='morphologies', action='generate_and_compile',
id=morphology_id), headers=self.json_headers,
extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
compile_attempt = resp['compile_attempt']
# Poll ``GET /morphologies/morphology_id`` until ``compile_attempt`` has changed.
requester = lambda: self.app.get(url('morphology', id=morphology_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', compile_attempt, log, wait=1,
vocal=True, task_descr='compile morphology %s' % morphology_id)
assert resp['compile_message'] == \
u'Compilation process terminated successfully and new binary file was written.'
response = self.app.get(url('morphology', id=morphology_id), params={'script': u'1', 'lexicon': u'1'},
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
morphology_dir = os.path.join(self.morphologies_path, 'morphology_%d' % morphology_id)
morphology_binary_filename = 'morphology.foma'
morphology_dir_contents = os.listdir(morphology_dir)
morphology_script_path = os.path.join(morphology_dir, 'morphology.script')
morphology_script = codecs.open(morphology_script_path, mode='r', encoding='utf8').read()
assert u'define morphology' in morphology_script
assert u'(NCat)' in morphology_script # cf. tortue
assert u'(DCat)' in morphology_script # cf. la
assert u'(NCat "-" PHICat)' in morphology_script # cf. chien-s
assert u'(DCat "-" PHICat)' in morphology_script # cf. le-s
assert u'(VCat "-" AGRCat)' in morphology_script # cf. nage-aient, parle-ait
assert u'c h a t "%scat%sN":0' % (h.rare_delimiter, h.rare_delimiter) in morphology_script # cf. extract_morphemes_from_rules_corpus = False and chat's exclusion from the lexicon corpus
assert u'c h i e n "%sdog%sN":0' % (h.rare_delimiter, h.rare_delimiter) in morphology_script
assert u'b e \u0301 c a s s e "%swoodcock%sN":0' % (h.rare_delimiter, h.rare_delimiter) in morphology_script
assert resp['compile_succeeded'] == True
assert resp['compile_message'] == u'Compilation process terminated successfully and new binary file was written.'
assert morphology_binary_filename in morphology_dir_contents
assert resp['modifier']['role'] == u'contributor'
rules = resp['rules_generated']
assert u'D' in rules # cf. le
assert u'N' in rules # cf. tortue
assert u'D-PHI' in rules # cf. le-s
assert u'N-PHI' in rules # cf. chien-s
assert u'V-AGR' in rules # cf. nage-aient, parle-ait
assert 'lexicon' in resp
assert 'script' in resp
assert resp['script'] == morphology_script
assert [u'chat', u'cat'] in resp['lexicon']['N']
assert [u'chien', u'dog'] in resp['lexicon']['N']
# Test GET /morphologies/1?script=1&lexicon=1 and make sure the script and lexicon are returned
response = self.app.get(url('morphology', id=morphology_id), params={'script': u'1', 'lexicon': u'1'},
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
assert resp['script'] == morphology_script
lexicon = resp['lexicon']
assert ['s', 'PL'] in lexicon['PHI']
assert ['oiseau', 'bird'] in lexicon['N']
assert ['aient', '3PL.IMPV'] in lexicon['AGR']
assert ['la', 'the'] in lexicon['D']
assert ['nage', 'swim'] in lexicon['V']
################################################################################
# BEGIN IMPOVERISHED REPRESENTATION MORPHOLOGY
################################################################################
# Create a new morphology, this time one that parses to impoverished representations.
impoverished_name = u'Morphology of a very small subset of french, impoverished morphemes'
morphology_params = self.morphology_create_params.copy()
morphology_params.update({
'name': impoverished_name,
'lexicon_corpus': lexicon_corpus_id,
'rules_corpus': rules_corpus_id,
'script_type': 'regex',
'rich_upper': False
})
morphology_params = json.dumps(morphology_params)
response = self.app.post(url('morphologies'), morphology_params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
impoverished_morphology_id = resp['id']
assert resp['name'] == impoverished_name
assert resp['script_type'] == u'regex'
# Compile the morphology's script
response = self.app.put(url(controller='morphologies', action='generate_and_compile',
id=impoverished_morphology_id), headers=self.json_headers,
extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
compile_attempt = resp['compile_attempt']
# Poll ``GET /morphologies/morphology_id`` until ``compile_attempt`` has changed.
requester = lambda: self.app.get(url('morphology', id=impoverished_morphology_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', compile_attempt, log, wait=1,
vocal=True, task_descr='compile morphology %s' % impoverished_morphology_id)
assert resp['compile_message'] == \
u'Compilation process terminated successfully and new binary file was written.'
response = self.app.get(url('morphology', id=impoverished_morphology_id), params={'script': u'1', 'lexicon': u'1'},
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
morphology_dir = os.path.join(self.morphologies_path, 'morphology_%d' % impoverished_morphology_id)
morphology_binary_filename = 'morphology.foma'
morphology_dir_contents = os.listdir(morphology_dir)
morphology_script_path = os.path.join(morphology_dir, 'morphology.script')
morphology_script = codecs.open(morphology_script_path, mode='r', encoding='utf8').read()
assert u'define morphology' in morphology_script
assert u'(NCat)' in morphology_script # cf. tortue
assert u'(DCat)' in morphology_script # cf. la
assert u'(NCat "-" PHICat)' in morphology_script # cf. chien-s
assert u'(DCat "-" PHICat)' in morphology_script # cf. le-s
assert u'(VCat "-" AGRCat)' in morphology_script # cf. nage-aient, parle-ait
assert u'c h a t' in morphology_script # cf. extract_morphemes_from_rules_corpus = False and chat's exclusion from the lexicon corpus
assert u'c h i e n' in morphology_script
assert u'b e \u0301 c a s s e' in morphology_script
assert resp['compile_succeeded'] == True
assert resp['compile_message'] == u'Compilation process terminated successfully and new binary file was written.'
assert morphology_binary_filename in morphology_dir_contents
assert resp['modifier']['role'] == u'contributor'
rules = resp['rules_generated']
assert u'D' in rules # cf. le
assert u'N' in rules # cf. tortue
assert u'D-PHI' in rules # cf. le-s
assert u'N-PHI' in rules # cf. chien-s
assert u'V-AGR' in rules # cf. nage-aient, parle-ait
assert 'lexicon' in resp
assert 'script' in resp
assert resp['script'] == morphology_script
assert [u'chat', u'cat'] in resp['lexicon']['N']
assert [u'chien', u'dog'] in resp['lexicon']['N']
# Test GET /morphologies/1?script=1&lexicon=1 and make sure the script and lexicon are returned
response = self.app.get(url('morphology', id=impoverished_morphology_id), params={'script': u'1', 'lexicon': u'1'},
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
assert resp['script'] == morphology_script
lexicon = resp['lexicon']
assert ['s', 'PL'] in lexicon['PHI']
assert ['oiseau', 'bird'] in lexicon['N']
assert ['aient', '3PL.IMPV'] in lexicon['AGR']
assert ['la', 'the'] in lexicon['D']
assert ['nage', 'swim'] in lexicon['V']
################################################################################
# END IMPOVERISHED REPRESENTATION MORPHOLOGY
################################################################################
# Create a very simple French phonology
script = u'''
define eDrop e -> 0 || _ "-" a;
define breakDrop "-" -> 0;
define phonology eDrop .o. breakDrop;
'''
params = self.phonology_create_params.copy()
params.update({
'name': u'Phonology',
'description': u'Covers a lot of the data.',
'script': script
})
params = json.dumps(params)
response = self.app.post(url('phonologies'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
phonology_id = resp['id']
################################################################################
# LANGUAGE MODEL 1
################################################################################
# Create a corpus heavily stacked towards tombe|fall-ait|3SG.IMPV and V-AGR
# as opposed to tombe|fall-ait|3IMP and V-Agr.
sentences = Session.query(model.Form).filter(model.Form.syntactic_category.has(
model.SyntacticCategory.name==u'S')).all()
target_id = [s for s in sentences if s.transcription == u'La tortue tombait'][0].id
sentence_ids = [s.id for s in sentences] + [target_id] * 100
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of sentences with lots of form %s' % target_id,
'content': u','.join(map(unicode, sentence_ids))
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
lm_corpus_id = json.loads(response.body)['id']
# Create the LM using lm_corpus
name = u'Morpheme language model'
params = self.morpheme_language_model_create_params.copy()
params.update({
'name': name,
'corpus': lm_corpus_id,
'toolkit': 'mitlm'
})
params = json.dumps(params)
response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morpheme_language_model_id = resp['id']
assert resp['name'] == name
assert resp['toolkit'] == u'mitlm'
assert resp['order'] == 3
assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM
# Generate the files of the language model
response = self.app.put(url(controller='morphemelanguagemodels', action='generate', id=morpheme_language_model_id),
{}, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
lm_generate_attempt = resp['generate_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphemelanguagemodel', id=morpheme_language_model_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)
assert resp['generate_message'] == u'Language model successfully generated.'
################################################################################
# LANGUAGE MODEL 2 -- CATEGORIAL
################################################################################
categorial_lm_name = u'Morpheme language model -- categorial'
params = self.morpheme_language_model_create_params.copy()
params.update({
'name': categorial_lm_name,
'corpus': lm_corpus_id,
'toolkit': 'mitlm',
'categorial': True
})
params = json.dumps(params)
response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
categorial_language_model_id = resp['id']
assert resp['name'] == categorial_lm_name
assert resp['toolkit'] == u'mitlm'
assert resp['order'] == 3
assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM
assert resp['categorial'] == True
# Generate the files of the language model
response = self.app.put(url(controller='morphemelanguagemodels', action='generate',
id=categorial_language_model_id),
{}, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
lm_generate_attempt = resp['generate_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphemelanguagemodel', id=categorial_language_model_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)
assert resp['generate_message'] == u'Language model successfully generated.'
################################################################################
# TRANSCRIPTIONS & PARSES
################################################################################
transcription1 = u'tombait'
transcription1_correct_parse = u'%s-%s' % (
h.rare_delimiter.join([u'tombe', u'fall', u'V']),
h.rare_delimiter.join([u'ait', u'3SG.IMPV', u'AGR']))
transcription1_alt_parse = u'%s-%s' % (
h.rare_delimiter.join([u'tombe', u'fall', u'V']),
h.rare_delimiter.join([u'ait', u'3IMP', u'Agr']))
transcription1_impoverished_parse = u'tombe-ait'
transcription2 = u'tombeait'
transcription3 = u'chiens'
transcription3_correct_parse = u'%s-%s' % (
h.rare_delimiter.join([u'chien', u'dog', u'N']),
h.rare_delimiter.join([u's', u'PL', u'PHI']))
transcription3_impoverished_parse = u'chiens-s'
################################################################################
# MORPHOLOGICAL PARSER 1
################################################################################
# Create a morphological parser for toy french
params = self.morphological_parser_create_params.copy()
params.update({
'name': u'Morphological parser for toy French',
'phonology': phonology_id,
'morphology': morphology_id,
'language_model': morpheme_language_model_id
})
params = json.dumps(params)
response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_id = resp['id']
# Generate the parser's morphophonology FST and compile it.
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.
while True:
response = self.app.get(url('morphologicalparser', id=morphological_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
if morphological_parser_compile_attempt != resp['compile_attempt']:
log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)
break
else:
log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)
sleep(1)
# Test applyup on the mophological parser's morphophonology FST
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='applyup',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1_correct_parse in resp[transcription1]
assert len(resp[transcription1]) == 2
assert resp[transcription2] == []
# Test how well the morphological parser parses some test words.
# In-memory cache will result in the second request to parse transcription 1
# being accomplished via dict lookup. Parses for both transcriptions 1 and 2
# will be persisted across requests in the ``parse`` table.
params = json.dumps({'transcriptions': [transcription1, transcription1, transcription3]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_correct_parse
assert resp[transcription3] == transcription3_correct_parse
# Make the same parse request again. This time the persistent cache will be used
# and all of the parses returned will be from the cache, i.e., no subprocesses to
# flookup will be initiated.
params = json.dumps({'transcriptions': [transcription1, transcription1, transcription3, 'abc']})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_correct_parse
assert resp[transcription3] == transcription3_correct_parse
assert resp['abc'] == None
################################################################################
# END MORPHOLOGICAL PARSER 1
################################################################################
################################################################################
# MORPHOLOGICAL PARSER 2
################################################################################
# Create an impoverished morphemes morphological parser for toy french
params = self.morphological_parser_create_params.copy()
params.update({
'name': u'Morphological parser for toy French, impoverished morphemes',
'phonology': phonology_id,
'morphology': impoverished_morphology_id,
'language_model': morpheme_language_model_id
})
params = json.dumps(params)
response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_id = resp['id']
# Generate the parser's morphophonology FST and compile it.
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.
while True:
response = self.app.get(url('morphologicalparser', id=morphological_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
if morphological_parser_compile_attempt != resp['compile_attempt']:
log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)
break
else:
log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)
sleep(1)
# Test applyup on the mophological parser's morphophonology FST
# Because the morphology returns impoverished representations, the morphophonology_
# will too.
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='applyup',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1_impoverished_parse in resp[transcription1]
assert len(resp[transcription1]) == 1
assert resp[transcription2] == []
# Test applydown on the mophological parser's morphophonology FST
params = json.dumps({'morpheme_sequences': [transcription1_impoverished_parse]})
response = self.app.put(url(controller='morphologicalparsers', action='applydown',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1 in resp[transcription1_impoverished_parse]
# Test how well the morphological parser parses some test words.
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
# Note how the rich representation is always returned by a parser even if its morphophonology
# returns impoverished ones. The ``parse`` action disambiguates the morphemic analysis received
# from the morphophonology before selecting the most probable candidate.
assert resp[transcription1] == transcription1_correct_parse
################################################################################
# END MORPHOLOGICAL PARSER 2
################################################################################
################################################################################
# MORPHOLOGICAL PARSER 3 -- categorial LM
################################################################################
# Create categorial LM morphological parser for toy french
params = self.morphological_parser_create_params.copy()
params.update({
'name': u'Morphological parser for toy French, categorial LM',
'phonology': phonology_id,
'morphology': morphology_id,
'language_model': categorial_language_model_id
})
params = json.dumps(params)
response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_id = resp['id']
# Generate the parser's morphophonology FST and compile it.
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.
while True:
response = self.app.get(url('morphologicalparser', id=morphological_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
if morphological_parser_compile_attempt != resp['compile_attempt']:
log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)
break
else:
log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)
sleep(1)
# Test applyup on the mophological parser's morphophonology FST. Everything should
# work just like parser #1.
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='applyup',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1_correct_parse in resp[transcription1]
assert len(resp[transcription1]) == 2
assert resp[transcription2] == []
# Test applydown on the mophological parser's morphophonology FST
params = json.dumps({'morpheme_sequences': [transcription1_correct_parse]})
response = self.app.put(url(controller='morphologicalparsers', action='applydown',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1 in resp[transcription1_correct_parse]
# Test how well the morphological parser parses some test words.
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
# There is only one possible parse for transcription 1 -- it is de facto the most probable
assert resp[transcription1] == transcription1_correct_parse
################################################################################
# END MORPHOLOGICAL PARSER 3
################################################################################
################################################################################
# MORPHOLOGICAL PARSER 4 -- categorial LM & impoverished morphology
################################################################################
# Create categorial LM, impoverished morphology morphological parser for toy french
params = self.morphological_parser_create_params.copy()
params.update({
'name': u'Morphological parser for toy French, categorial LM, impoverished morphology',
'phonology': phonology_id,
'morphology': impoverished_morphology_id,
'language_model': categorial_language_model_id
})
params = json.dumps(params)
response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_id = parser_4_id = resp['id']
# Generate the parser's morphophonology FST and compile it.
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.
while True:
response = self.app.get(url('morphologicalparser', id=morphological_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
if morphological_parser_compile_attempt != resp['compile_attempt']:
log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)
break
else:
log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)
sleep(1)
# Test applyup on the morphological parser's morphophonology FST. Expect to get morpheme
# form sequences.
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='applyup',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1_impoverished_parse in resp[transcription1]
assert len(resp[transcription1]) == 1
assert resp[transcription2] == []
# Test applydown on the morphological parser's morphophonology FST
params = json.dumps({'morpheme_sequences': [transcription1_impoverished_parse]})
response = self.app.put(url(controller='morphologicalparsers', action='applydown',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert transcription1 in resp[transcription1_impoverished_parse]
# Test how well the morphological parser parses some test words.
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
# parse time disambiguation and categorial LM application should all conspire to return the correct parse...
assert resp[transcription1] == transcription1_correct_parse
################################################################################
# END MORPHOLOGICAL PARSER 4
################################################################################
################################################################################
# TEST PARSER DEPENDENCY REPLICATION
################################################################################
# Vacuously re-generate and re-compile the parser
################################################################################
# Show that the cache will not be cleared.
parser_4_parses = sorted([parse.transcription for parse in Session.query(model.Parse).\
filter(model.Parse.parser_id==parser_4_id).all()])
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=parser_4_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll GET /morphologicalparsers/id until compile_attempt changes.
requester = lambda: self.app.get(url('morphologicalparser', id=parser_4_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt,
log, wait=1, vocal=True, task_descr='compile parser %s' % parser_4_id)
# Perform the same parse request as previously and expect the same results.
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=parser_4_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_correct_parse
parser_4_parses_now = sorted([parse.transcription for parse in Session.query(model.Parse).\
filter(model.Parse.parser_id==parser_4_id).all()])
assert parser_4_parses == parser_4_parses_now
# Update the parser's LM
################################################################################
# The parsing behaviour of the parser will not change because it has not been
# re-generated or re-compiled.
# For the updated LM, create a new corpus heavily stacked towards V-Agr.
sentences = Session.query(model.Form).filter(model.Form.syntactic_category.has(
model.SyntacticCategory.name==u'S')).all()
# The sentence below is analyzed using an Agr-categorized suffix
target_id = [s for s in sentences if s.transcription == u'Le chien parlait'][0].id
sentence_ids = [s.id for s in sentences] + [target_id] * 100
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of sentences with lots of form %s' % target_id,
'content': u','.join(map(unicode, sentence_ids))
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
lm_corpus_2_id = json.loads(response.body)['id']
# update the categorial LM so that its corpus is the newly created one.
params = self.morpheme_language_model_create_params.copy()
params.update({
'name': categorial_lm_name,
'corpus': lm_corpus_2_id, # HERE IS THE CHANGE
'toolkit': 'mitlm',
'categorial': True
})
params = json.dumps(params)
response = self.app.put(url('morphemelanguagemodel', id=categorial_language_model_id),
params, self.json_headers, self.extra_environ_admin)
# Request that the files of the language model be generated anew; this will create a new
# LMTree pickle file.
response = self.app.put(url(controller='morphemelanguagemodels', action='generate',
id=categorial_language_model_id),
{}, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
lm_generate_attempt = resp['generate_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphemelanguagemodel', id=categorial_language_model_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)
assert resp['generate_message'] == u'Language model successfully generated.'
# Now if we try to parse "tombait" using parser #4 we will still get the V-AGR parse
# even though the LM associated to that parser (the categorial one) has been changed to be
# weighted heavily towards the V-Agr parse.
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=parser_4_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_correct_parse
# Request probabilities from the just re-generated LM and expect V-Agr to be higher.
likely_word = u'V Agr'
unlikely_word = u'V AGR'
ms_params = json.dumps({'morpheme_sequences': [likely_word, unlikely_word]})
response = self.app.put(url(controller='morphemelanguagemodels', action='get_probabilities',
id=categorial_language_model_id), ms_params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
likely_word_log_prob = resp[likely_word]
unlikely_word_log_prob = resp[unlikely_word]
assert likely_word_log_prob > unlikely_word_log_prob
# Re-generate and re-compile the parser
################################################################################
# Expect it to now parse tombait as tombe-ait fall-3IMP V-Agr
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=parser_4_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphologicalparser', id=parser_4_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt,
log, wait=1, vocal=True, task_descr='compile parser %s' % parser_4_id)
# Perform the same parse request as above and expect different results.
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=parser_4_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] != transcription1_correct_parse
assert resp[transcription1] == transcription1_alt_parse
# Delete the parser's LM
################################################################################
# Expect it to still work as previously.
response = self.app.delete(url('morphemelanguagemodel', id=categorial_language_model_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
params = json.dumps({'transcriptions': [transcription1]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_alt_parse
# If we re-generate and re-compile, the compile will succeed (since it requires only a
# phonology and a morphology) while the generate attempt will fail because there
# will be no LM object to copy attribute values and file objects from.
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=parser_4_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphologicalparser', id=parser_4_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt,
log, wait=1, vocal=True, task_descr='compile parser %s' % parser_4_id)
assert resp['compile_succeeded'] == True
assert resp['generate_succeeded'] == False
# Test GET /morphologicalparsers
################################################################################
morphological_parsers = Session.query(MorphologicalParser).all()
# Get all morphological parsers
response = self.app.get(url('morphologicalparsers'), headers=self.json_headers, extra_environ=self.extra_environ_view)
resp = json.loads(response.body)
assert len(resp) == 4
# Test the paginator GET params.
paginator = {'items_per_page': 1, 'page': 1}
response = self.app.get(url('morphologicalparsers'), paginator, headers=self.json_headers,
extra_environ=self.extra_environ_view)
resp = json.loads(response.body)
assert len(resp['items']) == 1
assert resp['items'][0]['name'] == morphological_parsers[0].name
assert response.content_type == 'application/json'
# Test the order_by GET params.
order_by_params = {'order_by_model': 'MorphologicalParser', 'order_by_attribute': 'id',
'order_by_direction': 'desc'}
response = self.app.get(url('morphologicalparsers'), order_by_params,
headers=self.json_headers, extra_environ=self.extra_environ_view)
resp = json.loads(response.body)
assert resp[0]['id'] == morphological_parsers[-1].id
assert response.content_type == 'application/json'
# Test the order_by *with* paginator.
params = {'order_by_model': 'MorphologicalParser', 'order_by_attribute': 'id',
'order_by_direction': 'desc', 'items_per_page': 1, 'page': 4}
response = self.app.get(url('morphologicalparsers'), params,
headers=self.json_headers, extra_environ=self.extra_environ_view)
resp = json.loads(response.body)
assert morphological_parsers[0].name == resp['items'][0]['name']
# Expect a 400 error when the order_by_direction param is invalid
order_by_params = {'order_by_model': 'MorphologicalParser', 'order_by_attribute': 'name',
'order_by_direction': 'descending'}
response = self.app.get(url('morphologicalparsers'), order_by_params, status=400,
headers=self.json_headers, extra_environ=self.extra_environ_view)
resp = json.loads(response.body)
assert resp['errors']['order_by_direction'] == u"Value must be one of: asc; desc (not u'descending')"
assert response.content_type == 'application/json'
# Test that GET /morphologicalparsers/<id> works correctly.
# Try to get a morphological parser using an invalid id
id = 100000000000
response = self.app.get(url('morphologicalparser', id=id),
headers=self.json_headers, extra_environ=self.extra_environ_admin, status=404)
resp = json.loads(response.body)
assert u'There is no morphological parser with id %s' % id in json.loads(response.body)['error']
assert response.content_type == 'application/json'
# No id
response = self.app.get(url('morphologicalparser', id=''), status=404,
headers=self.json_headers, extra_environ=self.extra_environ_admin)
assert json.loads(response.body)['error'] == 'The resource could not be found.'
assert response.content_type == 'application/json'
# Valid id
response = self.app.get(url('morphologicalparser', id=morphological_parsers[0].id), headers=self.json_headers,
extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert resp['name'] == morphological_parsers[0].name
assert resp['description'] == morphological_parsers[0].description
assert response.content_type == 'application/json'
# Tests that GET /morphologicalparsers/new and GET /morphologicalparsers/id/edit return
# the data needed to create or update a morphological parser.
# Test GET /morphologicalparsers/new
response = self.app.get(url('new_morphologicalparser'), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp['phonologies']) == 1
assert len(resp['morphologies']) == 2
assert len(resp['morpheme_language_models']) == 1
# Not logged in: expect 401 Unauthorized
response = self.app.get(url('edit_morphologicalparser', id=morphological_parsers[0].id), status=401)
resp = json.loads(response.body)
assert resp['error'] == u'Authentication is required to access this resource.'
assert response.content_type == 'application/json'
# Invalid id
id = 9876544
response = self.app.get(url('edit_morphologicalparser', id=id),
headers=self.json_headers, extra_environ=self.extra_environ_admin,
status=404)
assert u'There is no morphological parser with id %s' % id in json.loads(response.body)['error']
assert response.content_type == 'application/json'
# No id
response = self.app.get(url('edit_morphologicalparser', id=''), status=404,
headers=self.json_headers, extra_environ=self.extra_environ_admin)
assert json.loads(response.body)['error'] == 'The resource could not be found.'
assert response.content_type == 'application/json'
# Valid id
response = self.app.get(url('edit_morphologicalparser', id=morphological_parsers[0].id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert resp['morphological_parser']['name'] == morphological_parsers[0].name
assert len(resp['data']['phonologies']) == 1
assert len(resp['data']['morphologies']) == 2
assert len(resp['data']['morpheme_language_models']) == 1
assert response.content_type == 'application/json'
# Tests that PUT /morphologicalparsers/id updates the morphological parser with id=id.
foma_installed = h.foma_installed(force_check=True)
morphological_parsers = [json.loads(json.dumps(m, cls=h.JSONOLDEncoder))
for m in Session.query(MorphologicalParser).all()]
morphological_parser_1_id = morphological_parsers[0]['id']
morphological_parser_1_name = morphological_parsers[0]['name']
morphological_parser_1_description = morphological_parsers[0]['description']
morphological_parser_1_modified = morphological_parsers[0]['datetime_modified']
morphological_parser_1_phonology_id = morphological_parsers[0]['phonology']['id']
morphological_parser_1_morphology_id = morphological_parsers[0]['morphology']['id']
morphological_parser_1_lm_id = morphological_parsers[0]['language_model']['id']
morphological_parser_count = len(morphological_parsers)
morphological_parser_1_dir = os.path.join(self.morphological_parsers_path,
'morphological_parser_%d' % morphological_parser_1_id)
morphological_parser_1_morphophonology_path = os.path.join(
morphological_parser_1_dir, 'morphophonology.script')
if foma_installed:
morphology_1_morphophonology = codecs.open(morphological_parser_1_morphophonology_path,
mode='r', encoding='utf8').read()
# Update the first morphological parser.
original_backup_count = Session.query(MorphologicalParserBackup).count()
params = self.morphology_create_params.copy()
params.update({
'name': morphological_parser_1_name,
'description': u'New description',
'phonology': morphological_parser_1_phonology_id,
'morphology': morphological_parser_1_morphology_id,
'language_model': morphological_parser_1_lm_id
})
params = json.dumps(params)
response = self.app.put(url('morphologicalparser', id=morphological_parser_1_id), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
new_backup_count = Session.query(MorphologicalParserBackup).count()
datetime_modified = resp['datetime_modified']
new_morphological_parser_count = Session.query(MorphologicalParser).count()
assert morphological_parser_count == new_morphological_parser_count
assert datetime_modified != morphological_parser_1_modified
assert resp['description'] == u'New description'
assert response.content_type == 'application/json'
assert original_backup_count + 1 == new_backup_count
backup = Session.query(MorphologicalParserBackup).filter(
MorphologicalParserBackup.UUID==unicode(
resp['UUID'])).order_by(
desc(MorphologicalParserBackup.id)).first()
assert backup.datetime_modified.isoformat() == morphological_parser_1_modified
assert backup.description == morphological_parser_1_description
assert response.content_type == 'application/json'
# Attempt an update with no new input and expect to fail
response = self.app.put(url('morphologicalparser', id=morphological_parser_1_id), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
morphological_parser_count = new_morphological_parser_count
new_morphological_parser_count = Session.query(MorphologicalParser).count()
our_morphological_parser_datetime_modified = Session.query(
MorphologicalParser).get(morphological_parser_1_id).datetime_modified
assert our_morphological_parser_datetime_modified.isoformat() == datetime_modified
assert morphological_parser_count == new_morphological_parser_count
assert resp['error'] == u'The update request failed because the submitted data were not new.'
assert response.content_type == 'application/json'
# Update the first morphological parser again.
original_backup_count = new_backup_count
params = self.morphology_create_params.copy()
params.update({
'name': morphological_parser_1_name,
'description': u'Newer description',
'phonology': morphological_parser_1_phonology_id,
'morphology': morphological_parser_1_morphology_id,
'language_model': morphological_parser_1_lm_id
})
params = json.dumps(params)
response = self.app.put(url('morphologicalparser', id=morphological_parser_1_id), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
new_backup_count = Session.query(MorphologicalParserBackup).count()
datetime_modified = resp['datetime_modified']
morphological_parser_count = new_morphological_parser_count
new_morphological_parser_count = Session.query(MorphologicalParser).count()
assert morphological_parser_count == new_morphological_parser_count
assert resp['description'] == u'Newer description'
assert response.content_type == 'application/json'
assert original_backup_count + 1 == new_backup_count
backup = Session.query(MorphologicalParserBackup).filter(
MorphologicalParserBackup.UUID==unicode(
resp['UUID'])).order_by(
desc(MorphologicalParserBackup.id)).first()
assert backup.datetime_modified.isoformat() == our_morphological_parser_datetime_modified.isoformat()
assert backup.description == u'New description'
assert response.content_type == 'application/json'
# Tests that GET /morphologicalparsers/id/history returns the morphological parser with id=id and its previous incarnations.
morphological_parser_1_backup_count = Session.query(MorphologicalParserBackup).count() # there should only be backups of parser #1
morphological_parsers = Session.query(MorphologicalParser).all()
morphological_parser_1_id = morphological_parsers[0].id
morphological_parser_1_UUID = morphological_parsers[0].UUID
# Now get the history of the first morphological parser (which was updated twice in ``test_update``).
response = self.app.get(
url(controller='morphologicalparsers', action='history', id=morphological_parser_1_id),
headers=self.json_headers, extra_environ=self.extra_environ_view_appset)
resp = json.loads(response.body)
assert response.content_type == 'application/json'
assert 'morphological_parser' in resp
assert 'previous_versions' in resp
assert len(resp['previous_versions']) == morphological_parser_1_backup_count
# Get the same history as above, except use the UUID
response = self.app.get(
url(controller='morphologicalparsers', action='history', id=morphological_parser_1_UUID),
headers=self.json_headers, extra_environ=self.extra_environ_view_appset)
resp = json.loads(response.body)
assert response.content_type == 'application/json'
assert 'morphological_parser' in resp
assert 'previous_versions' in resp
assert len(resp['previous_versions']) == morphological_parser_1_backup_count
# Attempt to get the history with an invalid id and expect to fail
response = self.app.get(
url(controller='morphologicalparsers', action='history', id=123456789),
headers=self.json_headers, extra_environ=self.extra_environ_view_appset, status=404)
resp = json.loads(response.body)
assert response.content_type == 'application/json'
assert resp['error'] == u'No morphological parsers or morphological parser backups match 123456789'
# Test servecompiled
response = self.app.get(url(controller='morphologicalparsers', action='servecompiled',
id=morphological_parser_1_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
binary_path = os.path.join(morphological_parser_1_dir, 'morphophonology.foma')
binary_file = open(binary_path, 'rb').read()
binary_file_from_resp = response.body
assert binary_file == binary_file_from_resp
# Test export
response = self.app.get(url(controller='morphologicalparsers', action='export',
id=morphological_parser_1_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
assert response.content_type == 'application/zip'
# To ensure the exported parser works, unzip it and test it out: ./parse.py chiens chats
parser_1_cache = sorted([p.transcription for p in Session.query(model.Parse).\
filter(model.Parse.parser_id==morphological_parser_1_id).all()])
assert parser_1_cache == [u'abc', u'chiens', u'tombait']
# Test morphological parser deletion.
assert u'morphophonology.script' in os.listdir(morphological_parser_1_dir)
assert u'morphophonology.foma' in os.listdir(morphological_parser_1_dir)
response = self.app.delete(url('morphologicalparser', id=morphological_parser_1_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert not os.path.exists(morphological_parser_1_dir)
assert resp['description'] == u'Newer description'
assert resp['phonology']['id'] == morphological_parser_1_phonology_id
@nottest
def test_i_large_datasets(self):
"""Tests that morphological parser functionality works with large datasets.
.. note::
This test only works if MySQL is being used as the RDBMS for the test
*and* there is a file in
``onlinelinguisticdatabase/onlinelinguisticdatabase/tests/data/datasets/``
that is a MySQL dump file of a valid OLD database. The name of this file
can be configured by setting the ``old_dump_file`` variable. Note that no
such dump file is provided with the OLD source since the file used by the
developer contains data that cannot be publicly shared.
"""
# If foma is not installed, exit.
if not h.foma_installed(force_check=True):
return
# Configuration
# The ``old_dump_file`` variable holds the name of a MySQL dump file in /tests/data/datasets
# that will be used to populate the database.
old_dump_file = 'blaold.sql'
backup_dump_file = 'old_test_dump.sql'
# The ``precompiled_morphophonology`` variable holds the name of a compiled foma FST that
# maps surface representations to sequences of morphemes. A file with this name should be
# present in /tests/data/morphophonologies or else the variable should be set to None.
pregenerated_morphophonology = None # 'blaold_morphophonology.script'
precompiled_morphophonology = None # 'blaold_morphophonology.foma'
# Here we load a whole database from the mysql dump file specified in ``tests/data/datasets/<old_dump_file>``.
old_dump_file_path = os.path.join(self.test_datasets_path, old_dump_file)
backup_dump_file_path = os.path.join(self.test_datasets_path, backup_dump_file)
tmp_script_path = os.path.join(self.test_datasets_path, 'tmp.sh')
if not os.path.isfile(old_dump_file_path):
return
config = h.get_config(config_filename='test.ini')
SQLAlchemyURL = config['sqlalchemy.url']
if not SQLAlchemyURL.split(':')[0] == 'mysql':
return
rdbms, username, password, db_name = SQLAlchemyURL.split(':')
username = username[2:]
password = password.split('@')[0]
db_name = db_name.split('/')[-1]
# First dump the existing database so we can load it later.
# Note: the --single-transaction option seems to be required (on Mac MySQL 5.6 using InnoDB tables ...)
# see http://forums.mysql.com/read.php?10,108835,112951#msg-112951
with open(tmp_script_path, 'w') as tmpscript:
tmpscript.write('#!/bin/sh\nmysqldump -u %s -p%s --single-transaction --no-create-info --result-file=%s %s' % (
username, password, backup_dump_file_path, db_name))
os.chmod(tmp_script_path, 0744)
with open(os.devnull, "w") as fnull:
call([tmp_script_path], stdout=fnull, stderr=fnull)
# Now load the dump file of the large database (from old_dump_file)
with open(tmp_script_path, 'w') as tmpscript:
tmpscript.write('#!/bin/sh\nmysql -u %s -p%s %s < %s' % (username, password, db_name, old_dump_file_path))
with open(os.devnull, "w") as fnull:
call([tmp_script_path], stdout=fnull, stderr=fnull)
# Recreate the default users that the loaded dump file deleted
administrator = h.generate_default_administrator()
contributor = h.generate_default_contributor()
viewer = h.generate_default_viewer()
Session.add_all([administrator, contributor, viewer])
Session.commit()
################################################################################
# PHONOLOGY
################################################################################
# Create a Blackfoot phonology with the test phonology script
params = self.phonology_create_params.copy()
params.update({
'name': u'Blackfoot Phonology',
'description': u'The phonological rules of Frantz (1997) as FSTs',
'script': self.blackfoot_phonology_script
})
params = json.dumps(params)
response = self.app.post(url('phonologies'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
phonology_id = resp['id']
"""
################################################################################
# MORPHOLOGY
################################################################################
# Create a lexicon form search and corpus
# The code below constructs a query that finds a (large) subset of the Blackfoot morphemes.
# Notes for future morphology creators:
# 1. the "oth" category is a mess: detangle the nominalizer, inchoative, transitive suffixes, etc. from
# one another and from the numerals and temporal modifiers -- ugh!
# 2. the "pro" category" is also a mess: clearly pronoun-forming iisto does not have the same distribution
# as the verbal suffixes aiksi and aistsi! And oht, the LING/means thing, is different again...
# 3. hkayi, that thing at the end of demonstratives, is not agra, what is it? ...
# 4. the dim category contains only 'sst' 'DIM' and is not used in any forms ...
lexical_category_names = ['nan', 'nin', 'nar', 'nir', 'vai', 'vii', 'vta', 'vti', 'vrt', 'adt',
'drt', 'prev', 'med', 'fin', 'oth', 'o', 'und', 'pro', 'asp', 'ten', 'mod', 'agra', 'agrb', 'thm', 'whq',
'num', 'stp', 'PN']
durative_morpheme = 15717
hkayi_morpheme = 23429
query = {'filter': ['and', [['Form', 'syntactic_category', 'name', 'in', lexical_category_names],
['not', ['Form', 'morpheme_break', 'regex', '[ -]']],
['not', ['Form', 'id', 'in', [durative_morpheme, hkayi_morpheme]]],
['not', ['Form', 'grammaticality', '=', '*']]
]]}
smaller_query_for_rapid_testing = {'filter': ['and', [['Form', 'id', '<', 1000],
['Form', 'syntactic_category', 'name', 'in', lexical_category_names]]]}
params = self.form_search_create_params.copy()
params.update({
'name': u'Blackfoot morphemes',
'search': query
})
params = json.dumps(params)
response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)
lexicon_form_search_id = json.loads(response.body)['id']
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of Blackfoot morphemes',
'form_search': lexicon_form_search_id
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
lexicon_corpus_id = json.loads(response.body)['id']
# Create a rules corpus
# Create a corpus of forms containing words -- to be used to estimate ngram probabilities
# The goal here is to exclude things that look like words but are not really words, i.e.,
# morphemes; as a heuristic we search for grammatical forms categorized as 'sent' or whose
# transcription value contains a space or a dash.
query = {'filter': ['and', [['or', [['Form', 'syntactic_category', 'name', '=', u'sent'],
['Form', 'morpheme_break', 'like', '% %'],
['Form', 'morpheme_break', 'like', '%-%']]],
['Form', 'syntactic_category_string', '!=', None],
['Form', 'grammaticality', '=', '']]]}
params = self.form_search_create_params.copy()
params.update({
'name': u'Find Blackfoot sentences',
'description': u'Returns all forms containing words',
'search': query
})
params = json.dumps(params)
response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)
rules_form_search_id = json.loads(response.body)['id']
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of Blackfoot sentences',
'form_search': rules_form_search_id
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
rules_corpus_id = json.loads(response.body)['id']
# Now we reduce the number of category-based word-formation rules by removing all such
# rules implicit in the rules corpus that have fewer than two exemplar tokens.
# Get the category sequence types of all of the words in the rules corpus ordered by their counts, minus
# those with fewer than ``minimum_token_count`` counts.
minimum_token_count = 2
params = {'minimum_token_count': minimum_token_count}
response = self.app.get(url(controller='corpora', action='get_word_category_sequences', id=rules_corpus_id),
params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
word_category_sequences = u' '.join([word_category_sequence for word_category_sequence, ids in resp])
#word_category_sequences = u'agra-vai vai-agrb'
# Now create a morphology using the lexicon and rules defined by word_category_sequences
rich_upper = False
name = u'Morphology of Blackfoot'
params = self.morphology_create_params.copy()
params.update({
'name': name,
'lexicon_corpus': lexicon_corpus_id,
'rules': word_category_sequences,
'script_type': u'lexc',
'extract_morphemes_from_rules_corpus': False,
'rich_upper': rich_upper
})
params = json.dumps(params)
response = self.app.post(url('morphologies'), params, self.json_headers, self.extra_environ_admin_appset)
resp = json.loads(response.body)
morphology_id = resp['id']
assert resp['name'] == name
assert resp['script_type'] == u'lexc'
# Generate the morphology's script without compiling it.
response = self.app.put(url(controller='morphologies', action='generate',
id=morphology_id), headers=self.json_headers,
extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
generate_attempt = resp['generate_attempt']
# Poll ``GET /morphologies/morphology_id`` until ``generate_attempt`` has changed.
seconds_elapsed = 0
wait = 2
while True:
response = self.app.get(url('morphology', id=morphology_id),
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
if generate_attempt != resp['generate_attempt']:
log.debug('Generate attempt for morphology %d has terminated.' % morphology_id)
break
else:
log.debug('Waiting for morphology %d\'s script to generate: %s' % (
morphology_id, self.human_readable_seconds(seconds_elapsed)))
sleep(wait)
seconds_elapsed = seconds_elapsed + wait
################################################################################
# MORPHEME LANGUAGE MODEL
################################################################################
# Create a morpheme language model
name = u'Blackfoot morpheme language model'
params = self.morpheme_language_model_create_params.copy()
params.update({
'name': name,
'corpus': rules_corpus_id,
'toolkit': 'mitlm'
})
params = json.dumps(params)
response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morpheme_language_model_id = resp['id']
assert resp['name'] == name
assert resp['toolkit'] == u'mitlm'
assert resp['order'] == 3
assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM
# Generate the files of the language model
response = self.app.put(url(controller='morphemelanguagemodels', action='generate', id=morpheme_language_model_id),
{}, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
lm_generate_attempt = resp['generate_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphemelanguagemodel', id=morpheme_language_model_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)
assert resp['generate_message'] == u'Language model successfully generated.'
################################################################################
# MORPHOLOGICAL PARSER
################################################################################
# Create a morphological parser for Blackfoot
params = self.morphological_parser_create_params.copy()
params.update({
'name': u'Morphological parser for Blackfoot',
'phonology': phonology_id,
'morphology': morphology_id,
'language_model': morpheme_language_model_id
})
params = json.dumps(params)
response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_id = resp['id']
# Compile the morphological parser's morphophonology script if necessary, cf. precompiled_morphophonology and pregenerated_morphophonology.
morphological_parser_directory = os.path.join(self.morphological_parsers_path, 'morphological_parser_%d' % morphological_parser_id)
morphophonology_binary_filename = 'morphophonology.foma'
morphophonology_script_filename = 'morphological_parser.script'
morphophonology_binary_path = os.path.join(morphological_parser_directory, morphophonology_binary_filename )
morphophonology_script_path = os.path.join(morphological_parser_directory, morphophonology_script_filename )
try:
precompiled_morphophonology_path = os.path.join(self.test_morphophonologies_path, precompiled_morphophonology)
pregenerated_morphophonology_path = os.path.join(self.test_morphophonologies_path, pregenerated_morphophonology)
except Exception:
precompiled_morphophonology_path = None
pregenerated_morphophonology_path = None
if (precompiled_morphophonology_path and pregenerated_morphophonology_path and
os.path.exists(precompiled_morphophonology_path) and os.path.exists(pregenerated_morphophonology_path)):
# Use the precompiled morphophonology script if it's available,
copyfileobj(open(precompiled_morphophonology_path, 'rb'), open(morphophonology_binary_path, 'wb'))
copyfileobj(open(pregenerated_morphophonology_path, 'rb'), open(morphophonology_script_path, 'wb'))
else:
# Generate the parser's morphophonology FST, compile it and generate the morphemic language model
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Generate the parser's morphophonology FST, compile it and generate the morphemic language model
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
# Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.
requester = lambda: self.app.get(url('morphologicalparser', id=morphological_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,
wait=10, vocal=True, task_descr='compile morphological parser %s' % morphological_parser_id)
assert resp['compile_message'] == \
u'Compilation process terminated successfully and new binary file was written.'
# Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.
requester = lambda: self.app.get(url('morphologicalparser', id=morphological_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,
wait=10, vocal=True, task_descr='compile morphological parser %s' % morphological_parser_id)
assert resp['compile_message'] == \
u'Compilation process terminated successfully and new binary file was written.'
# Some reusable transcriptions and their parses
transcription1 = u'nitsspiyi'
transcription1_correct_parse = u'%s-%s' % (
h.rare_delimiter.join([u'nit', u'1', u'agra']),
h.rare_delimiter.join([u'ihpiyi', u'dance', u'vai']))
transcription1_impoverished_parse = u'nit-ihpiyi'
transcription2 = u'aaniit'
transcription2_correct_parse = u'%s-%s' % (
h.rare_delimiter.join([u'waanii', u'say', u'vai']),
h.rare_delimiter.join([u't', u'IMP', u'agrb']))
transcription2_impoverished_parse = u'waanii-t'
# Test applyup on the mophological parser's morphophonology FST
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='applyup',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
if rich_upper:
assert transcription1_correct_parse in resp[transcription1]
assert transcription2_correct_parse in resp[transcription2]
else:
assert transcription1_impoverished_parse in resp[transcription1]
assert transcription2_impoverished_parse in resp[transcription2]
# Test how well the morphological parser parses some test words.
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_correct_parse
# aaniit will have waaniit 'scatter' as its most likely parse and the correct parse waanii-t 'say-IMP'
# as its second most likely...
assert resp[transcription2] != transcription2_correct_parse
"""
################################################################################
# LOUIE MORPHOLOGY
################################################################################
# Create a form search that returns forms containing analyzed words elicited by Louie.
conjuncts = [['or', [['Form', 'syntactic_category', 'name', '=', u'sent'],
['Form', 'morpheme_break', 'like', '% %'],
['Form', 'morpheme_break', 'like', '%-%']]],
['Form', 'syntactic_category_string', '!=', None],
['Form', 'grammaticality', '=', ''],
['Form', 'elicitor', 'last_name', '=', 'Louie']]
query = {'filter': ['and', conjuncts]}
params = self.form_search_create_params.copy()
params.update({
'name': u'Forms containing analyzed words elicited by Louie',
'search': query
})
params = json.dumps(params)
response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)
louie_form_search_id = json.loads(response.body)['id']
params = self.corpus_create_params.copy()
params.update({
'name': u'Corpus of forms containing analyzed words elicited by Louie',
'form_search': louie_form_search_id
})
params = json.dumps(params)
response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)
louie_corpus_id = json.loads(response.body)['id']
# Now create a morphology using the Louie corpus as both the lexicon and rules corpora.
rich_upper = False
name = u'Morphology of Blackfoot based on words elicited by Louie'
params = self.morphology_create_params.copy()
params.update({
'name': name,
'lexicon_corpus': louie_corpus_id,
'rules_corpus': louie_corpus_id,
'script_type': u'regex',
'extract_morphemes_from_rules_corpus': True,
'rich_upper': rich_upper
})
params = json.dumps(params)
response = self.app.post(url('morphologies'), params, self.json_headers, self.extra_environ_admin_appset)
resp = json.loads(response.body)
louie_morphology_id = resp['id']
assert resp['name'] == name
assert resp['script_type'] == u'regex'
# Generate the morphology's script without compiling it.
response = self.app.put(url(controller='morphologies', action='generate',
id=louie_morphology_id), headers=self.json_headers,
extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
generate_attempt = resp['generate_attempt']
# Poll ``GET /morphologies/morphology_id`` until ``generate_attempt`` has changed.
seconds_elapsed = 0
wait = 2
while True:
response = self.app.get(url('morphology', id=louie_morphology_id),
headers=self.json_headers, extra_environ=self.extra_environ_contrib)
resp = json.loads(response.body)
if generate_attempt != resp['generate_attempt']:
log.debug('Generate attempt for morphology %d has terminated.' % louie_morphology_id)
break
else:
log.debug('Waiting for morphology %d\'s script to generate: %s' % (
louie_morphology_id, self.human_readable_seconds(seconds_elapsed)))
sleep(wait)
seconds_elapsed = seconds_elapsed + wait
################################################################################
# MORPHEME LANGUAGE MODEL -- LOUIE
################################################################################
# Create a morpheme language model based on the data elicited by Louie
name = u'Blackfoot morpheme language model based on data elicited by Louie'
params = self.morpheme_language_model_create_params.copy()
params.update({
'name': name,
'corpus': louie_corpus_id,
'toolkit': 'mitlm'
})
params = json.dumps(params)
response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
louie_language_model_id = resp['id']
assert resp['name'] == name
assert resp['toolkit'] == u'mitlm'
assert resp['order'] == 3
assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM
# Generate the files of the language model
response = self.app.put(url(controller='morphemelanguagemodels', action='generate',
id=louie_language_model_id),
{}, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
lm_generate_attempt = resp['generate_attempt']
# Poll GET /morphemelanguagemodels/id until generate_attempt changes.
requester = lambda: self.app.get(url('morphemelanguagemodel', id=louie_language_model_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)
assert resp['generate_message'] == u'Language model successfully generated.'
################################################################################
# MORPHOLOGICAL PARSER -- LOUIE
################################################################################
# Create a morphological parser for Blackfoot based on data elicited by Louie
params = self.morphological_parser_create_params.copy()
params.update({
'name': u'Morphological parser for Blackfoot based on data elicited by Louie',
'phonology': phonology_id,
'morphology': louie_morphology_id,
'language_model': louie_language_model_id
})
params = json.dumps(params)
response = self.app.post(url('morphologicalparsers'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
louie_parser_id = resp['id']
# Compile the morphological parser's morphophonology script if necessary, cf.
# precompiled_morphophonology and pregenerated_morphophonology.
morphological_parser_directory = os.path.join(self.morphological_parsers_path,
'morphological_parser_%d' % louie_parser_id)
morphophonology_binary_filename = 'morphophonology.foma'
morphophonology_script_filename = 'morphological_parser.script'
morphophonology_binary_path = os.path.join(morphological_parser_directory,
morphophonology_binary_filename )
morphophonology_script_path = os.path.join(morphological_parser_directory,
morphophonology_script_filename )
try:
precompiled_morphophonology_path = os.path.join(self.test_morphophonologies_path,
precompiled_morphophonology)
pregenerated_morphophonology_path = os.path.join(self.test_morphophonologies_path,
pregenerated_morphophonology)
except Exception:
precompiled_morphophonology_path = None
pregenerated_morphophonology_path = None
if (precompiled_morphophonology_path and pregenerated_morphophonology_path and
os.path.exists(precompiled_morphophonology_path) and os.path.exists(pregenerated_morphophonology_path)):
# Use the precompiled morphophonology script if it's available,
copyfileobj(open(precompiled_morphophonology_path, 'rb'), open(morphophonology_binary_path, 'wb'))
copyfileobj(open(pregenerated_morphophonology_path, 'rb'), open(morphophonology_script_path, 'wb'))
else:
# Generate the parser's morphophonology FST, compile it and generate the morphemic language model
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=louie_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
morphological_parser_compile_attempt = resp['compile_attempt']
# Generate the parser's morphophonology FST, compile it and generate the morphemic language model
response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',
id=louie_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)
# Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.
requester = lambda: self.app.get(url('morphologicalparser', id=louie_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,
wait=10, vocal=True, task_descr='compile morphological parser %s' % louie_parser_id)
assert resp['compile_message'] == \
u'Compilation process terminated successfully and new binary file was written.'
# Poll ``GET /morphologicalparsers/mophological_parser_id`` until ``compile_attempt`` has changed.
requester = lambda: self.app.get(url('morphologicalparser', id=louie_parser_id),
headers=self.json_headers, extra_environ=self.extra_environ_admin)
resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt, log,
wait=10, vocal=True, task_descr='compile morphological parser %s' % louie_parser_id)
assert resp['compile_message'] == \
u'Compilation process terminated successfully and new binary file was written.'
# Some reusable transcriptions and their parses
transcription1 = u'nitsspiyi'
transcription1_correct_parse = u'%s-%s' % (
h.rare_delimiter.join([u'nit', u'1', u'agra']),
h.rare_delimiter.join([u'ihpiyi', u'dance', u'vai']))
transcription1_impoverished_parse = u'nit-ihpiyi'
transcription2 = u'aaniit'
transcription2_correct_parse = u'%s-%s' % (
h.rare_delimiter.join([u'waanii', u'say', u'vai']),
h.rare_delimiter.join([u't', u'IMP', u'agrb']))
transcription2_impoverished_parse = u'waanii-t'
# Test applyup on the mophological parser's morphophonology FST
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='applyup',
id=louie_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
if rich_upper:
assert transcription1_correct_parse in resp[transcription1]
assert transcription2_correct_parse not in resp[transcription2]
else:
assert transcription1_impoverished_parse in resp[transcription1]
assert transcription2_impoverished_parse not in resp[transcription2]
# Test how well the morphological parser parses some test words.
params = json.dumps({'transcriptions': [transcription1, transcription2]})
response = self.app.put(url(controller='morphologicalparsers', action='parse',
id=louie_parser_id), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp[transcription1] == transcription1_correct_parse
# aaniit will have waaniit 'scatter' as its most likely parse and the correct parse waanii-t 'say-IMP'
# as its second most likely...
assert resp[transcription2] != transcription2_correct_parse
# Finally, load the original database back in so that subsequent tests can work.
with open(tmp_script_path, 'w') as tmpscript:
tmpscript.write('#!/bin/sh\nmysql -u %s -p%s %s < %s' % (username, password, db_name, backup_dump_file_path))
with open(os.devnull, "w") as fnull:
call([tmp_script_path], stdout=fnull, stderr=fnull)
os.remove(tmp_script_path)
os.remove(backup_dump_file_path)
# Implement category-based class LMs and test them against morpheme-based ones.
# Build multiple Bf morphological parsers and test them out, find the best one, write a paper on it!
    # ``@nottest`` (presumably nose's decorator) excludes this method from
    # normal collection despite its ``test_`` prefix -- NOTE(review): confirm
    # this final cleanup is intentionally disabled rather than meant to run
    # as the alphabetically-last test.
    @nottest
    def test_z_cleanup(self):
        """Clean up after the tests."""
        # Per the keyword arguments: clear every table, delete the global
        # application settings, and destroy the per-resource directories
        # created during the test run.
        TestController.tearDown(
                self,
                clear_all_tables=True,
                del_global_app_set=True,
                dirs_to_destroy=['user', 'morphology', 'corpus', 'morphological_parser'])
| {
"content_hash": "84f5a9aebc3e942674dcc59fef277a1a",
"timestamp": "",
"source": "github",
"line_count": 1736,
"max_line_length": 193,
"avg_line_length": 55.84101382488479,
"alnum_prop": 0.6129770992366412,
"repo_name": "jrwdunham/old",
"id": "2f5bcf82c14ce00b6094cc73912896a0d9bcbc12",
"size": "97524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onlinelinguisticdatabase/tests/functional/test_morphologicalparsers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "66"
},
{
"name": "Python",
"bytes": "2840936"
},
{
"name": "Shell",
"bytes": "778"
}
],
"symlink_target": ""
} |
from sqlalchemy.dialects import sqlite
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.tests.functional import test_servers
from nova.tests.unit import fake_network
class TestDatabaseArchive(test_servers.ServersTestBase):
    """Tests DB API for archiving (soft) deleted records"""

    def setUp(self):
        super(TestDatabaseArchive, self).setUp()
        # TODO(mriedem): pull this out so we can re-use it in
        # test_archive_deleted_rows_fk_constraint
        # SQLite only enforces foreign key constraints once the pragma below
        # is enabled, and enforcement is unreliable on old library versions:
        # SQLAlchemy does not support it at all before SQLite 3.6.19 and we
        # have seen problems with 3.6.20, so skip on anything older than 3.7.
        engine = sqlalchemy_api.get_engine()
        if engine.url.get_dialect() == sqlite.dialect:
            import sqlite3
            if sqlite3.sqlite_version_info < (3, 7):
                self.skipTest(
                    'sqlite version too old for reliable SQLA foreign_keys')
            engine.connect().execute("PRAGMA foreign_keys = ON")

    def _create_server(self):
        """Creates a minimal test server via the compute API

        Ensures the server is created and can be retrieved from the compute API
        and waits for it to be ACTIVE.

        :returns: created server (dict)
        """
        # TODO(mriedem): We should pull this up into the parent class so we
        # don't have so much copy/paste in these functional tests.
        fake_network.set_stub_network_methods(self)
        # Boot a minimal server and remember its ID.
        request_body = self._build_minimal_create_server_request()
        created = self.api.post_server({'server': request_body})
        self.assertTrue(created['id'])
        server_id = created['id']
        # The new server must be retrievable through the API...
        fetched = self.api.get_server(server_id)
        self.assertEqual(server_id, fetched['id'])
        # ...and must eventually leave BUILD and become usable.
        fetched = self._wait_for_state_change(fetched, 'BUILD')
        self.assertEqual('ACTIVE', fetched['status'])
        return fetched

    def test_archive_deleted_rows(self):
        # Boots a server, deletes it, and then tries to archive it.
        server_id = self._create_server()['id']
        # instance_actions are interesting here: they are never soft deleted,
        # yet they hold a foreign key back to the instances table.
        actions = self.api.get_instance_actions(server_id)
        self.assertTrue(len(actions),
                        'No instance actions for server: %s' % server_id)
        self._delete_server(server_id)
        # The soft-deleted instance must still be visible with
        # read_deleted='yes'; instance_get_by_uuid raises InstanceNotFound
        # if it is missing.
        admin_context = context.get_admin_context(read_deleted='yes')
        instance = db.instance_get_by_uuid(admin_context, server_id)
        # ...and it must actually be flagged as deleted.
        self.assertNotEqual(0, instance.deleted)
        # system_metadata is compared against the archive results below, so
        # make sure some exists before archiving.
        self.assertTrue(len(instance.system_metadata),
                        'No system_metadata for instance: %s' % server_id)
        # Archive the soft-deleted records.
        results = db.archive_deleted_rows(max_rows=100)
        # Every system_metadata row for the instance should have been moved.
        self.assertIn('instance_system_metadata', results)
        self.assertEqual(len(instance.system_metadata),
                         results['instance_system_metadata'])
        # The instance itself plus its actions and action events should have
        # been archived as well.
        self.assertIn('instances', results)
        self.assertIn('instance_actions', results)
        self.assertIn('instance_actions_events', results)
| {
"content_hash": "85eaa8973b3e6d88d32f623d35f1385f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 45.94444444444444,
"alnum_prop": 0.6435308343409916,
"repo_name": "bigswitch/nova",
"id": "8d482c42b7c25070577529e7c82ff1c812cf1684",
"size": "4737",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/functional/db/test_archive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17220528"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
"""Unittest for ipaddress module."""
from __future__ import unicode_literals
import contextlib
import operator
import pickle
import re
import sys
import unittest
import weakref
import ipaddress
# Compatibility function
import binascii
try:
_compat_bytes_fromhex = bytes.fromhex
except AttributeError:
def _compat_bytes_fromhex(s):
return binascii.unhexlify(s)
# Alias of the backport's private text-type helper -- presumably the native
# text type (``str`` on Py3, ``unicode`` on Py2); NOTE(review): relies on the
# ipaddress *backport*, not the stdlib module, exposing ``_compat_str``.
_compat_str = ipaddress._compat_str
class BaseTestCase(unittest.TestCase):
    """Shared helpers for the systematic constructor error-reporting tests.

    Unlike the original ipaddr suite, these tests assume users *don't know
    the rules* for RFC-compliant addresses, so they verify that each kind of
    malformed input produces a clear, specific error message.  The structure
    maps directly onto the module structure; if constructor refactoring
    reclassifies an input's failure mode, just move the affected examples to
    the newly appropriate test case.  Some overlap with the older ad hoc
    suite remains and is expected to shrink as redundant tests are found.

    Subclasses set :attr:`factory` to the constructor under test.
    """

    @property
    def factory(self):
        # Concrete test cases must override this with the address/network
        # constructor they exercise.
        raise NotImplementedError

    @contextlib.contextmanager
    def assertCleanError(self, exc_type, details, *args):
        """
        Ensure exception does not display a context by default

        Wraps unittest.TestCase.assertRaisesRegex
        """
        if args:
            details = details % args
        raises_ctx = self.assertRaisesRegex(exc_type, details)
        with raises_ctx as caught:
            yield caught
        # NOTE: Python 3 would also let us assert that the exception
        # suppresses its __context__ on failure; that check cannot easily be
        # expressed under 2.x, so it is omitted here.

    def assertAddressError(self, details, *args):
        """Ensure a clean AddressValueError"""
        return self.assertCleanError(
            ipaddress.AddressValueError, details, *args)

    def assertNetmaskError(self, details, *args):
        """Ensure a clean NetmaskValueError"""
        return self.assertCleanError(
            ipaddress.NetmaskValueError, details, *args)

    def assertInstancesEqual(self, lhs, rhs):
        """Check constructor arguments produce equivalent instances"""
        self.assertEqual(self.factory(lhs), self.factory(rhs))
class CommonTestMixin:
    """Constructor checks shared by every IPv4 and IPv6 factory test case."""

    def test_empty_address(self):
        with self.assertAddressError("Address cannot be empty"):
            self.factory("")

    def test_floats_rejected(self):
        with self.assertAddressError(re.escape(repr("1.0"))):
            self.factory(1.0)

    def test_not_an_index_issue15559(self):
        # Supporting __index__ would interact very nastily with the bytes
        # constructor, so implicit use of an address as an integer is
        # deliberately rejected (see issue 15559).
        self.assertRaises(TypeError, operator.index, self.factory(1))
        self.assertRaises(TypeError, hex, self.factory(1))
        # bytes(addr) cannot be checked here: bytes semantics differ on 2.x.

    def pickle_test(self, addr):
        # Round-trip the address through every available pickle protocol.
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=protocol):
                original = self.factory(addr)
                restored = pickle.loads(pickle.dumps(original, protocol))
                self.assertEqual(restored, original)
class CommonTestMixin_v4(CommonTestMixin):
    """IPv4-specific constructor checks layered on CommonTestMixin."""

    def test_leading_zeros(self):
        for padded, canonical in (("000.000.000.000", "0.0.0.0"),
                                  ("192.168.000.001", "192.168.0.1")):
            self.assertInstancesEqual(padded, canonical)

    def test_int(self):
        for integer, dotted in ((0, "0.0.0.0"),
                                (3232235521, "192.168.0.1")):
            self.assertInstancesEqual(integer, dotted)

    def test_packed(self):
        for hex_digits, dotted in (("00000000", "0.0.0.0"),
                                   ("c0a80001", "192.168.0.1")):
            self.assertInstancesEqual(
                _compat_bytes_fromhex(hex_digits), dotted)

    def test_negative_ints_rejected(self):
        msg = "-1 (< 0) is not permitted as an IPv4 address"
        with self.assertAddressError(re.escape(msg)):
            self.factory(-1)

    def test_large_ints_rejected(self):
        msg = "%d (>= 2**32) is not permitted as an IPv4 address"
        with self.assertAddressError(re.escape(msg % (2 ** 32))):
            self.factory(2 ** 32)

    def test_bad_packed_length(self):
        def assertBadLength(length):
            addr = b'\0' * length
            msg = "%r (len %d != 4) is not permitted as an IPv4 address"
            with self.assertAddressError(re.escape(msg % (addr, length))):
                self.factory(addr)

        # One byte short and one byte long of the required four.
        for bad_length in (3, 5):
            assertBadLength(bad_length)
class CommonTestMixin_v6(CommonTestMixin):
    """IPv6-specific constructor checks layered on CommonTestMixin.

    Subclasses supply :attr:`factory` (e.g. ``ipaddress.IPv6Address``).
    """

    def test_leading_zeros(self):
        # Leading zeros within a group are accepted and normalized.
        self.assertInstancesEqual("0000::0000", "::")
        self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")

    def test_int(self):
        # Integer inputs produce the equivalent IPv6 value.
        self.assertInstancesEqual(0, "::")
        self.assertInstancesEqual(3232235521, "::c0a8:1")

    def test_packed(self):
        # 16-byte packed input, with the significant bytes at either end.
        addr = b'\0'*12 + _compat_bytes_fromhex("00000000")
        self.assertInstancesEqual(addr, "::")
        addr = b'\0'*12 + _compat_bytes_fromhex("c0a80001")
        self.assertInstancesEqual(addr, "::c0a8:1")
        addr = _compat_bytes_fromhex("c0a80001") + b'\0'*12
        self.assertInstancesEqual(addr, "c0a8:1::")

    def test_negative_ints_rejected(self):
        msg = "-1 (< 0) is not permitted as an IPv6 address"
        with self.assertAddressError(re.escape(msg)):
            self.factory(-1)

    def test_large_ints_rejected(self):
        msg = "%d (>= 2**128) is not permitted as an IPv6 address"
        with self.assertAddressError(re.escape(msg % 2 ** 128)):
            self.factory(2 ** 128)

    def test_bad_packed_length(self):
        def assertBadLength(length):
            addr = b'\0' * length
            msg = "%r (len %d != 16) is not permitted as an IPv6 address"
            with self.assertAddressError(re.escape(msg % (addr, length))):
                # Fix: the original repeated this call on the next line;
                # since this first call must raise inside the context
                # manager, the duplicate was unreachable dead code (compare
                # the single call in CommonTestMixin_v4) and was removed.
                self.factory(addr)

        # One byte short and one byte long of the required sixteen.
        assertBadLength(15)
        assertBadLength(17)
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
factory = ipaddress.IPv4Address
def test_network_passed_as_address(self):
addr = "127.0.0.1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv4Address(addr)
def test_bad_address_split(self):
def assertBadSplit(addr):
with self.assertAddressError("Expected 4 octets in %r", addr):
ipaddress.IPv4Address(addr)
assertBadSplit("127.0.1")
assertBadSplit("42.42.42.42.42")
assertBadSplit("42.42.42")
assertBadSplit("42.42")
assertBadSplit("42")
assertBadSplit("42..42.42.42")
assertBadSplit("42.42.42.42.")
assertBadSplit("42.42.42.42...")
assertBadSplit(".42.42.42.42")
assertBadSplit("...42.42.42.42")
assertBadSplit("016.016.016")
assertBadSplit("016.016")
assertBadSplit("016")
assertBadSplit("000")
assertBadSplit("0x0a.0x0a.0x0a")
assertBadSplit("0x0a.0x0a")
assertBadSplit("0x0a")
assertBadSplit(".")
assertBadSplit("bogus")
assertBadSplit("bogus.com")
assertBadSplit("1000")
assertBadSplit("1000000000000000")
assertBadSplit("192.168.0.1.com")
def test_empty_octet(self):
def assertBadOctet(addr):
with self.assertAddressError("Empty octet not permitted in %r",
addr):
ipaddress.IPv4Address(addr)
assertBadOctet("42..42.42")
assertBadOctet("...")
def test_invalid_characters(self):
def assertBadOctet(addr, octet):
msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("0x0a.0x0a.0x0a.0x0a", "0x0a")
assertBadOctet("0xa.0x0a.0x0a.0x0a", "0xa")
assertBadOctet("42.42.42.-0", "-0")
assertBadOctet("42.42.42.+0", "+0")
assertBadOctet("42.42.42.-42", "-42")
assertBadOctet("+1.+2.+3.4", "+1")
assertBadOctet("1.2.3.4e0", "4e0")
assertBadOctet("1.2.3.4::", "4::")
assertBadOctet("1.a.2.3", "a")
def test_octal_decimal_ambiguity(self):
def assertBadOctet(addr, octet):
msg = "Ambiguous (octal/decimal) value in %r not permitted in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("016.016.016.016", "016")
assertBadOctet("001.000.008.016", "008")
def test_octet_length(self):
def assertBadOctet(addr, octet):
msg = "At most 3 characters permitted in %r in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("0000.000.000.000", "0000")
assertBadOctet("12345.67899.-54321.-98765", "12345")
def test_octet_limit(self):
def assertBadOctet(addr, octet):
msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("257.0.0.0", 257)
assertBadOctet("192.168.0.999", 999)
    def test_pickle(self):
        # pickle_test is a helper on the shared mixin -- presumably it
        # round-trips the value through pickle; confirm in the mixin.
        self.pickle_test('192.0.2.1')
    def test_weakref(self):
        # Creating a weak reference must not raise TypeError, i.e. the
        # address type must support weak referencing.
        weakref.ref(self.factory('192.0.2.1'))
def test_bytes_message(self):
with self.assertAddressError(r'bytes'):
ipaddress.IPv4Address(b'192.0.2.1')
with self.assertAddressError(r'bytes'):
ipaddress.ip_address(b'192.0.2.1')
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
    """Input validation for IPv6Address.

    Every malformed string must be rejected, and the error message must
    identify the offending part of the input.
    """
    factory = ipaddress.IPv6Address
    def test_network_passed_as_address(self):
        # A '/prefix' suffix is only valid on interfaces/networks.
        addr = "::1/24"
        with self.assertAddressError("Unexpected '/' in %r", addr):
            ipaddress.IPv6Address(addr)
    def test_bad_address_split_v6_not_enough_parts(self):
        def assertBadSplit(addr):
            msg = "At least 3 parts expected in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit(":")
        assertBadSplit(":1")
        assertBadSplit("FEDC:9878")
    def test_bad_address_split_v6_too_many_colons(self):
        def assertBadSplit(addr):
            msg = "At most 8 colons permitted in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit("9:8:7:6:5:4:3::2:1")
        assertBadSplit("10:9:8:7:6:5:4:3:2:1")
        assertBadSplit("::8:7:6:5:4:3:2:1")
        assertBadSplit("8:7:6:5:4:3:2:1::")
        # A trailing IPv4 address is two parts
        assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42")
    def test_bad_address_split_v6_too_many_parts(self):
        def assertBadSplit(addr):
            msg = "Exactly 8 parts expected without '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit("3ffe:0:0:0:0:0:0:0:1")
        assertBadSplit("9:8:7:6:5:4:3:2:1")
        assertBadSplit("7:6:5:4:3:2:1")
        # A trailing IPv4 address is two parts
        assertBadSplit("9:8:7:6:5:4:3:42.42.42.42")
        assertBadSplit("7:6:5:4:3:42.42.42.42")
    def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
        def assertBadSplit(addr):
            msg = "Expected at most 7 other parts with '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit("1:2:3:4::5:6:7:8")
    def test_bad_address_split_v6_repeated_double_colon(self):
        # '::' may appear at most once; extra or stray colons are errors.
        def assertBadSplit(addr):
            msg = "At most one '::' permitted in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit("3ffe::1::1")
        assertBadSplit("1::2::3::4:5")
        assertBadSplit("2001::db:::1")
        assertBadSplit("3ffe::1::")
        assertBadSplit("::3ffe::1")
        assertBadSplit(":3ffe::1::1")
        assertBadSplit("3ffe::1::1:")
        assertBadSplit(":3ffe::1::1:")
        assertBadSplit(":::")
        assertBadSplit('2001:db8:::1')
    def test_bad_address_split_v6_leading_colon(self):
        def assertBadSplit(addr):
            msg = "Leading ':' only permitted as part of '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit(":2001:db8::1")
        assertBadSplit(":1:2:3:4:5:6:7")
        assertBadSplit(":1:2:3:4:5:6:")
        assertBadSplit(":6:5:4:3:2:1::")
    def test_bad_address_split_v6_trailing_colon(self):
        def assertBadSplit(addr):
            msg = "Trailing ':' only permitted as part of '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        assertBadSplit("2001:db8::1:")
        assertBadSplit("1:2:3:4:5:6:7:")
        assertBadSplit("::1.2.3.4:")
        assertBadSplit("::7:6:5:4:3:2:")
    def test_bad_v4_part_in(self):
        # An embedded IPv4 tail is validated with the IPv4 messages;
        # the u? in the expectations covers Python 2 unicode reprs.
        def assertBadAddressPart(addr, v4_error):
            with self.assertAddressError("%s in %r", v4_error, addr):
                ipaddress.IPv6Address(addr)
        assertBadAddressPart("3ffe::1.net", "Expected 4 octets in u?'1.net'")
        assertBadAddressPart("3ffe::127.0.1",
                             "Expected 4 octets in u?'127.0.1'")
        assertBadAddressPart("::1.2.3",
                             "Expected 4 octets in u?'1.2.3'")
        assertBadAddressPart("::1.2.3.4.5",
                             "Expected 4 octets in u?'1.2.3.4.5'")
        assertBadAddressPart("3ffe::1.1.1.net",
                             "Only decimal digits permitted in u?'net' "
                             "in u?'1.1.1.net'")
    def test_invalid_characters(self):
        def assertBadPart(addr, part):
            msg = "Only hex digits permitted in %r in %r" % (part, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv6Address(addr)
        assertBadPart("3ffe::goog", "goog")
        assertBadPart("3ffe::-0", "-0")
        assertBadPart("3ffe::+0", "+0")
        assertBadPart("3ffe::-1", "-1")
        assertBadPart("1.2.3.4::", "1.2.3.4")
        assertBadPart('1234:axy::b', "axy")
    def test_part_length(self):
        # Each hextet is limited to four hex digits.
        def assertBadPart(addr, part):
            msg = "At most 4 characters permitted in %r in %r"
            with self.assertAddressError(msg, part, addr):
                ipaddress.IPv6Address(addr)
        assertBadPart("::00000", "00000")
        assertBadPart("3ffe::10000", "10000")
        assertBadPart("02001:db8::", "02001")
        assertBadPart('2001:888888::1', "888888")
    def test_pickle(self):
        # pickle_test is a helper on the shared mixin -- presumably it
        # round-trips the value through pickle; confirm in the mixin.
        self.pickle_test('2001:db8::')
    def test_weakref(self):
        # Creating a weak reference must not raise TypeError.
        weakref.ref(self.factory('2001:db8::'))
    def test_bytes_message(self):
        # bytes input is rejected with a message mentioning bytes.
        with self.assertAddressError(r'bytes'):
            ipaddress.IPv6Address(b'::123')
        with self.assertAddressError(r'bytes'):
            ipaddress.ip_address(b'::123')
class NetmaskTestMixin_v4(CommonTestMixin_v4):
    """Input validation on interfaces and networks is very similar"""
    def test_split_netmask(self):
        # Only a single '/' separator is allowed.
        addr = "1.2.3.4/32/24"
        with self.assertAddressError("Only one '/' permitted in %r" % addr):
            self.factory(addr)
    def test_address_errors(self):
        # The address part is validated with the same messages as a
        # plain IPv4Address.
        def assertBadAddress(addr, details):
            with self.assertAddressError(details):
                self.factory(addr)
        assertBadAddress("/", "Address cannot be empty")
        assertBadAddress("/8", "Address cannot be empty")
        assertBadAddress("bogus", "Expected 4 octets")
        assertBadAddress("google.com", "Expected 4 octets")
        assertBadAddress("10/8", "Expected 4 octets")
        assertBadAddress("::1.2.3.4", "Only decimal digits")
        assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))
    def test_valid_netmask(self):
        # A netmask may be given as a prefix length, an expanded
        # netmask, or an expanded hostmask; all must round-trip.
        self.assertEqual(_compat_str(self.factory('192.0.2.0/255.255.255.0')),
                         '192.0.2.0/24')
        for i in range(0, 33):
            # Generate and re-parse the CIDR format (trivial).
            net_str = '0.0.0.0/%d' % i
            net = self.factory(net_str)
            self.assertEqual(_compat_str(net), net_str)
            # Generate and re-parse the expanded netmask.
            self.assertEqual(
                _compat_str(self.factory('0.0.0.0/%s' % net.netmask)), net_str)
            # Zero prefix is treated as decimal.
            self.assertEqual(
                _compat_str(self.factory('0.0.0.0/0%d' % i)),
                net_str)
            # Generate and re-parse the expanded hostmask. The ambiguous
            # cases (/0 and /32) are treated as netmasks.
            if i in (32, 0):
                net_str = '0.0.0.0/%d' % (32 - i)
            self.assertEqual(
                _compat_str(self.factory('0.0.0.0/%s' % net.hostmask)),
                net_str)
    def test_netmask_errors(self):
        # Anything that is neither a prefix length, a netmask, nor a
        # hostmask must be rejected -- including mixed-bit masks.
        def assertBadNetmask(addr, netmask):
            msg = "%r is not a valid netmask" % netmask
            with self.assertNetmaskError(re.escape(msg)):
                self.factory("%s/%s" % (addr, netmask))
        assertBadNetmask("1.2.3.4", "")
        assertBadNetmask("1.2.3.4", "-1")
        assertBadNetmask("1.2.3.4", "+1")
        assertBadNetmask("1.2.3.4", " 1 ")
        assertBadNetmask("1.2.3.4", "0x1")
        assertBadNetmask("1.2.3.4", "33")
        assertBadNetmask("1.2.3.4", "254.254.255.256")
        assertBadNetmask("1.2.3.4", "1.a.2.3")
        assertBadNetmask("1.1.1.1", "254.xyz.2.3")
        assertBadNetmask("1.1.1.1", "240.255.0.0")
        assertBadNetmask("1.1.1.1", "255.254.128.0")
        assertBadNetmask("1.1.1.1", "0.1.127.255")
        assertBadNetmask("1.1.1.1", "pudding")
        assertBadNetmask("1.1.1.1", "::")
    def test_pickle(self):
        # pickle_test is a helper on the shared mixin; see there.
        self.pickle_test('192.0.2.0/27')
        self.pickle_test('192.0.2.0/31')  # IPV4LENGTH - 1
        self.pickle_test('192.0.2.0')     # IPV4LENGTH
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
    """Run the shared v4 address/netmask validation against IPv4Interface."""
    factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
    """IPv4Network-specific tests (plus the shared netmask validation)."""
    factory = ipaddress.IPv4Network

    def test_subnet_of(self):
        """subnet_of() is True only when self nests inside the other net."""
        # containee left of container
        self.assertFalse(
            self.factory('10.0.0.0/30').subnet_of(
                self.factory('10.0.1.0/24')))
        # containee inside container
        self.assertTrue(
            self.factory('10.0.0.0/30').subnet_of(
                self.factory('10.0.0.0/24')))
        # containee right of container (fixed: this case previously
        # duplicated the "left" case above with identical operands;
        # 10.0.2.0 really lies above 10.0.1.0/24)
        self.assertFalse(
            self.factory('10.0.2.0/30').subnet_of(
                self.factory('10.0.1.0/24')))
        # containee larger than container
        self.assertFalse(
            self.factory('10.0.1.0/24').subnet_of(
                self.factory('10.0.0.0/30')))

    def test_supernet_of(self):
        """supernet_of() is the inverse relation of subnet_of()."""
        # containee left of container
        self.assertFalse(
            self.factory('10.0.0.0/30').supernet_of(
                self.factory('10.0.1.0/24')))
        # containee inside container
        self.assertFalse(
            self.factory('10.0.0.0/30').supernet_of(
                self.factory('10.0.0.0/24')))
        # containee right of container (fixed duplicate, as above)
        self.assertFalse(
            self.factory('10.0.2.0/30').supernet_of(
                self.factory('10.0.1.0/24')))
        # containee larger than container
        self.assertTrue(
            self.factory('10.0.0.0/24').supernet_of(
                self.factory('10.0.0.0/30')))

    def test_subnet_of_mixed_types(self):
        """Comparing v4 and v6 networks raises TypeError, both directions."""
        with self.assertRaises(TypeError):
            ipaddress.IPv4Network('10.0.0.0/30').supernet_of(
                ipaddress.IPv6Network('::1/128'))
        with self.assertRaises(TypeError):
            ipaddress.IPv6Network('::1/128').supernet_of(
                ipaddress.IPv4Network('10.0.0.0/30'))
        with self.assertRaises(TypeError):
            ipaddress.IPv4Network('10.0.0.0/30').subnet_of(
                ipaddress.IPv6Network('::1/128'))
        with self.assertRaises(TypeError):
            ipaddress.IPv6Network('::1/128').subnet_of(
                ipaddress.IPv4Network('10.0.0.0/30'))
class NetmaskTestMixin_v6(CommonTestMixin_v6):
    """Input validation on interfaces and networks is very similar"""
    def test_split_netmask(self):
        # Only a single '/' separator is allowed.
        addr = "cafe:cafe::/128/190"
        with self.assertAddressError("Only one '/' permitted in %r" % addr):
            self.factory(addr)
    def test_address_errors(self):
        # The address part reuses the IPv6Address error messages.
        def assertBadAddress(addr, details):
            with self.assertAddressError(details):
                self.factory(addr)
        assertBadAddress("/", "Address cannot be empty")
        assertBadAddress("/8", "Address cannot be empty")
        assertBadAddress("google.com", "At least 3 parts")
        assertBadAddress("1.2.3.4", "At least 3 parts")
        assertBadAddress("10/8", "At least 3 parts")
        assertBadAddress("1234:axy::b", "Only hex digits")
    def test_valid_netmask(self):
        # We only support CIDR for IPv6, because expanded netmasks are not
        # standard notation.
        self.assertEqual(
            _compat_str(self.factory('2001:db8::/32')),
            '2001:db8::/32')
        for i in range(0, 129):
            # Generate and re-parse the CIDR format (trivial).
            net_str = '::/%d' % i
            self.assertEqual(_compat_str(self.factory(net_str)), net_str)
            # Zero prefix is treated as decimal.
            self.assertEqual(_compat_str(self.factory('::/0%d' % i)), net_str)
    def test_netmask_errors(self):
        # Anything other than a plain prefix length is invalid for v6.
        def assertBadNetmask(addr, netmask):
            msg = "%r is not a valid netmask" % netmask
            with self.assertNetmaskError(re.escape(msg)):
                self.factory("%s/%s" % (addr, netmask))
        assertBadNetmask("::1", "")
        assertBadNetmask("::1", "::1")
        assertBadNetmask("::1", "1::")
        assertBadNetmask("::1", "-1")
        assertBadNetmask("::1", "+1")
        assertBadNetmask("::1", " 1 ")
        assertBadNetmask("::1", "0x1")
        assertBadNetmask("::1", "129")
        assertBadNetmask("::1", "1.2.3.4")
        assertBadNetmask("::1", "pudding")
        assertBadNetmask("::", "::")
    def test_pickle(self):
        # pickle_test is a helper on the shared mixin; see there.
        self.pickle_test('2001:db8::1000/124')
        self.pickle_test('2001:db8::1000/127')  # IPV6LENGTH - 1
        self.pickle_test('2001:db8::1000')      # IPV6LENGTH
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
    """Run the shared v6 address/netmask validation against IPv6Interface."""
    factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
    """IPv6Network-specific tests (plus the shared netmask validation).

    Fixture networks: 2000:999:: sorts below 2000:aaa::, which sorts
    below 2000:bbb::, giving distinct left/inside/right cases.
    """
    factory = ipaddress.IPv6Network
    def test_subnet_of(self):
        # containee left of container
        self.assertFalse(
            self.factory('2000:999::/56').subnet_of(
                self.factory('2000:aaa::/48')))
        # containee inside container
        self.assertTrue(
            self.factory('2000:aaa::/56').subnet_of(
                self.factory('2000:aaa::/48')))
        # containee right of container
        self.assertFalse(
            self.factory('2000:bbb::/56').subnet_of(
                self.factory('2000:aaa::/48')))
        # containee larger than container
        self.assertFalse(
            self.factory('2000:aaa::/48').subnet_of(
                self.factory('2000:aaa::/56')))
    def test_supernet_of(self):
        # containee left of container
        self.assertFalse(
            self.factory('2000:999::/56').supernet_of(
                self.factory('2000:aaa::/48')))
        # containee inside container
        self.assertFalse(
            self.factory('2000:aaa::/56').supernet_of(
                self.factory('2000:aaa::/48')))
        # containee right of container
        self.assertFalse(
            self.factory('2000:bbb::/56').supernet_of(
                self.factory('2000:aaa::/48')))
        # containee larger than container
        self.assertTrue(
            self.factory('2000:aaa::/48').supernet_of(
                self.factory('2000:aaa::/56')))
class FactoryFunctionErrors(BaseTestCase):
    """The ip_* factory functions raise a clean ValueError on junk input."""

    def assertFactoryError(self, factory, kind):
        """Ensure a clean ValueError with the expected message"""
        bogus = "camelot"
        template = '%r does not appear to be an IPv4 or IPv6 %s'
        with self.assertCleanError(ValueError, template, bogus, kind):
            factory(bogus)

    def test_ip_address(self):
        self.assertFactoryError(ipaddress.ip_address, "address")

    def test_ip_interface(self):
        self.assertFactoryError(ipaddress.ip_interface, "interface")

    def test_ip_network(self):
        self.assertFactoryError(ipaddress.ip_network, "network")
class LargestObject(ipaddress._TotalOrderingMixin):
    """Never less-than anything; equal only to its own type.

    The remaining comparison operators are filled in by
    _TotalOrderingMixin, making this compare above every other value.
    """
    def __eq__(self, other):
        return isinstance(other, LargestObject)
    def __lt__(self, other):
        return False
class SmallestObject(ipaddress._TotalOrderingMixin):
    """Always less-than anything; equal only to its own type.

    Counterpart of LargestObject: compares below every other value via
    _TotalOrderingMixin.
    """
    def __eq__(self, other):
        return isinstance(other, SmallestObject)
    def __lt__(self, other):
        return True
class ComparisonTests(unittest.TestCase):
    """Equality, ordering and containment across all six ipaddress types."""
    # One instance of each type at value 1 ...
    v4addr = ipaddress.IPv4Address(1)
    v4net = ipaddress.IPv4Network(1)
    v4intf = ipaddress.IPv4Interface(1)
    v6addr = ipaddress.IPv6Address(1)
    v6net = ipaddress.IPv6Network(1)
    v6intf = ipaddress.IPv6Interface(1)
    v4_addresses = [v4addr, v4intf]
    v4_objects = v4_addresses + [v4net]
    v6_addresses = [v6addr, v6intf]
    v6_objects = v6_addresses + [v6net]
    objects = v4_objects + v6_objects
    # ... and a strictly larger twin at value 2 for ordering tests.
    v4addr2 = ipaddress.IPv4Address(2)
    v4net2 = ipaddress.IPv4Network(2)
    v4intf2 = ipaddress.IPv4Interface(2)
    v6addr2 = ipaddress.IPv6Address(2)
    v6net2 = ipaddress.IPv6Network(2)
    v6intf2 = ipaddress.IPv6Interface(2)
    def test_foreign_type_equality(self):
        # __eq__ should never raise TypeError directly
        other = object()
        for obj in self.objects:
            self.assertNotEqual(obj, other)
            self.assertFalse(obj == other)
            self.assertEqual(obj.__eq__(other), NotImplemented)
            self.assertEqual(obj.__ne__(other), NotImplemented)
    def test_mixed_type_equality(self):
        # Ensure none of the internal objects accidentally
        # expose the right set of attributes to become "equal"
        for lhs in self.objects:
            for rhs in self.objects:
                if lhs is rhs:
                    continue
                self.assertNotEqual(lhs, rhs)
    def test_same_type_equality(self):
        # Reflexivity: every object equals itself and is <= and >= itself.
        for obj in self.objects:
            self.assertEqual(obj, obj)
            self.assertTrue(obj <= obj)
            self.assertTrue(obj >= obj)
    def test_same_type_ordering(self):
        # Strict ordering between value-1 and value-2 twins of each type.
        for lhs, rhs in (
                (self.v4addr, self.v4addr2),
                (self.v4net, self.v4net2),
                (self.v4intf, self.v4intf2),
                (self.v6addr, self.v6addr2),
                (self.v6net, self.v6net2),
                (self.v6intf, self.v6intf2),
        ):
            self.assertNotEqual(lhs, rhs)
            self.assertTrue(lhs < rhs)
            self.assertTrue(lhs <= rhs)
            self.assertTrue(rhs > lhs)
            self.assertTrue(rhs >= lhs)
            self.assertFalse(lhs > rhs)
            self.assertFalse(rhs < lhs)
            self.assertFalse(lhs >= rhs)
            self.assertFalse(rhs <= lhs)
    def test_containment(self):
        # Addresses/interfaces belong to the same-version network only.
        for obj in self.v4_addresses:
            self.assertIn(obj, self.v4net)
        for obj in self.v6_addresses:
            self.assertIn(obj, self.v6net)
        for obj in self.v4_objects + [self.v6net]:
            self.assertNotIn(obj, self.v6net)
        for obj in self.v6_objects + [self.v4net]:
            self.assertNotIn(obj, self.v4net)
    def test_mixed_type_ordering(self):
        # Ordering across unrelated ipaddress types raises TypeError.
        for lhs in self.objects:
            for rhs in self.objects:
                if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
                    continue
                self.assertRaises(TypeError, lambda: lhs < rhs)
                self.assertRaises(TypeError, lambda: lhs > rhs)
                self.assertRaises(TypeError, lambda: lhs <= rhs)
                self.assertRaises(TypeError, lambda: lhs >= rhs)
    def test_foreign_type_ordering(self):
        # In Python 2.x, the semantics are much less convenient, so skip all of
        # these tests there.
        if sys.version_info < (3, 0):
            return
        other = object()
        smallest = SmallestObject()
        largest = LargestObject()
        for obj in self.objects:
            # Plain objects don't order against ipaddress types at all ...
            with self.assertRaises(TypeError):
                obj < other
            with self.assertRaises(TypeError):
                obj > other
            with self.assertRaises(TypeError):
                obj <= other
            with self.assertRaises(TypeError):
                obj >= other
            # ... but types that define their own ordering are honoured
            # via the reflected comparison methods.
            self.assertTrue(obj < largest)
            self.assertFalse(obj > largest)
            self.assertTrue(obj <= largest)
            self.assertFalse(obj >= largest)
            self.assertFalse(obj < smallest)
            self.assertTrue(obj > smallest)
            self.assertFalse(obj <= smallest)
            self.assertTrue(obj >= smallest)
    def test_mixed_type_key(self):
        # with get_mixed_type_key, you can sort addresses and network.
        v4_ordered = [self.v4addr, self.v4net, self.v4intf]
        v6_ordered = [self.v6addr, self.v6net, self.v6intf]
        self.assertEqual(v4_ordered,
                         sorted(self.v4_objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(v6_ordered,
                         sorted(self.v6_objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(v4_ordered + v6_ordered,
                         sorted(self.objects,
                                key=ipaddress.get_mixed_type_key))
        # Non-ipaddress values fall through to NotImplemented.
        self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))
    def test_incompatible_versions(self):
        # These should always raise TypeError
        v4addr = ipaddress.ip_address('1.1.1.1')
        v4net = ipaddress.ip_network('1.1.1.1')
        v6addr = ipaddress.ip_address('::1')
        v6net = ipaddress.ip_network('::1')
        self.assertRaises(TypeError, v4addr.__lt__, v6addr)
        self.assertRaises(TypeError, v4addr.__gt__, v6addr)
        self.assertRaises(TypeError, v4net.__lt__, v6net)
        self.assertRaises(TypeError, v4net.__gt__, v6net)
        self.assertRaises(TypeError, v6addr.__lt__, v4addr)
        self.assertRaises(TypeError, v6addr.__gt__, v4addr)
        self.assertRaises(TypeError, v6net.__lt__, v4net)
        self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
    def setUp(self):
        # Shared fixtures for all IpaddrUnitTest methods.
        self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
        self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
        self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
        # NOTE(review): despite its name, ipv6_address is built as an
        # IPv6Interface, not an IPv6Address -- some tests below use its
        # ._ip attribute; confirm before "fixing" the type.
        self.ipv6_address = ipaddress.IPv6Interface(
            '2001:658:22a:cafe:200:0:0:1')
        self.ipv6_interface = ipaddress.IPv6Interface(
            '2001:658:22a:cafe:200:0:0:1/64')
        self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertTrue(re.match("IPv4Interface\(u?'1.2.3.4/32'\)",
repr(ipaddress.IPv4Interface('1.2.3.4'))))
self.assertTrue(re.match("IPv6Interface\(u?'::1/128'\)",
repr(ipaddress.IPv6Interface('::1'))))
    # issue #16531: constructing IPv4Network from an (address, mask) tuple
    def testIPv4Tuple(self):
        # The address element may be a string, an IPv4Address, or an int;
        # the mask element may be a prefix length or a dotted netmask.
        # /32
        ip = ipaddress.IPv4Address('192.0.2.1')
        net = ipaddress.IPv4Network('192.0.2.1/32')
        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 32)), net)
        self.assertEqual(ipaddress.IPv4Network((ip, 32)), net)
        self.assertEqual(ipaddress.IPv4Network((3221225985, 32)), net)
        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
                                                '255.255.255.255')), net)
        self.assertEqual(ipaddress.IPv4Network((ip,
                                                '255.255.255.255')), net)
        self.assertEqual(ipaddress.IPv4Network((3221225985,
                                                '255.255.255.255')), net)
        # strict=True and host bits set
        self.assertRaises(ValueError, ipaddress.IPv4Network, ('192.0.2.1', 24))
        self.assertRaises(ValueError, ipaddress.IPv4Network, (ip, 24))
        self.assertRaises(ValueError, ipaddress.IPv4Network, (3221225985, 24))
        self.assertRaises(
            ValueError, ipaddress.IPv4Network, ('192.0.2.1', '255.255.255.0'))
        self.assertRaises(
            ValueError, ipaddress.IPv4Network, (ip, '255.255.255.0'))
        self.assertRaises(
            ValueError, ipaddress.IPv4Network, (3221225985, '255.255.255.0'))
        # strict=False and host bits set
        net = ipaddress.IPv4Network('192.0.2.0/24')
        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 24),
                                               strict=False), net)
        self.assertEqual(ipaddress.IPv4Network((ip, 24),
                                               strict=False), net)
        self.assertEqual(ipaddress.IPv4Network((3221225985, 24),
                                               strict=False), net)
        self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
                                                '255.255.255.0'),
                                               strict=False), net)
        self.assertEqual(ipaddress.IPv4Network((ip,
                                                '255.255.255.0'),
                                               strict=False), net)
        self.assertEqual(ipaddress.IPv4Network((3221225985,
                                                '255.255.255.0'),
                                               strict=False), net)
        # /24
        ip = ipaddress.IPv4Address('192.0.2.0')
        net = ipaddress.IPv4Network('192.0.2.0/24')
        self.assertEqual(ipaddress.IPv4Network(('192.0.2.0',
                                                '255.255.255.0')), net)
        self.assertEqual(ipaddress.IPv4Network((ip,
                                                '255.255.255.0')), net)
        self.assertEqual(ipaddress.IPv4Network((3221225984,
                                                '255.255.255.0')), net)
        self.assertEqual(ipaddress.IPv4Network(('192.0.2.0', 24)), net)
        self.assertEqual(ipaddress.IPv4Network((ip, 24)), net)
        self.assertEqual(ipaddress.IPv4Network((3221225984, 24)), net)
        # Interfaces accept the same tuple form, keeping the host bits.
        self.assertEqual(ipaddress.IPv4Interface(('192.0.2.1', 24)),
                         ipaddress.IPv4Interface('192.0.2.1/24'))
        self.assertEqual(ipaddress.IPv4Interface((3221225985, 24)),
                         ipaddress.IPv4Interface('192.0.2.1/24'))
    # issue #16531: constructing IPv6Network from an (address, mask) tuple
    def testIPv6Tuple(self):
        # The address element may be a string, an IPv6Address, or an int;
        # the prefix may be given as a string or an int.
        # /128
        ip = ipaddress.IPv6Address('2001:db8::')
        net = ipaddress.IPv6Network('2001:db8::/128')
        self.assertEqual(
            ipaddress.IPv6Network(('2001:db8::', '128')),
            net)
        self.assertEqual(
            ipaddress.IPv6Network(
                (42540766411282592856903984951653826560, 128)),
            net)
        self.assertEqual(ipaddress.IPv6Network((ip, '128')),
                         net)
        ip = ipaddress.IPv6Address('2001:db8::')
        net = ipaddress.IPv6Network('2001:db8::/96')
        self.assertEqual(
            ipaddress.IPv6Network(('2001:db8::', '96')),
            net)
        self.assertEqual(
            ipaddress.IPv6Network(
                (42540766411282592856903984951653826560, 96)),
            net)
        self.assertEqual(
            ipaddress.IPv6Network((ip, '96')),
            net)
        # strict=True and host bits set
        ip = ipaddress.IPv6Address('2001:db8::1')
        self.assertRaises(
            ValueError, ipaddress.IPv6Network, ('2001:db8::1', 96))
        self.assertRaises(
            ValueError, ipaddress.IPv6Network,
            (42540766411282592856903984951653826561, 96))
        self.assertRaises(ValueError, ipaddress.IPv6Network, (ip, 96))
        # strict=False and host bits set
        net = ipaddress.IPv6Network('2001:db8::/96')
        self.assertEqual(ipaddress.IPv6Network(('2001:db8::1', 96),
                                               strict=False),
                         net)
        self.assertEqual(
            ipaddress.IPv6Network(
                (42540766411282592856903984951653826561, 96), strict=False),
            net)
        self.assertEqual(
            ipaddress.IPv6Network((ip, 96), strict=False),
            net)
        # /96
        self.assertEqual(ipaddress.IPv6Interface(('2001:db8::1', '96')),
                         ipaddress.IPv6Interface('2001:db8::1/96'))
        self.assertEqual(
            ipaddress.IPv6Interface(
                (42540766411282592856903984951653826561, '96')),
            ipaddress.IPv6Interface('2001:db8::1/96'))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2 ** 16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2 ** 16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
ip1 = ipaddress.IPv4Address('10.10.10.10')
ip2 = ipaddress.IPv4Address('10.10.10.11')
ip3 = ipaddress.IPv4Address('10.10.10.12')
self.assertEqual(list(ipaddress._find_address_range([ip1])),
[(ip1, ip1)])
self.assertEqual(list(ipaddress._find_address_range([ip1, ip3])),
[(ip1, ip1), (ip3, ip3)])
self.assertEqual(list(ipaddress._find_address_range([ip1, ip2, ip3])),
[(ip1, ip3)])
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertTrue(
re.match("IPv4Network\(u?'1.2.3.0/24'\)", repr(self.ipv4_network)))
    def testMissingAddressVersion(self):
        # A _BaseAddress subclass that doesn't define .version must raise
        # NotImplementedError naming the subclass.
        class Broken(ipaddress._BaseAddress):
            pass
        broken = Broken()
        with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
            broken.version
    def testMissingNetworkVersion(self):
        # Same check as testMissingAddressVersion, for _BaseNetwork.
        class Broken(ipaddress._BaseNetwork):
            pass
        broken = Broken('127.0.0.1')
        with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
            broken.version
    def testMissingAddressClass(self):
        # A _BaseNetwork subclass must also provide _address_class.
        class Broken(ipaddress._BaseNetwork):
            pass
        broken = Broken('127.0.0.1')
        with self.assertRaisesRegex(NotImplementedError, "Broken.*address"):
            broken._address_class
    def testGetNetwork(self):
        # network_address and hostmask as both int and string.
        self.assertEqual(int(self.ipv4_network.network_address), 16909056)
        self.assertEqual(
            _compat_str(self.ipv4_network.network_address),
            '1.2.3.0')
        self.assertEqual(int(self.ipv6_network.network_address),
                         42540616829182469433403647294022090752)
        self.assertEqual(_compat_str(self.ipv6_network.network_address),
                         '2001:658:22a:cafe::')
        self.assertEqual(_compat_str(self.ipv6_network.hostmask),
                         '::ffff:ffff:ffff:ffff')
    def testIpFromInt(self):
        # Integer constructor arguments round-trip with string parsing
        # for interfaces and networks, and ip_network() picks the right
        # version from the integer's magnitude.
        self.assertEqual(self.ipv4_interface._ip,
                         ipaddress.IPv4Interface(16909060)._ip)
        ipv4 = ipaddress.ip_network('1.2.3.4')
        ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
        self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
        self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
        v6_int = 42540616829182469433547762482097946625
        self.assertEqual(self.ipv6_interface._ip,
                         ipaddress.IPv6Interface(v6_int)._ip)
        # Small ints default to v4, large ints to v6.
        self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
                         4)
        self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
                         6)
    def testIpFromPacked(self):
        # Packed (big-endian bytes) input: 4 bytes -> v4, 16 bytes -> v6.
        address = ipaddress.ip_address
        self.assertEqual(self.ipv4_interface._ip,
                         ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
        self.assertEqual(address('255.254.253.252'),
                         address(b'\xff\xfe\xfd\xfc'))
        self.assertEqual(
            self.ipv6_interface.ip,
            ipaddress.ip_interface(
                b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
                b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
        self.assertEqual(
            address('ffff:2:3:4:ffff::'),
            address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
                    b'\xff\xff' + b'\x00' * 6))
        self.assertEqual(address('::'),
                         address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(_compat_str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(_compat_str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
    def testGetNetmask(self):
        # netmask as int and string for v4; for v6 only the int form and
        # the prefix length are checked (no expanded v6 netmask notation).
        self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
        self.assertEqual(
            _compat_str(self.ipv4_network.netmask),
            '255.255.255.0')
        self.assertEqual(int(self.ipv6_network.netmask),
                         340282366920938463444927863358058659840)
        self.assertEqual(self.ipv6_network.prefixlen, 64)
    def testZeroNetmask(self):
        # A /0 interface has an all-zero netmask, and the string '0'
        # parses as prefix length 0.
        ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
        self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
        self.assertEqual(ipv4_zero_netmask._prefix_from_prefix_string('0'), 0)
        # Removed all _is_valid_netmask tests - the method was unused upstream
        ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
        self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
        self.assertEqual(ipv6_zero_netmask._prefix_from_prefix_string('0'), 0)
    def testIPv4NetAndHostmasks(self):
        # _is_hostmask accepts only exact inverted netmasks; a hostmask
        # in the mask position is converted to the equivalent prefix.
        net = self.ipv4_network
        # Removed all _is_valid_netmask tests - the method was unused upstream
        self.assertFalse(net._is_hostmask('invalid'))
        self.assertTrue(net._is_hostmask('128.255.255.255'))
        self.assertFalse(net._is_hostmask('255.255.255.255'))
        self.assertFalse(net._is_hostmask('1.2.3.4'))
        # 0.0.0.255 is the hostmask form of /24.
        net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
        self.assertEqual(net.prefixlen, 24)
    def testGetBroadcast(self):
        # broadcast_address is the highest address in the network.
        self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
        self.assertEqual(
            _compat_str(self.ipv4_network.broadcast_address),
            '1.2.3.255')
        self.assertEqual(int(self.ipv6_network.broadcast_address),
                         42540616829182469451850391367731642367)
        self.assertEqual(_compat_str(self.ipv6_network.broadcast_address),
                         '2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
    def testGetSupernet(self):
        # supernet() widens the prefix by one bit; /0 is its own supernet.
        self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
        self.assertEqual(
            _compat_str(self.ipv4_network.supernet().network_address),
            '1.2.2.0')
        self.assertEqual(
            ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
            ipaddress.IPv4Network('0.0.0.0/0'))
        self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
        self.assertEqual(
            _compat_str(self.ipv6_network.supernet().network_address),
            '2001:658:22a:cafe::')
        self.assertEqual(
            ipaddress.IPv6Interface('::0/0').network.supernet(),
            ipaddress.IPv6Network('::0/0'))
    def testGetSupernet3(self):
        # supernet(3) widens the prefix by three bits at once.
        self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
        self.assertEqual(
            _compat_str(self.ipv4_network.supernet(3).network_address),
            '1.2.0.0')
        self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
        self.assertEqual(
            _compat_str(self.ipv6_network.supernet(3).network_address),
            '2001:658:22a:caf8::')
    def testGetSupernet4(self):
        # new_prefix must be shorter than the current prefix, and cannot
        # be combined with prefixlen_diff; when valid the two spellings
        # agree.
        self.assertRaises(ValueError, self.ipv4_network.supernet,
                          prefixlen_diff=2, new_prefix=1)
        self.assertRaises(ValueError, self.ipv4_network.supernet,
                          new_prefix=25)
        self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
                         self.ipv4_network.supernet(new_prefix=22))
        self.assertRaises(ValueError, self.ipv6_network.supernet,
                          prefixlen_diff=2, new_prefix=1)
        self.assertRaises(ValueError, self.ipv6_network.supernet,
                          new_prefix=65)
        self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
                         self.ipv6_network.supernet(new_prefix=62))
def testHosts(self):
hosts = list(self.ipv4_network.hosts())
self.assertEqual(254, len(hosts))
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
# special case where only 1 bit is left for address
self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
ipaddress.IPv4Address('2.0.0.1')],
list(ipaddress.ip_network('2.0.0.0/31').hosts()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
sorted(self.ipv4_network.subnets(new_prefix=27)))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(new_prefix=23))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(prefixlen_diff=3,
new_prefix=27))
self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
sorted(self.ipv6_network.subnets(new_prefix=68)))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(new_prefix=63))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(prefixlen_diff=4,
new_prefix=68))
    def testGetSubnets(self):
        # Default subnets() splits one bit deeper into two halves.
        self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
        self.assertEqual(
            _compat_str(list(self.ipv4_network.subnets())[0].network_address),
            '1.2.3.0')
        self.assertEqual(
            _compat_str(list(self.ipv4_network.subnets())[1].network_address),
            '1.2.3.128')
        self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddress.IPv4Network('1.2.3.4/32')
subnets1 = [_compat_str(x) for x in ip.subnets()]
subnets2 = [_compat_str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddress.IPv6Network('::1/128')
subnets1 = [_compat_str(x) for x in ip.subnets()]
subnets2 = [_compat_str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
    def testSubnet2(self):
        # subnets(2) yields the four quarter-networks, in order.
        ips = [str(x) for x in self.ipv4_network.subnets(2)]
        self.assertEqual(
            ips,
            ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
        ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
        self.assertEqual(
            ipsv6,
            ['2001:658:22a:cafe::/66',
             '2001:658:22a:cafe:4000::/66',
             '2001:658:22a:cafe:8000::/66',
             '2001:658:22a:cafe:c000::/66'])
    def testGetSubnets3(self):
        # subnets(8) yields 256 host-sized subnets in address order.
        subnets = [str(x) for x in self.ipv4_network.subnets(8)]
        self.assertEqual(
            subnets[:3],
            ['1.2.3.0/32', '1.2.3.1/32', '1.2.3.2/32'])
        self.assertEqual(
            subnets[-3:],
            ['1.2.3.253/32', '1.2.3.254/32', '1.2.3.255/32'])
        self.assertEqual(len(subnets), 256)
        ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/120')
        subnets = [str(x) for x in ipv6_network.subnets(8)]
        self.assertEqual(
            subnets[:3],
            ['2001:658:22a:cafe::/128',
             '2001:658:22a:cafe::1/128',
             '2001:658:22a:cafe::2/128'])
        self.assertEqual(
            subnets[-3:],
            ['2001:658:22a:cafe::fd/128',
             '2001:658:22a:cafe::fe/128',
             '2001:658:22a:cafe::ff/128'])
        self.assertEqual(len(subnets), 256)
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(65))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(65))
def testSupernetFailsForLargeCidrDiff(self):
    """supernet() must reject a prefixlen_diff larger than the prefix."""
    for net, diff in ((self.ipv4_interface.network, 25),
                      (self.ipv6_interface.network, 65)):
        self.assertRaises(ValueError, net.supernet, diff)
def testSubnetFailsForNegativeCidrDiff(self):
    """subnets() must reject a negative prefixlen_diff."""
    for net in (self.ipv4_interface.network, self.ipv4_network,
                self.ipv6_interface.network, self.ipv6_network):
        self.assertRaises(ValueError, list, net.subnets(-1))
def testGetNum_Addresses(self):
    """num_addresses counts every address in a network/subnet/supernet."""
    self.assertEqual(self.ipv4_network.num_addresses, 256)
    self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
                     128)
    self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
    # 2**64 addresses in a /64, half that in a /65, double in a /63.
    self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
    self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
                     9223372036854775808)
    self.assertEqual(self.ipv6_network.supernet().num_addresses,
                     36893488147419103232)
def testContains(self):
    """`in` accepts interfaces and addresses; a network never 'contains'
    another network object."""
    self.assertTrue(ipaddress.IPv4Interface('1.2.3.128/25') in
                    self.ipv4_network)
    self.assertFalse(ipaddress.IPv4Interface('1.2.4.1/24') in
                     self.ipv4_network)
    # We can test addresses and string as well.
    addr1 = ipaddress.IPv4Address('1.2.3.37')
    self.assertTrue(addr1 in self.ipv4_network)
    # issue 61, bad network comparison on like-ip'd network objects
    # with identical broadcast addresses.
    self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
        ipaddress.IPv4Network('1.0.0.0/15')))
def testNth(self):
    """Indexing a network returns the nth address; out of range raises."""
    self.assertEqual(_compat_str(self.ipv4_network[5]), '1.2.3.5')
    self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
    self.assertEqual(_compat_str(self.ipv6_network[5]),
                     '2001:658:22a:cafe::5')
    # A /64 holds exactly 2**64 addresses, so 1 << 64 is one past the end.
    self.assertRaises(IndexError, self.ipv6_network.__getitem__, 1 << 64)
def testGetitem(self):
    """Indexing (incl. negative indices) agrees with full iteration."""
    # http://code.google.com/p/ipaddr-py/issues/detail?id=15
    addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
    self.assertEqual(28, addr.prefixlen)
    addr_list = list(addr)
    self.assertEqual('172.31.255.128', str(addr_list[0]))
    self.assertEqual('172.31.255.128', str(addr[0]))
    self.assertEqual('172.31.255.143', str(addr_list[-1]))
    self.assertEqual('172.31.255.143', str(addr[-1]))
    self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
    """__eq__ is True only for same version, address and prefix length."""
    self.assertTrue(self.ipv4_interface ==
                    ipaddress.IPv4Interface('1.2.3.4/24'))
    # Different prefix, different version, and non-address objects all
    # compare unequal.
    for other in (ipaddress.IPv4Interface('1.2.3.4/23'),
                  ipaddress.IPv6Interface('::1.2.3.4/24'),
                  '', [], 2):
        self.assertFalse(self.ipv4_interface == other)
    self.assertTrue(
        self.ipv6_interface ==
        ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
    for other in (ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'),
                  ipaddress.IPv4Interface('1.2.3.4/23'),
                  '', [], 2):
        self.assertFalse(self.ipv6_interface == other)
def testNotEqual(self):
    """__ne__ is the exact negation of __eq__ for interfaces/addresses."""
    self.assertFalse(self.ipv4_interface !=
                     ipaddress.IPv4Interface('1.2.3.4/24'))
    for other in (ipaddress.IPv4Interface('1.2.3.4/23'),
                  ipaddress.IPv6Interface('::1.2.3.4/24'),
                  '', [], 2):
        self.assertTrue(self.ipv4_interface != other)
    for other in (ipaddress.IPv4Address('1.2.3.5'), '', [], 2):
        self.assertTrue(self.ipv4_address != other)
    self.assertFalse(
        self.ipv6_interface !=
        ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
    for other in (ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'),
                  ipaddress.IPv4Interface('1.2.3.4/23'),
                  '', [], 2):
        self.assertTrue(self.ipv6_interface != other)
    for other in (ipaddress.IPv4Address('1.2.3.4'), '', [], 2):
        self.assertTrue(self.ipv6_address != other)
def testSlash32Constructor(self):
    """A dotted-quad 255.255.255.255 netmask normalizes to /32."""
    iface = ipaddress.IPv4Interface('1.2.3.4/255.255.255.255')
    self.assertEqual(_compat_str(iface), '1.2.3.4/32')
def testSlash128Constructor(self):
    """An explicit /128 is preserved in the string form."""
    iface = ipaddress.IPv6Interface('::1/128')
    self.assertEqual(_compat_str(iface), '::1/128')
def testSlash0Constructor(self):
    """An all-zeros dotted-quad netmask normalizes to /0."""
    iface = ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')
    self.assertEqual(_compat_str(iface), '1.2.3.4/0')
def testCollapsing(self):
    """collapse_addresses merges duplicates, adjacent runs and contained
    networks, and rejects mixed IP versions."""
    # test only IP addresses including some duplicates
    ip1 = ipaddress.IPv4Address('1.1.1.0')
    ip2 = ipaddress.IPv4Address('1.1.1.1')
    ip3 = ipaddress.IPv4Address('1.1.1.2')
    ip4 = ipaddress.IPv4Address('1.1.1.3')
    ip5 = ipaddress.IPv4Address('1.1.1.4')
    ip6 = ipaddress.IPv4Address('1.1.1.0')
    # check that addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses(
        [ip1, ip2, ip3, ip4, ip5, ip6])
    self.assertEqual(
        list(collapsed),
        [ipaddress.IPv4Network('1.1.1.0/30'),
         ipaddress.IPv4Network('1.1.1.4/32')])
    # test a contiguous, aligned run of plain addresses (no duplicates)
    ip1 = ipaddress.IPv4Address('1.1.1.0')
    ip2 = ipaddress.IPv4Address('1.1.1.1')
    ip3 = ipaddress.IPv4Address('1.1.1.2')
    ip4 = ipaddress.IPv4Address('1.1.1.3')
    # check that addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.1.0/30')])
    # test only IP networks
    ip1 = ipaddress.IPv4Network('1.1.0.0/24')
    ip2 = ipaddress.IPv4Network('1.1.1.0/24')
    ip3 = ipaddress.IPv4Network('1.1.2.0/24')
    ip4 = ipaddress.IPv4Network('1.1.3.0/24')
    ip5 = ipaddress.IPv4Network('1.1.4.0/24')
    # stored in no particular order b/c we want CollapseAddr to call
    # [].sort
    ip6 = ipaddress.IPv4Network('1.1.0.0/22')
    # check that addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses(
        [ip1, ip2, ip3, ip4, ip5, ip6])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.0.0/22'),
                      ipaddress.IPv4Network('1.1.4.0/24')])
    # test that two addresses are supernet'ed properly
    collapsed = ipaddress.collapse_addresses([ip1, ip2])
    self.assertEqual(list(collapsed),
                     [ipaddress.IPv4Network('1.1.0.0/23')])
    # test same IP networks
    ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
    self.assertEqual(
        list(ipaddress.collapse_addresses([ip_same1, ip_same2])),
        [ip_same1])
    # test same IP addresses
    ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
    self.assertEqual(
        list(ipaddress.collapse_addresses([ip_same1, ip_same2])),
        [ipaddress.ip_network('1.1.1.1/32')])
    ip1 = ipaddress.IPv6Network('2001::/100')
    ip2 = ipaddress.IPv6Network('2001::/120')
    ip3 = ipaddress.IPv6Network('2001::/96')
    # test that ipv6 addresses are subsumed properly.
    collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
    self.assertEqual(list(collapsed), [ip3])
    # the toejam test: mixing IPv4 and IPv6 must raise TypeError
    addr_tuples = [
        (ipaddress.ip_address('1.1.1.1'),
         ipaddress.ip_address('::1')),
        (ipaddress.IPv4Network('1.1.0.0/24'),
         ipaddress.IPv6Network('2001::/120')),
        (ipaddress.IPv4Network('1.1.0.0/32'),
         ipaddress.IPv6Network('2001::/128')),
    ]
    for ip1, ip2 in addr_tuples:
        self.assertRaises(TypeError, ipaddress.collapse_addresses,
                          [ip1, ip2])
def testSummarizing(self):
    """summarize_address_range covers [first, last] with minimal networks.

    Also checks the error cases: unknown IP versions, mixed versions,
    reversed bounds, and non-address arguments.
    """
    summarize = ipaddress.summarize_address_range
    ip1 = ipaddress.ip_address('1.1.1.0')
    ip2 = ipaddress.ip_address('1.1.1.255')
    # summarize works only for IPv4 & IPv6
    class IPv7Address(ipaddress.IPv6Address):
        @property
        def version(self):
            return 7
    ip_invalid1 = IPv7Address('::1')
    ip_invalid2 = IPv7Address('::1')
    self.assertRaises(ValueError, list,
                      summarize(ip_invalid1, ip_invalid2))
    # test that a summary over ip4 & ip6 fails
    self.assertRaises(TypeError, list,
                      summarize(ip1, ipaddress.IPv6Address('::1')))
    # test a /24 is summarized properly
    self.assertEqual(list(summarize(ip1, ip2))[0],
                     ipaddress.ip_network('1.1.1.0/24'))
    # test an IPv4 range that isn't on a network byte boundary
    ip2 = ipaddress.ip_address('1.1.1.8')
    self.assertEqual(list(summarize(ip1, ip2)),
                     [ipaddress.ip_network('1.1.1.0/29'),
                      ipaddress.ip_network('1.1.1.8')])
    # all!
    ip1 = ipaddress.IPv4Address(0)
    ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
    self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
                     list(summarize(ip1, ip2)))
    ip1 = ipaddress.ip_address('1::')
    ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    # test an IPv6 is summarized properly
    self.assertEqual(list(summarize(ip1, ip2))[0],
                     ipaddress.ip_network('1::/16'))
    # test an IPv6 range that isn't on a network byte boundary
    ip2 = ipaddress.ip_address('2::')
    self.assertEqual(list(summarize(ip1, ip2)),
                     [ipaddress.ip_network('1::/16'),
                      ipaddress.ip_network('2::/128')])
    # test exception raised when first is greater than last
    self.assertRaises(ValueError, list,
                      summarize(ipaddress.ip_address('1.1.1.0'),
                                ipaddress.ip_address('1.1.0.0')))
    # test exception raised when first and last aren't IP addresses
    # (an exact copy-paste duplicate of this assertion was removed)
    self.assertRaises(TypeError, list,
                      summarize(ipaddress.ip_network('1.1.1.0'),
                                ipaddress.ip_network('1.1.0.0')))
    # test exception raised when first and last are not same version
    self.assertRaises(TypeError, list,
                      summarize(ipaddress.ip_address('::'),
                                ipaddress.ip_network('1.1.0.0')))
def testAddressComparison(self):
    """<= holds for equal addresses and for strictly smaller ones."""
    for lo, hi in (('1.1.1.1', '1.1.1.1'),
                   ('1.1.1.1', '1.1.1.2'),
                   ('::1', '::1'),
                   ('::1', '::2')):
        self.assertTrue(ipaddress.ip_address(lo) <=
                        ipaddress.ip_address(hi))
def testInterfaceComparison(self):
    """Interfaces order by address first, then by prefix length."""
    eq_pairs = (('1.1.1.1/24', '1.1.1.1/24'),
                ('::1/64', '::1/64'))
    # (smaller, larger): covers shorter prefix, smaller address, and the
    # address-dominates-prefix ordering, for both versions.
    lt_pairs = (('1.1.1.1/16', '1.1.1.1/24'),
                ('1.1.1.1/24', '1.1.1.2/24'),
                ('1.1.1.2/16', '1.1.1.1/24'),
                ('::1/64', '::1/80'),
                ('::1/64', '::2/64'),
                ('::2/48', '::1/64'))
    for a, b in eq_pairs:
        self.assertTrue(ipaddress.ip_interface(a) ==
                        ipaddress.ip_interface(b))
    for smaller, larger in lt_pairs:
        self.assertTrue(ipaddress.ip_interface(smaller) <
                        ipaddress.ip_interface(larger))
        self.assertTrue(ipaddress.ip_interface(larger) >
                        ipaddress.ip_interface(smaller))
def testNetworkComparison(self):
    """Networks order by network address first, then by netmask.

    Also checks cross-version comparisons raise TypeError, plus the
    issue-19 and issue-28 sorting regressions and <=/>= semantics.
    """
    # ip1 and ip2 have the same network address
    ip1 = ipaddress.IPv4Network('1.1.1.0/24')
    ip2 = ipaddress.IPv4Network('1.1.1.0/32')
    ip3 = ipaddress.IPv4Network('1.1.2.0/24')
    self.assertTrue(ip1 < ip3)
    self.assertTrue(ip3 > ip2)
    self.assertEqual(ip1.compare_networks(ip1), 0)
    # if addresses are the same, sort by netmask
    self.assertEqual(ip1.compare_networks(ip2), -1)
    self.assertEqual(ip2.compare_networks(ip1), 1)
    self.assertEqual(ip1.compare_networks(ip3), -1)
    self.assertEqual(ip3.compare_networks(ip1), 1)
    self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
    ip1 = ipaddress.IPv6Network('2001:2000::/96')
    ip2 = ipaddress.IPv6Network('2001:2001::/96')
    ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
    self.assertTrue(ip1 < ip3)
    self.assertTrue(ip3 > ip2)
    self.assertEqual(ip1.compare_networks(ip3), -1)
    self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
    # Test comparing different protocols.
    # Should always raise a TypeError.
    self.assertRaises(TypeError,
                      self.ipv4_network.compare_networks,
                      self.ipv6_network)
    ipv6 = ipaddress.IPv6Interface('::/0')
    ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
    self.assertRaises(TypeError, ipv4.__lt__, ipv6)
    self.assertRaises(TypeError, ipv4.__gt__, ipv6)
    self.assertRaises(TypeError, ipv6.__lt__, ipv4)
    self.assertRaises(TypeError, ipv6.__gt__, ipv4)
    # Regression test for issue 19.
    ip1 = ipaddress.ip_network('10.1.2.128/25')
    self.assertFalse(ip1 < ip1)
    self.assertFalse(ip1 > ip1)
    ip2 = ipaddress.ip_network('10.1.3.0/24')
    self.assertTrue(ip1 < ip2)
    self.assertFalse(ip2 < ip1)
    self.assertFalse(ip1 > ip2)
    self.assertTrue(ip2 > ip1)
    ip3 = ipaddress.ip_network('10.1.3.0/25')
    self.assertTrue(ip2 < ip3)
    self.assertFalse(ip3 < ip2)
    self.assertFalse(ip2 > ip3)
    self.assertTrue(ip3 > ip2)
    # Regression test for issue 28.
    ip1 = ipaddress.ip_network('10.10.10.0/31')
    ip2 = ipaddress.ip_network('10.10.10.0')
    ip3 = ipaddress.ip_network('10.10.10.2/31')
    ip4 = ipaddress.ip_network('10.10.10.2')
    # BUGFIX: this local was named 'sorted', shadowing the builtin.
    expected_order = [ip1, ip2, ip3, ip4]
    unsorted = [ip2, ip4, ip1, ip3]
    unsorted.sort()
    self.assertEqual(expected_order, unsorted)
    unsorted = [ip4, ip1, ip3, ip2]
    unsorted.sort()
    self.assertEqual(expected_order, unsorted)
    self.assertRaises(TypeError, ip1.__lt__,
                      ipaddress.ip_address('10.10.10.0'))
    self.assertRaises(TypeError, ip2.__lt__,
                      ipaddress.ip_address('10.10.10.0'))
    # <=, >=
    self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
                    ipaddress.ip_network('1.1.1.1'))
    self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
                    ipaddress.ip_network('1.1.1.2'))
    self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
                     ipaddress.ip_network('1.1.1.1'))
    self.assertTrue(ipaddress.ip_network('::1') <=
                    ipaddress.ip_network('::1'))
    self.assertTrue(ipaddress.ip_network('::1') <=
                    ipaddress.ip_network('::2'))
    self.assertFalse(ipaddress.ip_network('::2') <=
                     ipaddress.ip_network('::1'))
def testStrictNetworks(self):
    """Host bits set is an error under strict parsing (the default)."""
    for bad in ('192.168.1.1/24', '::1/120'):
        self.assertRaises(ValueError, ipaddress.ip_network, bad)
def testOverlaps(self):
    """overlaps() is True iff the two networks share any address."""
    contained = ipaddress.IPv4Network('1.2.3.0/30')
    disjoint = ipaddress.IPv4Network('1.2.2.0/24')
    nested = ipaddress.IPv4Network('1.2.2.64/26')
    self.assertTrue(self.ipv4_network.overlaps(contained))
    self.assertFalse(self.ipv4_network.overlaps(disjoint))
    self.assertTrue(disjoint.overlaps(nested))
def testEmbeddedIpv4(self):
    """IPv4-compatible (::a.b.c.d) and IPv4-mapped (::ffff:a.b.c.d)
    forms embed a dotted quad; a colon-group mix is invalid."""
    ipv4_string = '192.168.0.1'
    ipv4 = ipaddress.IPv4Interface(ipv4_string)
    v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
    self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
    # The mapped form differs numerically by the ffff group.
    v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
    self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
    self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
                      '2001:1.1.1.1:1.1.1.1')
# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
    """Embedded-IPv4 notations parse to the equivalent pure-hex form."""
    # RFC4291 2.5.5.2
    self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
                     ipaddress.ip_address('::FFFF:c000:201'))
    # RFC4291 2.2 (part 3) x::d.d.d.d
    self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
                     ipaddress.ip_address('FFFF::c000:201'))
def testIPVersion(self):
    """version reports 4 for IPv4 and 6 for IPv6 addresses."""
    self.assertEqual(4, self.ipv4_address.version)
    self.assertEqual(6, self.ipv6_address.version)
def testMaxPrefixLength(self):
    """max_prefixlen is 32 for IPv4 and 128 for IPv6."""
    self.assertEqual(32, self.ipv4_interface.max_prefixlen)
    self.assertEqual(128, self.ipv6_interface.max_prefixlen)
def testPacked(self):
    """packed yields the big-endian binary form of the address."""
    self.assertEqual(self.ipv4_address.packed,
                     b'\x01\x02\x03\x04')
    self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
                     b'\xff\xfe\xfd\xfc')
    self.assertEqual(self.ipv6_address.packed,
                     b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
                     b'\x02\x00\x00\x00\x00\x00\x00\x01')
    self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
                     b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff' +
                     b'\x00' * 6)
    self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
                     b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)
def testIpType(self):
    """The factory functions return the concrete v4/v6 classes."""
    cases = (('1.2.3.4', ipaddress.IPv4Network, ipaddress.IPv4Address),
             ('::1.2.3.4', ipaddress.IPv6Network, ipaddress.IPv6Address))
    for text, net_cls, addr_cls in cases:
        self.assertEqual(net_cls, type(ipaddress.ip_network(text)))
        self.assertEqual(addr_cls, type(ipaddress.ip_address(text)))
def testReservedIpv4(self):
    """is_multicast/reserved/private/link_local/loopback/unspecified/
    is_global boundaries for IPv4 networks and addresses."""
    # test networks
    self.assertEqual(True, ipaddress.ip_interface(
        '224.1.1.1/31').is_multicast)
    self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
    self.assertEqual(True, ipaddress.ip_interface(
        '192.168.1.1/17').is_private)
    self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
    self.assertEqual(True, ipaddress.ip_network(
        '10.255.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
    self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
    self.assertEqual(True, ipaddress.ip_network(
        '172.31.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddress.ip_network('169.254.1.0/24').is_link_local)
    self.assertEqual(
        True,
        ipaddress.ip_interface('169.254.100.200/24').is_link_local)
    self.assertEqual(
        False,
        ipaddress.ip_interface('169.255.100.200/24').is_link_local)
    self.assertEqual(
        True,
        ipaddress.ip_network('127.100.200.254/32').is_loopback)
    self.assertEqual(True, ipaddress.ip_network(
        '127.42.0.0/16').is_loopback)
    self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
    # 100.64.0.0/10 (shared address space) is neither private nor global.
    self.assertEqual(False,
                     ipaddress.ip_network('100.64.0.0/10').is_private)
    self.assertEqual(
        False, ipaddress.ip_network('100.64.0.0/10').is_global)
    self.assertEqual(True,
                     ipaddress.ip_network('192.0.2.128/25').is_private)
    self.assertEqual(True,
                     ipaddress.ip_network('192.0.3.0/24').is_global)
    # test addresses
    self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
    self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
    self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
    self.assertEqual(False,
                     ipaddress.ip_address('239.255.255.255').is_reserved)
    self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
    self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
    self.assertEqual(True, ipaddress.ip_address(
        '10.255.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
    self.assertEqual(True, ipaddress.ip_address(
        '172.31.255.255').is_private)
    self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddress.ip_address('169.254.100.200').is_link_local)
    self.assertEqual(False,
                     ipaddress.ip_address('169.255.100.200').is_link_local)
    self.assertTrue(ipaddress.ip_address('192.0.7.1').is_global)
    # 203.0.113.0/24 is TEST-NET-3, hence not global.
    self.assertFalse(ipaddress.ip_address('203.0.113.1').is_global)
    self.assertEqual(True,
                     ipaddress.ip_address('127.100.200.254').is_loopback)
    self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
    self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
    self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
    """is_multicast/site_local/private/link_local/loopback/unspecified/
    reserved boundaries for IPv6 networks and addresses."""
    self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_network(2 ** 128 - 1).is_multicast)
    self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
    self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
    self.assertEqual(True, ipaddress.ip_network(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_network(
        'fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
    self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
    self.assertEqual(True, ipaddress.ip_network(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
    self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_network(
        'febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_network(
        'fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
    # Only the exact ::1/128 network is loopback.
    self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
    self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
    self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
    self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
    self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
    self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
    self.assertEqual(True,
                     ipaddress.ip_network('2001::1/128').is_private)
    self.assertEqual(True,
                     ipaddress.ip_network('200::1/128').is_global)
    # test addresses
    self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_address(2 ** 128 - 1).is_multicast)
    self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
    self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
    self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
    self.assertEqual(True, ipaddress.ip_address(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_address(
        'fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
    self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
    self.assertEqual(True, ipaddress.ip_address(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
    self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_address(
        'febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_address(
        'fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
    self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
    self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
    self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
    self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
    self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
    # some generic IETF reserved addresses
    self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
    self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
    """ipv4_mapped is set only for ::ffff:a.b.c.d style addresses."""
    expected_v4 = ipaddress.ip_address('192.168.1.1')
    mapped = ipaddress.ip_address('::ffff:192.168.1.1')
    self.assertEqual(mapped.ipv4_mapped, expected_v4)
    # Same low 32 bits without the ffff group is NOT a mapped address.
    self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
    self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
                     expected_v4)
def testAddrExclude(self):
    """address_exclude returns the complement networks, and rejects
    non-subnets, plain addresses and mixed versions."""
    addr1 = ipaddress.ip_network('10.1.1.0/24')
    addr2 = ipaddress.ip_network('10.1.1.0/26')
    addr3 = ipaddress.ip_network('10.2.1.0/24')
    addr4 = ipaddress.ip_address('10.1.1.0')
    addr5 = ipaddress.ip_network('2001:db8::0/32')
    addr6 = ipaddress.ip_network('10.1.1.5/32')
    self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
                     [ipaddress.ip_network('10.1.1.64/26'),
                      ipaddress.ip_network('10.1.1.128/25')])
    self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
    self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
    self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
    # Excluding a network from itself leaves nothing.
    self.assertEqual(list(addr1.address_exclude(addr1)), [])
    self.assertEqual(sorted(list(addr1.address_exclude(addr6))),
                     [ipaddress.ip_network('10.1.1.0/30'),
                      ipaddress.ip_network('10.1.1.4/32'),
                      ipaddress.ip_network('10.1.1.6/31'),
                      ipaddress.ip_network('10.1.1.8/29'),
                      ipaddress.ip_network('10.1.1.16/28'),
                      ipaddress.ip_network('10.1.1.32/27'),
                      ipaddress.ip_network('10.1.1.64/26'),
                      ipaddress.ip_network('10.1.1.128/25')])
def testHash(self):
    """Equal objects hash equal and work as dict keys."""
    self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
                     hash(ipaddress.ip_interface('10.1.1.0/24')))
    self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
                     hash(ipaddress.ip_network('10.1.1.0/24')))
    self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
                     hash(ipaddress.ip_address('10.1.1.0')))
    # i70: hash from a string equals hash from the raw integer value.
    self.assertEqual(
        hash(ipaddress.ip_address('1.2.3.4')),
        hash(ipaddress.ip_address(
            int(ipaddress.ip_address('1.2.3.4')._ip))))
    ip1 = ipaddress.ip_address('10.1.1.0')
    ip2 = ipaddress.ip_address('1::')
    dummy = {}
    dummy[self.ipv4_address] = None
    dummy[self.ipv6_address] = None
    dummy[ip1] = None
    dummy[ip2] = None
    self.assertTrue(self.ipv4_address in dummy)
    self.assertTrue(ip2 in dummy)
def testIPBases(self):
    """compressed string form, and _string_from_ip_int bounds checking."""
    net = self.ipv4_network
    self.assertEqual('1.2.3.0/24', net.compressed)
    net = self.ipv6_network
    # A value above 2**128 - 1 cannot be rendered as an IPv6 address.
    self.assertRaises(ValueError, net._string_from_ip_int, 2 ** 128 + 1)
def testIPv6NetworkHelpers(self):
    """with_prefixlen/with_netmask/with_hostmask string forms for IPv6."""
    net = self.ipv6_network
    self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
    self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
                     net.with_netmask)
    self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
                     net.with_hostmask)
    self.assertEqual('2001:658:22a:cafe::/64', str(net))
def testIPv4NetworkHelpers(self):
    """with_prefixlen/with_netmask/with_hostmask string forms for IPv4."""
    net = self.ipv4_network
    self.assertEqual(net.with_prefixlen, '1.2.3.0/24')
    self.assertEqual(net.with_netmask, '1.2.3.0/255.255.255.0')
    self.assertEqual(net.with_hostmask, '1.2.3.0/0.0.0.255')
    self.assertEqual(str(net), '1.2.3.0/24')
def testCopyConstructor(self):
    """Constructors accept an instance of their own kind (copy semantics)."""
    addr1 = ipaddress.ip_network('10.1.1.0/24')
    addr2 = ipaddress.ip_network(addr1)
    addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
    addr4 = ipaddress.ip_interface(addr3)
    addr5 = ipaddress.IPv4Address('1.1.1.1')
    addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
    self.assertEqual(addr1, addr2)
    self.assertEqual(addr3, addr4)
    self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
    self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
    """str() of an IPv6 interface uses the canonical compressed form
    (longest zero run collapsed to ::, leading zeros dropped)."""
    test_addresses = {
        '1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
        '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
        '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
        '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
        '0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
        '0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
        '0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
        '1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
        '0:0:0:0:0:0:0:0': '::/128',
        '0:0:0:0:0:0:0:0/0': '::/0',
        '0:0:0:0:0:0:0:1': '::1/128',
        '2001:0658:022a:cafe:0000:0000:0000:0000/66':
        '2001:658:22a:cafe::/66',
        '::1.2.3.4': '::102:304/128',
        '1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
        '::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
        '::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
        '7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
        '0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
    }
    # Iterate the items view directly; wrapping it in list() was an
    # unnecessary materialization of a throwaway copy.
    for uncompressed, compressed in test_addresses.items():
        self.assertEqual(compressed, str(ipaddress.IPv6Interface(
            uncompressed)))
def testExplodeShortHandIpStr(self):
    """exploded expands IPv6 to full 8 zero-padded groups; IPv4 is
    returned unchanged."""
    addr1 = ipaddress.IPv6Interface('2001::1')
    addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
    addr3 = ipaddress.IPv6Network('2001::/96')
    addr4 = ipaddress.IPv4Address('192.168.178.1')
    self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
                     addr1.exploded)
    self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
                     ipaddress.IPv6Interface('::1/128').exploded)
    # issue 77
    self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
                     addr2.exploded)
    self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
                     addr3.exploded)
    self.assertEqual('192.168.178.1', addr4.exploded)
def testReversePointer(self):
    """reverse_pointer yields the in-addr.arpa / ip6.arpa PTR name."""
    addr1 = ipaddress.IPv4Address('127.0.0.1')
    addr2 = ipaddress.IPv6Address('2001:db8::1')
    self.assertEqual('1.0.0.127.in-addr.arpa', addr1.reverse_pointer)
    # IPv6 reverses every nibble of the exploded form.
    self.assertEqual('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.' +
                     'b.d.0.1.0.0.2.ip6.arpa',
                     addr2.reverse_pointer)
def testIntRepresentation(self):
    """int() of an address gives its raw numeric value."""
    self.assertEqual(int(self.ipv4_address), 16909060)
    self.assertEqual(int(self.ipv6_address),
                     42540616829182469433547762482097946625)
def testForceVersion(self):
    """An integer network is IPv4 by default; IPv6Network forces v6."""
    self.assertEqual(4, ipaddress.ip_network(1).version)
    self.assertEqual(6, ipaddress.IPv6Network(1).version)
def testWithStar(self):
    """with_prefixlen/with_netmask/with_hostmask string forms on
    interfaces (both versions)."""
    self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
    self.assertEqual(self.ipv4_interface.with_netmask,
                     "1.2.3.4/255.255.255.0")
    self.assertEqual(self.ipv4_interface.with_hostmask,
                     "1.2.3.4/0.0.0.255")
    self.assertEqual(self.ipv6_interface.with_prefixlen,
                     '2001:658:22a:cafe:200::1/64')
    self.assertEqual(self.ipv6_interface.with_netmask,
                     '2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
    # this probably don't make much sense, but it's included for
    # compatibility with ipv4
    self.assertEqual(self.ipv6_interface.with_hostmask,
                     '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
    """Derived network properties are computed lazily and then cached
    in the network's _cache dict."""
    # V4 - make sure we're empty
    self.assertFalse('network_address' in self.ipv4_network._cache)
    self.assertFalse('broadcast_address' in self.ipv4_network._cache)
    self.assertFalse('hostmask' in self.ipv4_network._cache)
    # V4 - populate and test
    self.assertEqual(self.ipv4_network.network_address,
                     ipaddress.IPv4Address('1.2.3.0'))
    self.assertEqual(self.ipv4_network.broadcast_address,
                     ipaddress.IPv4Address('1.2.3.255'))
    self.assertEqual(self.ipv4_network.hostmask,
                     ipaddress.IPv4Address('0.0.0.255'))
    # V4 - check we're cached
    self.assertTrue('broadcast_address' in self.ipv4_network._cache)
    self.assertTrue('hostmask' in self.ipv4_network._cache)
    # V6 - make sure we're empty
    self.assertFalse('broadcast_address' in self.ipv6_network._cache)
    self.assertFalse('hostmask' in self.ipv6_network._cache)
    # V6 - populate and test
    self.assertEqual(self.ipv6_network.network_address,
                     ipaddress.IPv6Address('2001:658:22a:cafe::'))
    self.assertEqual(self.ipv6_interface.network.network_address,
                     ipaddress.IPv6Address('2001:658:22a:cafe::'))
    self.assertEqual(
        self.ipv6_network.broadcast_address,
        ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
    self.assertEqual(self.ipv6_network.hostmask,
                     ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
    self.assertEqual(
        self.ipv6_interface.network.broadcast_address,
        ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
    self.assertEqual(self.ipv6_interface.network.hostmask,
                     ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
    # V6 - check we're cached
    self.assertTrue('broadcast_address' in self.ipv6_network._cache)
    self.assertTrue('hostmask' in self.ipv6_network._cache)
    self.assertTrue(
        'broadcast_address' in self.ipv6_interface.network._cache)
    self.assertTrue('hostmask' in self.ipv6_interface.network._cache)
def testTeredo(self):
    """teredo decodes 2001::/32 addresses to (server, client) IPv4
    pairs and is falsy for anything else."""
    # stolen from wikipedia
    server = ipaddress.IPv4Address('65.54.227.120')
    client = ipaddress.IPv4Address('192.0.2.45')
    teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
    self.assertEqual((server, client),
                     ipaddress.ip_address(teredo_addr).teredo)
    bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
    self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
    bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
    self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
    # i77
    teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
    self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
                      ipaddress.IPv4Address('95.26.244.94')),
                     teredo_addr.teredo)
def testsixtofour(self):
    """sixtofour decodes 2002::/16 addresses; others give a falsy value."""
    good_addr = ipaddress.ip_address('2002:ac1d:2d64::1')
    other_addr = ipaddress.ip_address('2000:ac1d:2d64::1')
    self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
                     good_addr.sixtofour)
    self.assertFalse(other_addr.sixtofour)
# Monkey-patch test runner: backfill assertion helpers that older
# unittest versions lack (assertRaisesRegex, assertIn, subTest).
if not hasattr(BaseTestCase, 'assertRaisesRegex'):
    class _AssertRaisesRegex(object):
        """Minimal stand-in for unittest's assertRaisesRegex context
        manager (context-manager form only)."""
        def __init__(self, expected_exception, expected_regex):
            self.expected = expected_exception
            self.expected_regex = re.compile(expected_regex)

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, tb):
            if exc_type is None:
                # BUGFIX: the original referenced self.obj_name and
                # self._raiseFailure here, neither of which was ever
                # defined, so this path crashed with AttributeError
                # instead of reporting a clean test failure.
                try:
                    exc_name = self.expected.__name__
                except AttributeError:
                    exc_name = str(self.expected)
                raise AssertionError("{} not raised".format(exc_name))
            if not issubclass(exc_type, self.expected):
                # let unexpected exceptions pass through
                return False
            self.exception = exc_value
            if self.expected_regex is None:
                return True
            expected_regex = self.expected_regex
            if not expected_regex.search(str(exc_value)):
                raise AssertionError('"{}" does not match "{}"'.format(
                    expected_regex.pattern, str(exc_value)))
            return True

    BaseTestCase.assertRaisesRegex = _AssertRaisesRegex
    IpaddrUnitTest.assertRaisesRegex = _AssertRaisesRegex

if not hasattr(BaseTestCase, 'assertIn'):
    def _assertIn(self, o, iterable):
        """Backport of TestCase.assertIn."""
        self.assertTrue(o in iterable)

    def _assertNotIn(self, o, iterable):
        """Backport of TestCase.assertNotIn."""
        self.assertFalse(o in iterable)

    BaseTestCase.assertIn = _assertIn
    BaseTestCase.assertNotIn = _assertNotIn
    IpaddrUnitTest.assertIn = _assertIn
    IpaddrUnitTest.assertNotIn = _assertNotIn
    ComparisonTests.assertIn = _assertIn
    ComparisonTests.assertNotIn = _assertNotIn

if not hasattr(BaseTestCase, 'subTest'):
    class _SubTest(object):
        """No-op stand-in for TestCase.subTest on old unittest versions."""
        def __init__(*a, **kw):
            pass

        def __enter__(*a):
            pass

        def __exit__(*a):
            pass

    BaseTestCase.subTest = _SubTest
# Test for https://github.com/phihag/ipaddress/pull/6
class Python2RangeTest(unittest.TestCase):
    """Regression tests for https://github.com/phihag/ipaddress/pull/6.

    Iterating a huge IPv6 network must not overflow the range machinery.
    """
    def test_network_hosts(self):
        # Advancing hosts() once must not raise OverflowError.
        all_v6 = ipaddress.ip_network('::/0')
        next(all_v6.hosts())
    def test_network_iter(self):
        # Advancing plain iteration once must not raise OverflowError.
        all_v6 = ipaddress.ip_network('::/0')
        next(iter(all_v6))
class CompatTest(unittest.TestCase):
    """Tests for the backport's pure-Python compatibility helpers."""
    def test_bit_length(self):
        # _compat_bit_length mirrors int.bit_length for small non-negative ints.
        cases = ((0, 0), (1, 1), (2, 2), (3, 2), (4, 3))
        for value, expected in cases:
            self.assertEqual(ipaddress._compat_bit_length(value), expected)
class SingleIssuesTest(unittest.TestCase):
    """One regression test per reported GitHub issue."""
    def test_issue_14(self):
        # https://github.com/phihag/ipaddress/issues/14
        # The IPv4 loopback address must be reported as private.
        self.assertTrue(ipaddress.ip_address('127.0.0.1').is_private)
    def test_issue_18(self):
        # subnet_of/supernet_of must be asymmetric for nested networks.
        outer = ipaddress.ip_network("192.0.2.0/24")
        inner = ipaddress.ip_network("192.0.2.112/29")
        self.assertFalse(outer.subnet_of(inner))
        self.assertTrue(outer.supernet_of(inner))
        self.assertTrue(inner.subnet_of(outer))
        self.assertFalse(inner.supernet_of(outer))
# Allow running this file directly: discover and run all the tests above.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "29239dd5f3883e04cd1dce7ad1464c85",
"timestamp": "",
"source": "github",
"line_count": 2219,
"max_line_length": 79,
"avg_line_length": 42.765660207300584,
"alnum_prop": 0.5839699885138624,
"repo_name": "cloudera/hue",
"id": "145dc1a922b52a3587024cf9c612317b7811bf9d",
"size": "94977",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/ipaddress-1.0.19/test_ipaddress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
"""Tests for Adam."""
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.compiler.xla.experimental import xla_sharding
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """Reference NumPy implementation of a single Adam update step.

  Args:
    param: current parameter value.
    g_t: gradient at step `t`.
    t: 1-based step number.
    m: first-moment accumulator from the previous step.
    v: second-moment accumulator from the previous step.
    alpha: learning rate.
    beta1: exponential decay rate for the first moment.
    beta2: exponential decay rate for the second moment.
    epsilon: small constant added for numerical stability.

  Returns:
    Tuple of (updated param, updated m, updated v).
  """
  # Bias-corrected step size (Kingma & Ba, 2015).
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  # Exponential moving averages of the gradient and its square.
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  new_param = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return new_param, m_t, v_t
class AdamOptimizerTest(test.TestCase):
  """Tests tf.compat.v1.train.AdamOptimizer against `adam_update_numpy`."""
  def doTestSparse(self, use_resource=False):
    """Applies sparse (IndexedSlices) gradients and checks against NumPy."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.RefVariable(var0_np)
          var1 = variables.RefVariable(var1_np)
        # Wrap the dense gradients as IndexedSlices covering every row.
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = indexed_slices.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = indexed_slices.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([2]))
        opt = adam.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
          self.assertAllCloseAccordingToType(0.999**t,
                                             self.evaluate(beta2_power))
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  def testSparse(self):
    with ops.Graph().as_default():
      self.doTestSparse(use_resource=False)
  def testResourceSparse(self):
    with ops.Graph().as_default():
      self.doTestSparse(use_resource=True)
  def testSparseDevicePlacement(self):
    """Sparse optimizer ops must have kernels for the available device."""
    with ops.Graph().as_default():
      for index_dtype in [dtypes.int32, dtypes.int64]:
        with self.cached_session(force_gpu=test.is_gpu_available()):
          # If a GPU is available, tests that all optimizer ops can be placed on
          # it (i.e. they have GPU kernels).
          var = variables.Variable([[1.0], [2.0]])
          indices = constant_op.constant([0, 1], dtype=index_dtype)
          gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
          optimizer = adam.AdamOptimizer(3.0)
          minimize_op = optimizer.minimize(gathered_sum)
          self.evaluate(variables.global_variables_initializer())
          minimize_op.run()
  def testSparseRepeatedIndices(self):
    """Repeated indices in one IndexedSlices must act like summed gradients."""
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
        with self.cached_session():
          repeated_index_update_var = variables.Variable(
              [[1.0], [2.0]], dtype=dtype)
          aggregated_update_var = variables.Variable(
              [[1.0], [2.0]], dtype=dtype)
          # Two 0.1 updates on row 1 vs one pre-aggregated 0.2 update.
          grad_repeated_index = indexed_slices.IndexedSlices(
              constant_op.constant(
                  [0.1, 0.1], shape=[2, 1], dtype=dtype),
              constant_op.constant([1, 1]),
              constant_op.constant([2, 1]))
          grad_aggregated = indexed_slices.IndexedSlices(
              constant_op.constant(
                  [0.2], shape=[1, 1], dtype=dtype),
              constant_op.constant([1]),
              constant_op.constant([2, 1]))
          repeated_update = adam.AdamOptimizer().apply_gradients(
              [(grad_repeated_index, repeated_index_update_var)])
          aggregated_update = adam.AdamOptimizer().apply_gradients(
              [(grad_aggregated, aggregated_update_var)])
          self.evaluate(variables.global_variables_initializer())
          self.assertAllClose(aggregated_update_var,
                              self.evaluate(repeated_index_update_var))
          for _ in range(3):
            repeated_update.run()
            aggregated_update.run()
            self.assertAllClose(aggregated_update_var,
                                self.evaluate(repeated_index_update_var))
  def doTestBasic(self, use_resource=False, use_callable_params=False):
    """Applies dense gradients and checks updates/accumulators against NumPy."""
    if context.executing_eagerly() and not use_resource:
      self.skipTest(
          "Skipping test with use_resource=False and executing eagerly.")
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph()):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(
              var0_np, name="var0_%d" % i)
          var1 = resource_variable_ops.ResourceVariable(
              var1_np, name="var1_%d" % i)
        else:
          var0 = variables.RefVariable(var0_np)
          var1 = variables.RefVariable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        # Hyperparameters may be plain values or zero-argument callables.
        learning_rate = lambda: 0.001
        beta1 = lambda: 0.9
        beta2 = lambda: 0.999
        epsilon = lambda: 1e-8
        if not use_callable_params:
          learning_rate = learning_rate()
          beta1 = beta1()
          beta2 = beta2()
          epsilon = epsilon()
        opt = adam.AdamOptimizer(learning_rate=learning_rate)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        opt_variables = opt.variables()
        beta1_power, beta2_power = opt._get_beta_accumulators()
        self.assertTrue(beta1_power is not None)
        self.assertTrue(beta2_power is not None)
        self.assertIn(beta1_power, opt_variables)
        self.assertIn(beta2_power, opt_variables)
        # Ensure that non-slot variables are the same type as the requested
        # variables.
        self.assertEqual(
            use_resource,
            resource_variable_ops.is_resource_variable(beta1_power))
        self.assertEqual(
            use_resource,
            resource_variable_ops.is_resource_variable(beta2_power))
        if not context.executing_eagerly():
          with ops.Graph().as_default():
            # Shouldn't return non-slot variables from other graphs.
            self.assertEqual(0, len(opt.variables()))
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          if not context.executing_eagerly():
            self.evaluate(update)
          elif t > 1:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta2_power))
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
          if use_resource:
            self.assertEqual("var0_%d/Adam:0" % (i,),
                             opt.get_slot(var=var0, name="m").name)
  def testBasic(self):
    with self.cached_session():
      self.doTestBasic(use_resource=False)
  @test_util.run_in_graph_and_eager_modes
  @test_util.disable_tfrt("b/168527439: invalid runtime fallback "
                          "resource variable reference on GPU.")
  def testResourceBasic(self):
    self.doTestBasic(use_resource=True)
  @test_util.disable_tfrt("b/153089059: cannot create half tensor on GPU.")
  def testBasicCallableParams(self):
    with context.eager_mode():
      self.doTestBasic(use_resource=True, use_callable_params=True)
  def testTensorLearningRate(self):
    """The learning rate may be supplied as a tensor."""
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
        with self.cached_session():
          # Initialize variables for numpy implementation.
          m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
          var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
          grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
          var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
          grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
          grads0 = constant_op.constant(grads0_np)
          grads1 = constant_op.constant(grads1_np)
          opt = adam.AdamOptimizer(constant_op.constant(0.001))
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
          beta1_power, beta2_power = opt._get_beta_accumulators()
          # Run 3 steps of Adam
          for t in range(1, 4):
            self.assertAllCloseAccordingToType(0.9**t,
                                               self.evaluate(beta1_power))
            self.assertAllCloseAccordingToType(0.999**t,
                                               self.evaluate(beta2_power))
            update.run()
            var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
            var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
            # Validate updated params
            self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
            self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  def testSharing(self):
    """Two update ops from one optimizer share the same slot variables."""
    with ops.Graph().as_default():
      for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
        with self.cached_session():
          # Initialize variables for numpy implementation.
          m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
          var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
          grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
          var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
          grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
          grads0 = constant_op.constant(grads0_np)
          grads1 = constant_op.constant(grads1_np)
          opt = adam.AdamOptimizer()
          update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          beta1_power, beta2_power = opt._get_beta_accumulators()
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
          # Run 3 steps of intertwined Adam1 and Adam2.
          for t in range(1, 4):
            self.assertAllCloseAccordingToType(0.9**t,
                                               self.evaluate(beta1_power))
            self.assertAllCloseAccordingToType(0.999**t,
                                               self.evaluate(beta2_power))
            if t % 2 == 0:
              update1.run()
            else:
              update2.run()
            var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
            var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
            # Validate updated params
            self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
            self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
  @test_util.disable_tfrt("b/168527439: invalid runtime fallback "
                          "resource variable reference on GPU.")
  def testTwoSessions(self):
    """One optimizer instance must work across eager mode and two graphs."""
    optimizer = adam.AdamOptimizer()
    with context.eager_mode():
      var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
      grads0 = constant_op.constant(np.array([0.1, 0.1]))
      optimizer.apply_gradients([(grads0, var0)])
    g = ops.Graph()
    with g.as_default():
      with session.Session():
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        optimizer.apply_gradients([(grads0, var0)])
    gg = ops.Graph()
    with gg.as_default():
      with session.Session():
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        # If the optimizer saves any state not keyed by graph the following line
        # fails.
        optimizer.apply_gradients([(grads0, var0)])
  @test_util.disable_tfrt("b/168527439: invalid runtime fallback "
                          "resource variable reference on GPU.")
  def testSlotsUniqueEager(self):
    with context.eager_mode():
      v1 = resource_variable_ops.ResourceVariable(1.)
      v2 = resource_variable_ops.ResourceVariable(1.)
      opt = adam.AdamOptimizer(1.)
      opt.minimize(lambda: v1 + v2)
      # There should be two non-slot variables, and two unique slot variables
      # for v1 and v2 respectively.
      self.assertEqual(6, len({id(v) for v in opt.variables()}))
  @test_util.deprecated_graph_mode_only
  def testXlaSharding(self):
    """XLA sharding annotations must propagate to the optimizer's slots."""
    dtype = dtypes.float32
    with self.session(graph=ops.Graph()):
      # Initialize variables for numpy implementation.
      var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
      grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
      var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
      grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
      var0 = resource_variable_ops.ResourceVariable(var0_np, name="var0")
      var1 = resource_variable_ops.ResourceVariable(var1_np, name="var1")
      var0, var1 = [
          xla_sharding.mesh_split(
              v, np.array([0, 1]), [0], use_sharding_op=False)
          for v in (var0, var1)
      ]
      grads0 = constant_op.constant(grads0_np)
      grads1 = constant_op.constant(grads1_np)
      learning_rate = lambda: 0.001
      opt = adam.AdamOptimizer(learning_rate=learning_rate)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(update)
      # The beta accumulators are not sharded.
      beta1_power, beta2_power = opt._get_beta_accumulators()
      self.assertIsNone(xla_sharding.get_tensor_sharding(beta1_power))
      self.assertIsNone(xla_sharding.get_tensor_sharding(beta2_power))
      # Variables and slots are sharded.
      for v in (var0, var1):
        self.assertIsNotNone(xla_sharding.get_tensor_sharding(v))
        for slot_name in ("m", "v"):
          slot = opt.get_slot(v, slot_name)
          self.assertIsNotNone(xla_sharding.get_tensor_sharding(slot))
if __name__ == "__main__":
test.main()
| {
"content_hash": "e9d89a4e8deb77c0c72e562f7c8026cd",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 80,
"avg_line_length": 43.34320987654321,
"alnum_prop": 0.607838669249174,
"repo_name": "tensorflow/tensorflow",
"id": "0f570888d499b80821ba79e5c3f9b8801df54c56",
"size": "18243",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/adam_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1400913"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "126099822"
},
{
"name": "CMake",
"bytes": "182430"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11447433"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300213"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42782002"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621854"
},
{
"name": "Smarty",
"bytes": "89538"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7738020"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from django.template import Context
from django.template.loader import get_template
from django import template
register = template.Library()
@register.filter
def as_bootstrap(form):
    """Render a whole form using the Bootstrap form template.

    Template usage: ``{{ my_form|as_bootstrap }}``.
    """
    # Fix: use a local name that does not shadow the module-level
    # ``template`` import (the original rebound ``template`` locally).
    tpl = get_template('bootstrap/form.html')
    # NOTE(review): on Django >= 1.11, templates returned by get_template()
    # expect a plain dict, not a Context — confirm the project's Django version.
    ctx = Context({'form': form})
    return tpl.render(ctx)
@register.filter
def as_bootstrap_field(field):
    """Render a single bound form field using the Bootstrap field template.

    Template usage: ``{{ my_form.my_field|as_bootstrap_field }}``.
    """
    # Fix: use a local name that does not shadow the module-level
    # ``template`` import (the original rebound ``template`` locally).
    tpl = get_template('bootstrap/field.html')
    # NOTE(review): on Django >= 1.11, templates returned by get_template()
    # expect a plain dict, not a Context — confirm the project's Django version.
    ctx = Context({'field': field})
    return tpl.render(ctx)
| {
"content_hash": "812284dddec85dd93ae4c6c27221255d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 51,
"avg_line_length": 23.7,
"alnum_prop": 0.729957805907173,
"repo_name": "furious-luke/django-bootstrap-theme",
"id": "a1a238473138a3611a646f9860bf79c7d469f8d0",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrap_theme/templatetags/bootstrap_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "55574"
},
{
"name": "Python",
"bytes": "1877"
}
],
"symlink_target": ""
} |
"""Various tools fpr surface detection"""
import numpy as np
import pandas as pd
import scipy.signal
from . import utils
import copy
def detector(rdg, y0=None, winsize=100, method='grima2012', axis=0, **kwargs):
    """Surface detection with the chosen method

    Input
    -----
    rdg: 2d-array
        radargram.
    y0: array, optional
        Initial estimation for the location of the surface.
    winsize: float
        Size of the window around y0 to look for the surface.
        Activated only if y0 is given.
    method: string
        method to use for surface detection ('grima2012' or 'mouginot2010').
    axis: 0 or 1
        Long-time axis.

    Output
    ------
    y: float
        index of the location of the detected echo.
    """
    # Fix: avoid a mutable default argument; None means "no prior estimate".
    if y0 is None:
        y0 = []
    if axis == 1:
        rdg = np.rot90(rdg)
    xsize = rdg.shape[0]
    ysize = rdg.shape[1]
    y = np.zeros(xsize)
    # Detection: process each range line independently.
    for xi in np.arange(xsize):
        signal = rdg[xi, :]
        # Index vector: a window centered on y0[xi], or the full trace.
        if len(y0) > 0:
            idx = np.arange(winsize) + y0[xi] - winsize/2.
        else:
            idx = np.arange(ysize)
        # Method selection
        # NOTE(review): an unknown method name silently leaves y[xi] = 0;
        # consider raising ValueError — kept as-is to preserve behavior.
        if method == 'grima2012':
            y[xi], c = grima2012(signal, idx=idx, **kwargs)
        if method == 'mouginot2010':
            y[xi], c = mouginot2010(signal, idx=idx, **kwargs)
    return y
def mouginot2010(signal, idx=None, period=3, window=30, **kwargs):
    """Surface detection using [Mouginot et al. 2010]

    Parameters
    ----------
    signal: array
        signal vector
    idx: array, optional
        the indices of the array where to search for the echo
        (defaults to the full signal)
    period: float
        window shift to compute the noise (=1 in the original paper)
    window: float
        size of the window where to compute the noise

    Output
    ------
    y: float
        index of the location of the detected echo
    c: array
        criteria computed with idx
    """
    # Fix: avoid a mutable default argument; None or an empty sequence
    # means "search the whole trace".
    if idx is None:
        idx = np.arange(len(signal)).astype(int)
    else:
        idx = np.array(idx)
        if idx.size == 0:
            idx = np.arange(len(signal)).astype(int)
        else:
            idx = idx.astype(int)  # make idx an integer array
    # Noise estimate: rolling mean of the signal shifted by `period`
    # samples, so the sample under test is excluded from its own noise.
    noise = pd.Series(signal[idx]).shift(periods=period).rolling(window).mean().values
    # Signal-to-noise criterion; the surface is at its maximum.
    c = signal[idx]/noise
    # surface index
    try:
        y = idx[np.nanargmax(c)]
    except ValueError:
        # All-NaN criterion (e.g. trace shorter than the noise window).
        y = np.nan
    return y, c
def grima2012(signal, idx=None, **kwargs):
    """Surface detection from [Grima et al. 2012]

    Parameters
    ----------
    signal: array
        signal vector
    idx: array, optional
        the indices of the array where to search for the echo
        (defaults to the full signal)

    Return
    ------
    y: float
        index of the location of the detected echo
    c: array
        criteria computed with idx
    """
    # Fix: avoid a mutable default argument; None or an empty sequence
    # means "search the whole trace".
    if idx is None:
        idx = np.arange(len(signal)).astype(int)
    else:
        idx = np.array(idx)
        if idx.size == 0:
            idx = np.arange(len(signal)).astype(int)
        else:
            idx = idx.astype(int)  # make idx an integer array
    # Criterion: signal weighted by its (shifted) derivative, which peaks
    # at a sharp rising edge such as the surface return.
    derivative = np.roll(np.gradient(signal[idx]), 2)
    c = signal[idx]*derivative
    # surface index
    try:
        y = idx[np.nanargmax(c)]
    except ValueError:
        # All-NaN criterion.
        y = np.nan
    return y, c
def gcc(rdg, tau_threshold=2, **kwargs):
    """Surface detection from relative time delay obtained through generalized
    cross-correlation of each contiguous range lines

    Parameters
    ----------
    rdg: 2d-array
        radargram
    tau_threshold: int
        columns whose absolute lag exceeds this are flagged as bad (ok=0)

    Return
    ------
    dict with keys:
        tau: per-column lag relative to the next column (int)
        val: peak cross-correlation value per column
        cc: cross-correlation radargram
        ok: per-column quality flag (1 = good, 0 = bad)
        yn: column indices
        offset: cumulative vertical offset from the raw lags
        offset2: offsets after chunk-wise realignment
        y: surface echo coordinate per column
    """
    #---------------
    # Initialization
    yn = np.arange(rdg.shape[1])
    tau = np.zeros(yn.size, dtype=int)
    val = np.zeros(yn.size)
    cc = np.abs(rdg)*0
    ch = np.abs(rdg)*0
    offset = np.zeros(yn.size, dtype=int)
    #-------------------------
    # GCC applied on radargram
    # All records except last
    for i in yn[:-1]:
        x, y = rdg[:, i], rdg[:, i+1]
        _ = utils.gcc(x, y, **kwargs)
        tau[i] = _['tau']
        val[i] = _['val']
        cc[:,i] = _['cc']
        #ch[:,i] = _['ch']
    # Last record
    # NOTE(review): this reuses the loop variable i (== yn[-2]) so the last
    # column is correlated with its previous neighbor — TODO confirm intended.
    _ = utils.gcc(rdg[:, i], rdg[:, i-1], **kwargs)
    tau[-1] = _['tau']
    val[-1] = _['val']
    cc[:,-1] = _['cc']
    #ch[:,-1] = _['ch']
    # Quality flag when tau gradient higher than dtau_threshold
    #dtau = np.roll( np.gradient( np.abs(tau)) ,-1)
    where_bad = np.where(np.abs(tau) > tau_threshold)[0]
    #where_bad = np.intersect1d(np.where(np.abs(dtau) > dtau_threshold)[0], np.where(val < np.median(val))[0])
    ok =np.zeros(yn.size)+1
    ok[where_bad] = 0
    #----------------------------------------
    # Vertical offset that corresponds to tau (cumulative sum of the lags)
    offset = [np.sum(tau[:i]) for i in yn]
    offset = np.array(offset)
    #-------------------
    # Corrected offsets
    #Radargram rolled with offset
    rdg2 = copy.deepcopy(rdg)
    for i in yn:
        rdg2[:,i] = np.roll(rdg[:,i], offset[i])
    # Radargram is divided by chunks that are bounded where ok=0
    def _data_chunks(data, stepsize=1):
        # Split the index vector wherever the good-flag sequence breaks;
        # keep only runs longer than one sample.
        data_id = np.arange(data.size)*data
        pieces = np.split(data_id, np.where(np.diff(data_id) != stepsize)[0]+1)
        chunks = [i for i in pieces if (i.size > 1)]
        return [np.array(chunk, dtype=int) for chunk in chunks]
    chunks = _data_chunks(ok)
    # Cumulative sum of each chunk to assess the average coordinate
    # of the surface echo in each chunk
    chunk_cumsums = [np.abs(rdg2[:, chunk].sum(axis=1)) for chunk in chunks]
    chunk_cumsum_argmaxs = [np.argmax(chunk_cumsum) for chunk_cumsum in chunk_cumsums]
    # Chunks are aligned for their average surface echo coordinate to match
    offset2 = copy.deepcopy(offset)
    for i, chunk in enumerate(chunks):
        offset2[chunk] = offset[chunk] - chunk_cumsum_argmaxs[i] + chunk_cumsum_argmaxs[0]
    del rdg2
    #-------------------------------
    # Coordinate of the surface echo
    rdg3 = copy.deepcopy(rdg)
    for i in yn:
        rdg3[:,i] = np.roll(rdg[:,i], offset2[i])
    y0 = np.argmax( np.abs(rdg3.sum(axis=1)) )
    y = y0 + offset2
    del rdg3
    return {'tau':tau.astype(int), 'val':val, 'cc':cc, 'ok':ok, 'yn':yn,
            'offset':offset, 'offset2':offset2, 'y':y}
| {
"content_hash": "043805df42850154ed19722a7e53adc5",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 110,
"avg_line_length": 25.65991902834008,
"alnum_prop": 0.5590091511517828,
"repo_name": "cgrima/subradar",
"id": "b677a5ed0aaced9de4187bc4f7922d04a21b27b5",
"size": "6338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subradar/surface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26239"
}
],
"symlink_target": ""
} |
"""Generator for C++ features from json files.
Usage example:
features_compiler.py --destdir gen --root /home/Work/src _permissions.json
"""
from __future__ import print_function
import optparse
import os
from schema_loader import SchemaLoader
from features_cc_generator import CCGenerator
from features_h_generator import HGenerator
from model import CreateFeature
def _GenerateSchema(filename, root, destdir, namespace):
  """Generates C++ features files from the json file |filename|.

  Returns the concatenated generated sources; if |destdir| is set, the
  .cc/.h files are also written under destdir, mirroring the schema's
  path relative to |root|.
  """
  # Load in the feature permissions from the JSON file.
  schema = os.path.normpath(filename)
  loader = SchemaLoader(os.path.dirname(os.path.relpath(schema, root)),
                        os.path.dirname(schema),
                        [],
                        None)
  schema_filename = os.path.splitext(schema)[0]
  feature_defs = loader.LoadSchema(schema)
  # Build one feature model per definition in the schema.
  feature_list = [CreateFeature(feature_def, feature)
                  for feature_def, feature in feature_defs.items()]
  source_file_dir, _ = os.path.split(schema)
  relpath = os.path.relpath(os.path.normpath(source_file_dir), root)
  full_path = os.path.join(relpath, schema)
  generators = [
      ('%s.cc' % schema_filename, CCGenerator()),
      ('%s.h' % schema_filename, HGenerator())
  ]
  # Generate and output the code for all features.
  output_code = []
  for out_name, generator in generators:
    code = generator.Generate(feature_list, full_path, namespace).Render()
    if destdir:
      with open(os.path.join(destdir, relpath, out_name), 'w') as f:
        f.write(code)
    output_code += [out_name, '', code, '']
  return '\n'.join(output_code)
if __name__ == '__main__':
  # NOTE: optparse is deprecated in favor of argparse; kept to preserve the
  # tool's existing command-line behavior.
  parser = optparse.OptionParser(
      description='Generates a C++ features model from JSON schema',
      usage='usage: %prog [option]... schema')
  parser.add_option('-r', '--root', default='.',
      help='logical include root directory. Path to schema files from '
      'specified dir will be the include path.')
  parser.add_option('-d', '--destdir',
      help='root directory to output generated files.')
  parser.add_option('-n', '--namespace', default='generated_features',
      help='C++ namespace for generated files. e.g extensions::api.')
  (opts, filenames) = parser.parse_args()
  # Only one file is currently specified.
  if len(filenames) != 1:
    raise ValueError('One (and only one) file is required (for now).')
  result = _GenerateSchema(filenames[0], opts.root, opts.destdir,
                           opts.namespace)
  # Without a destination directory, dump the generated code to stdout.
  if not opts.destdir:
    print(result)
| {
"content_hash": "1bc690ab9e6266c550034231d498eab0",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 35.25,
"alnum_prop": 0.6606942889137738,
"repo_name": "nwjs/chromium.src",
"id": "14bcd65fd19df293665bf8bfb24f5049a117515d",
"size": "2842",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw70",
"path": "tools/json_schema_compiler/features_compiler.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from abc import abstractmethod
from enum import Enum
import collections
class Action(Enum):
    """On/off state of a light, with parsing and inversion helpers.

    BUG FIX: the original declared members with trailing commas
    ("OFF = 0,"), which made every member's value a one-element tuple.
    Members are compared by identity/equality throughout this module,
    so plain int values are a backward-compatible correction.
    """
    OFF = 0
    ON = 1

    def __str__(self):
        """Return the lowercase display name: "on" or "off"."""
        return "on" if self == Action.ON else "off" if self == Action.OFF else None

    def opposite(self):
        """Return the other member (ON <-> OFF)."""
        return Action.OFF if self == Action.ON else Action.ON
def action_from_string(name):
    """Map the strings "on"/"off" to the matching Action member.

    Any other input yields None.
    """
    lookup = {"on": Action.ON, "off": Action.OFF}
    return lookup.get(name)
class Device(object):
    """Base class for a single light ("real" device) or a group of lights.

    Subclasses implement the abstract hooks declared below.  Note that
    this class does not use ABCMeta, so @abstractmethod is documentation
    only and is not enforced at instantiation time.
    """

    def __getstate__(self):
        # Serialize only the identifying names; parent and light_control
        # are runtime wiring and must not be pickled.
        return {'display_name': self.display_name,
                'short_name': self.short_name}

    def __init__(self, display_name, short_name, light_control, parent=None):
        self.parent = parent
        self.light_control = light_control  # controller used by set()
        self.short_name = short_name
        self.display_name = display_name

    @abstractmethod
    def is_group(self):
        pass

    def set(self, action):
        """Switch every real device under this node on or off.

        Returns the list of real devices that were switched.
        Raises ValueError for anything other than Action.ON / Action.OFF.
        """
        batch = []
        self.collect_real(batch)
        slots = [device.slot for device in batch]
        if action == Action.ON:
            self.light_control.set_lights(True, slots)
            return batch
        elif action == Action.OFF:
            self.light_control.set_lights(False, slots)
            return batch
        else:
            # BUG FIX: the original executed "raise ()", which itself
            # raises TypeError ("exceptions must derive from
            # BaseException").  Raise a meaningful exception instead.
            raise ValueError("unsupported action: %r" % (action,))

    @abstractmethod
    def collect_real(self, batch):
        pass

    @abstractmethod
    def collect_all(self, batch):
        pass

    @abstractmethod
    def is_controlled_manually(self):
        pass

    @abstractmethod
    def is_controlled_automatically(self):
        pass

    @abstractmethod
    def get_real_devices_recursive(self):
        pass

    @abstractmethod
    def get_all_devices_recursive(self):
        pass

    @abstractmethod
    def get_super_start(self):
        pass

    @abstractmethod
    def get_super_stop(self):
        pass
class DeviceGroup(Device):
    """A named collection of Devices; operations fan out to all children.

    Children live in an OrderedDict keyed by short_name, so iteration
    order matches registration order.
    """

    def is_group(self):
        return True

    def __init__(self, display_name, short_name, light_control, parent=None):
        super().__init__(display_name, short_name, light_control, parent)
        # NOTE: the original re-assigned self.light_control here; the
        # base __init__ already does that, so the duplicate was removed.
        self.devices = collections.OrderedDict()

    def register_device(self, device):
        self.devices[device.short_name] = device

    def has_device(self, name):
        # NOTE(review): this only checks direct children, while
        # get_device() searches recursively -- confirm the asymmetry
        # is intended.
        return name in self.devices

    def get_device(self, name):
        """Find a device by short name, searching sub-groups recursively.

        Returns None when no device with that name exists.
        """
        try:
            return self.devices[name]
        except KeyError:
            for child in self.devices.values():
                if isinstance(child, DeviceGroup):
                    found = child.get_device(name)
                    if found is not None:
                        return found
        return None

    def collect_real(self, batch):
        # Depth-first collection of leaf (real) devices only.
        for child in self.devices.values():
            child.collect_real(batch)

    def get_real_devices_recursive(self):
        batch = []
        self.collect_real(batch)
        return batch

    def collect_all(self, batch):
        # Groups include themselves, then recurse into children.
        batch.append(self)
        for child in self.devices.values():
            child.collect_all(batch)

    def get_all_devices_recursive(self):
        batch = []
        self.collect_all(batch)
        return batch

    def get_devices(self):
        """Return the direct children as a list (registration order)."""
        return list(self.devices.values())

    def is_controlled_manually(self):
        # A group counts as manual only when every real device is manual.
        return all(device.is_controlled_manually()
                   for device in self.get_real_devices_recursive())

    def is_controlled_automatically(self):
        # A group counts as automatic only when every real device is.
        return all(device.is_controlled_automatically()
                   for device in self.get_real_devices_recursive())

    def get_super_start(self):
        """Return the shared start rule, or None when children disagree."""
        devices = self.get_real_devices_recursive()
        rules = [device.super_rule_start for device in devices]
        if rules and rules.count(rules[0]) == len(rules):
            return rules[0]
        return None

    def get_super_stop(self):
        """Return the shared stop rule, or None when children disagree.

        Unlike get_super_start, rules are compared by their uuid, and
        every child must have a non-None rule.
        """
        devices = self.get_real_devices_recursive()
        rules = [device.super_rule_stop for device in devices
                 if device.super_rule_stop is not None]
        uuids = [rule.uuid for rule in rules]
        if rules and len(devices) == len(rules) and equal(uuids):
            return rules[0]
        return None
def equal(iterator):
    """Return True when all elements of *iterator* compare equal.

    An empty iterable is considered equal (returns True).
    """
    items = iter(iterator)
    sentinel = object()
    first = next(items, sentinel)
    if first is sentinel:
        return True
    return all(item == first for item in items)
class DefaultDevice(Device):
    """A single physical light bound to one controller slot."""

    def is_group(self):
        return False

    def __init__(self, slot, display_name, short_name, light_control, parent):
        super().__init__(display_name, short_name, light_control, parent)
        self.slot = slot
        # True while a user override is active (automatic rules ignored).
        self.manually = False
        # Scheduling rules; None means "no rule set".
        self.super_rule_start = None
        self.super_rule_stop = None

    def collect_real(self, batch):
        batch.append(self)

    def collect_all(self, batch):
        # A leaf device contributes only itself.
        self.collect_real(batch)

    def is_controlled_manually(self):
        return self.manually

    def is_controlled_automatically(self):
        return not self.manually

    def control_manually(self):
        self.manually = True

    def control_automatically(self):
        self.manually = False

    def get_real_devices_recursive(self):
        return [self]

    def get_all_devices_recursive(self):
        # BUG FIX: the original called get_real_devices_recursive() but
        # discarded the result, implicitly returning None; callers
        # expect a list.
        return self.get_real_devices_recursive()

    def get_super_start(self):
        return self.super_rule_start

    def clear_super_rule(self):
        self.super_rule_start = None
        self.super_rule_stop = None

    def clear_super_start(self):
        self.super_rule_start = None

    def clear_super_stop(self):
        self.super_rule_stop = None

    def get_super_stop(self):
        return self.super_rule_stop
| {
"content_hash": "692ae84d15471877a8e985f8a880bd16",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 100,
"avg_line_length": 25.345991561181435,
"alnum_prop": 0.5954719493923756,
"repo_name": "ammannbros/garden-lighting",
"id": "4649898926749bbe2fe5aa106286ed626b4b85c3",
"size": "6007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "garden_lighting/web/devices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1638"
},
{
"name": "HTML",
"bytes": "17197"
},
{
"name": "Java",
"bytes": "2321"
},
{
"name": "JavaScript",
"bytes": "5258"
},
{
"name": "Prolog",
"bytes": "653"
},
{
"name": "Python",
"bytes": "64862"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import patch
from google.datacatalog_connectors.sqlserver import datacatalog_cli
# Stub the synchronizer's __init__ for every test in this class so that
# no real Data Catalog client is ever constructed.
@patch('google.datacatalog_connectors.rdbms.sync.'
       'datacatalog_synchronizer.DataCatalogSynchronizer.__init__',
       lambda self, **kargs: None)
class DatacatalogCLITestCase(unittest.TestCase):
    """Tests for the SQLServer connector's command-line entry point."""

    # Decorators are applied bottom-up, so the mocks arrive in the order
    # run, add_argument, parse_args after self.
    @patch('argparse.ArgumentParser.parse_args')
    @patch('argparse.ArgumentParser.add_argument')
    @patch('google.datacatalog_connectors.rdbms.sync.'
           'datacatalog_synchronizer.DataCatalogSynchronizer.run')
    def test_datacatalog_cli_run_should_not_raise_error(
            self, run, add_argument, parse_args):  # noqa
        """run() should complete and declare the expected args as required."""
        # Fake the argparse.Namespace that parse_args() would return,
        # using attribute-style access over a plain dict.
        mocked_parse_args = DictWithAttributeAccess()
        mocked_parse_args.service_account_path = 'service_account.json'
        mocked_parse_args.datacatalog_project_id = 'test_project_id'
        mocked_parse_args.datacatalog_location_id = 'location_id'
        mocked_parse_args.datacatalog_entry_group_id = 'entry_group_id'
        mocked_parse_args.datacatalog_entry_resource_url_prefix =\
            'user_defined_host'
        mocked_parse_args.sqlserver_host = 'host'
        mocked_parse_args.sqlserver_user = 'user'
        mocked_parse_args.sqlserver_pass = 'pass'
        mocked_parse_args.sqlserver_database = 'db'
        mocked_parse_args.raw_metadata_csv = 'csv'
        mocked_parse_args.enable_monitoring = True
        parse_args.return_value = mocked_parse_args

        datacatalog_cli.SQLServer2DatacatalogCli().run({})

        # Inspect every add_argument() call the CLI made and check the
        # 'required' flag matches expectations per option name.
        for call_arg in add_argument.call_args_list:
            arg = call_arg[0]
            command = arg[0]
            # Verify args which should not contain the required attribute
            if '--service-account-path' in command \
                    or '--sqlserver-user' in command \
                    or '--sqlserver-pass' in command \
                    or '--sqlserver-database' in command \
                    or '--datacatalog-entry-resource-url-prefix' in command \
                    or '--raw-metadata-csv' in command \
                    or '--enable-monitoring' in command \
                    or '--datacatalog-entry-group-id' in command:
                params = call_arg[1]
                required = params.get('required')
                self.assertFalse(required)
            elif '-h' not in command:
                # Everything else (except argparse's built-in help)
                # must be explicitly marked required.
                params = call_arg[1]
                required = params['required']
                self.assertTrue(required)

        self.assertEqual(run.call_count, 1)
class DictWithAttributeAccess(dict):
    """dict whose keys can also be read and written as attributes.

    Used to fake the argparse.Namespace returned by parse_args().
    """

    def __getattr__(self, key):
        # BUG FIX: a missing key used to escape as KeyError; attribute
        # protocol consumers (getattr with a default, copy, pickle)
        # expect AttributeError for a missing attribute.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        self[key] = value
| {
"content_hash": "6aa5eaa974a5c4e75e60c35bf8850acc",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 40,
"alnum_prop": 0.621268656716418,
"repo_name": "GoogleCloudPlatform/datacatalog-connectors-rdbms",
"id": "f211bfd8fb3ece13c61bc5109faa911600b9a488",
"size": "3276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-datacatalog-sqlserver-connector/tests/google/datacatalog_connectors/sqlserver/datacatalog_cli_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "9250"
},
{
"name": "Python",
"bytes": "457511"
},
{
"name": "Shell",
"bytes": "19222"
}
],
"symlink_target": ""
} |
import hmac
from functools import wraps

# third party packages
from flask import Flask, jsonify, abort, request, Response
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
db = SQLAlchemy(app)
# region dbClasses
class User(db.Model):
    """
    Represent a user in database
    """
    # SQLAlchemy declarative columns; username and email must be unique.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)

    def __init__(self, username, email):
        self.username = username
        self.email = email

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def to_dict(self):
        # Plain-dict form used by the API views for JSON serialization.
        return {
            'id': self.id,
            'username': self.username,
            'email': self.email
        }
# endregion
# region authorization
def check_auth(username, password):
    """Return True when *username*/*password* match the demo credentials.

    Uses hmac.compare_digest so the comparison runs in constant time and
    does not leak information through timing differences.

    NOTE(review): credentials are hard-coded; move them to configuration
    before any real deployment.
    """
    return (hmac.compare_digest(username.encode('utf-8'), b'admin')
            and hmac.compare_digest(password.encode('utf-8'), b'secret'))
def authenticate():
    """Build the 401 response that prompts the browser for Basic auth."""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    headers = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(body, 401, headers)
def requires_auth(f):
    """Decorator that rejects requests lacking valid Basic-auth credentials."""
    @wraps(f)
    def decorated(*args, **kwargs):
        credentials = request.authorization
        if credentials and check_auth(credentials.username, credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
# endregion
@app.route('/')
@requires_auth
def hello_world():
    # Smoke-test endpoint; only reachable with valid Basic-auth credentials.
    return 'Hello World!'
@app.route('/users', methods=['GET', 'POST'])
def get_users():
    """List all users (GET) or create a new user (POST).

    POST expects a JSON body with 'username' and 'email' and responds
    400 when either is missing; GET responds with every stored user.
    """
    if request.method == 'POST':
        payload = request.json or {}
        # BUG FIX: the original indexed request.json directly (KeyError
        # on a missing field) and called abort() with no status code
        # (TypeError).  Use .get() and a proper 400 response instead.
        username = payload.get('username')
        email = payload.get('email')
        if username is None or email is None:
            abort(400, 'username and email are required')
        user = User(username, email)
        db.session.add(user)
        db.session.commit()
        return jsonify({'user': user.to_dict()}), 201
    elif request.method == 'GET':
        users = User.query.all()
        users_dto = [user.to_dict() for user in users]
        return jsonify({'users': users_dto}), 200
    else:
        # Flask already filters methods via the route declaration, so
        # this branch is effectively unreachable; kept defensively.
        abort(405, "Method not supported")
@app.errorhandler(405)
def custom405(error):
    """Return 405 errors as JSON instead of Flask's default HTML page."""
    payload = {'message': error.description}
    return jsonify(payload), 405
if __name__ == '__main__':
    # Bind to localhost while debugging; expose on all interfaces otherwise.
    if app.debug:
        app.run()
    else:
        app.run(host='0.0.0.0')
| {
"content_hash": "fdc46df277ab4d195e2c23acc95fac29",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 68,
"avg_line_length": 24.495049504950494,
"alnum_prop": 0.6050929668552951,
"repo_name": "gardyna/WalkerAppGame",
"id": "a54db175c8f14ba5485490e5f148d870f96705a3",
"size": "2489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WalkerBuddyAPI/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "51607"
},
{
"name": "Python",
"bytes": "12758"
}
],
"symlink_target": ""
} |
get_ipython().run_cell_magic('capture', '', '%matplotlib inline\nimport numpy as np\nimport sympy as sp\nimport matplotlib.pyplot as plt\n\n# To get equations the look like, well, equations, use the following.\nfrom sympy.interactive import printing\nprinting.init_printing(use_latex=True)\nfrom IPython.display import display\n\n# Tools for manipulating quaternions.\nimport Q_tools as qt;')
# ## Lecture 1: Systems and Experiments
# ### Bracket Notation and Three Identities
# Bracket notation from this quaternion-centric perspective is just a quaternion product where the first term must necessarily be conjugated. I have called this the "Euclidean product". The quaternion product is associative but the Euclidean product is not ($(A^* B)^* C \ne A^* (B^* C)$ although their norms are equal). Write out three things in bracket notation that are known to be true about inner products(QM:TTH, p. 31).
# 1. $<A|A> \rightarrow A^* A$ is real
# 1. $<A|B> = <B|A>^* \rightarrow A^* B = (B^* A)^*$
# 1. $(<A|+<B|)|C> = <A|C> + <B|C> \rightarrow (A+ B)^*C = A^*C + B^* C$
#
# This may provide the first signs that the odd math of quantum mechanics is the math of Euclidean products of quaternions.
#
# So, is $A^* A$ real? Yes and no.
# In[3]:
a0, A1, A2, A3 = sp.symbols("a0 A1 A2 A3")
b0, B1, B2, B3 = sp.symbols("b0 B1 B2 B3")
c0, C1, C2, C3 = sp.symbols("c0 C1 C2 C3")
A = qt.QH([a0, A1, A2, A3], qtype="A")
B = qt.QH([b0, B1, B2, B3], qtype="B")
C = qt.QH([c0, C1, C2, C3], qtype="C")
display(A.conj().product(A).t)
display(A.conj().product(A).x)
display(A.conj().product(A).y)
display(A.conj().product(A).z)
# The first term is a real-valued, with the 3-imaginary vector equal to zero. I think it is bad practice to just pretend the three zeros are *not there in any way*. One can make an equivalence relation between quaternions of the form $(\mathbb{R}, 0, 0, 0)$ and the real numbers. The real numbers are a subgroup of quaternions, and never the other way around.
#
# It is important to understand exactly why the three imaginary terms are zero. It is too common for people to say "it's the norm" and give the subject no thought. No thought means no insights. A quaternion points in the direction of itself, so all the anti-symmetric cross terms are equal to zero. The conjugate operator picks out the mirror reflection of the imaginary terms. The product of an imaginary with its mirror image is an all positive real number and zero for all three imaginary numbers.
#
# Calculus is the story of neighborhoods near points. There are two broad classes of changes one can imagine for a norm. In the first, a point $A$ goes to $A'$. It could be either slightly bigger or smaller, shown in a slightly bigger or smaller first value. Or the mirror reflection to be slightly off. This would create a non-zero space-times-time 3-vector. Everyone accepts that a norm can get larger or smaller, it is a "size" thing. But a change in direction will lead to imaginary terms that can either commute, anti-commute, or be a mixture of both. This possibility makes this view of a quaternion norm sound richer.
# Test out the second identity:
#
# $$(A^* B)^* = (B^*, A)$$
# In[4]:
AB_conj = A.Euclidean_product(B)
BA = B.Euclidean_product(A).conj()
print("(A* B)* = {}".format(AB_conj))
print("B* A = {}".format(BA))
print("(A* B)* - B* A = {}".format(AB_conj.dif(BA)))
# Note on notation: someone pointed out that if *absolutely all calculations start and end with quaternions*, then it is easy to feel lost - this quaternion looks like that one. The string at the end that I call a "qtype" represents all the steps that went into a calculation. The last qtype above reads: A*xB-B*xA* which hopefully is clear in this context.
# Despite the fact that quaternions do not commute, the conjugate operator does the job correctly because the angle between the two quaternions does not change.
# Now for the third identity about sums.
# In[5]:
A_plus_B_then_C = A.conj().add(B.conj()).product(C).expand_q()
AC_plus_BC = A.conj().product(C).add(B.conj().product(C)).expand_q()
print("(A+B)* C: {}\n".format(A_plus_B_then_C))
print("A*C + B*C: {}\n".format(AC_plus_BC))
print("(A+B)* C - (A*C + B*C): {}".format(A_plus_B_then_C.dif(AC_plus_BC)))
# Subtracting one from the other shows they are identical.
#
# There are many more algebraic relationships known for Hilbert spaces such as the triangle inequality and the Schwarz inequality which is the basis of the uncertainty principle. These all work for the [Euclidean product with quaternions](https://dougsweetser.github.io/Q/QM/bracket_notation/).
# ## Lecture 2: Quantum States
# ### Quaternion Series as Quantum States
# A quantum state is an n-dimensional vector space. This is fundamentally different from a set of states because certain math relationships are allowed. Vectors can be added to one another, multiplied by complex numbers. One can take the inner product of two vectors. Most important calculations involve taking the inner product.
#
# A perspective I will explore here is that a (possibly infinite) series of quaternions has the same algebraic properties of Hilbert spaces when one uses the Euclidean product, $A^* B = \sum_{1}^{n} a_n^* b_n$
# 
# This only works if the length of the series for **A** is exactly equal to that of **B**. Whatever can be done with a quaternion can be done with its series representation. Unlike vectors that can either be be a row or a column, quaternion series only have a length. Let's just do one calculation, < A | A >:
# In[6]:
A = qt.QHStates([qt.QH([0,1,2,3]), qt.QH([1,2,1,2])])
AA = A.Euclidean_product('bra', ket=A)
AA.print_states("<A|A>")
# A little calculation in the head should show this works as expected - except one is not used to seeing quaternion series in action.
# The first system analyzed has but 2 states, keeping things simple. The first pair of states are likewise so simple they are orthonormal to a casual observer.
# In[7]:
q0, q1, qi, qj, qk = qt.QH().q_0(), qt.QH().q_1(), qt.QH().q_i(), qt.QH().q_j(), qt.QH().q_k()
u = qt.QHStates([q1, q0])
d = qt.QHStates([q0, q1])
u.print_states("u", True)
d.print_states("d")
# Calculate $<u|u>$, $<d|d>$ and $<u|d>$:
# In[10]:
u.Euclidean_product('bra', ket=u).print_states("<u|u>")
# In[9]:
d.Euclidean_product('bra', ket=d).print_states("<d|d>")
# In[12]:
u.Euclidean_product('bra', ket=d).print_states("<u|d>")
# The next pair of states is constructed from the first pair, $u$ and $d$ like so (QM:TTM, page 41):
# In[15]:
sqrt_2op = qt.QHStates([qt.QH([sp.sqrt(1/2), 0, 0, 0])])
u2 = u.Euclidean_product('ket', operator=sqrt_2op)
d2 = d.Euclidean_product('ket', operator=sqrt_2op)
r = u2.add(d2)
L = u2.dif(d2)
r.print_states("r", True)
L.print_states("L")
# In[16]:
r.Euclidean_product('bra', ket=r).print_states("<r|r>", True)
L.Euclidean_product('bra', ket=L).print_states("<L|L>", True)
r.Euclidean_product('bra', ket=L).print_states("<r|L>", True)
# The final calculation for chapter 2 is like the one for $r$ and $L$ except one uses an arbitrarily chosen imaginary value - it could point any direction in 3D space - like so:
# In[20]:
i_op = qt.QHStates([q1, q0, q0, qi])
i = r.Euclidean_product('ket', operator=i_op)
o = L.Euclidean_product('ket', operator=i_op)
i.print_states("i", True)
o.print_states("o")
# In[22]:
i.Euclidean_product('bra', ket=i).print_states("<i|i>", True)
o.Euclidean_product('bra', ket=o).print_states("<o|o>", True)
i.Euclidean_product('bra', ket=o).print_states("<i|o>")
# Notice how long the qtypes have gotten (the strings that keep a record of all the manipulations done to a quaternion). The initial state was just a zero and a one, but that had to get added to another and normalized, then multiplied by a factor of $i$ and combined again.
# Orthonormal again, as hoped for.
# Is the quaternion series approach a faithful representation of these 6 states? On page 43-44, there are 8 products that all add up to one half. See if this works out...
# In[26]:
ou = o.Euclidean_product('bra', ket=u)
uo = i.Euclidean_product('bra', ket=o)
print("ouuo sum:\n", ou.product('bra', ket=uo).summation(), "\n")
od = o.Euclidean_product('bra', ket=d)
do = d.Euclidean_product('bra', ket=o)
print("oddo sum:\n", od.product('bra', ket=do).summation(), "\n")
iu = i.Euclidean_product('bra', ket=u)
ui = u.Euclidean_product('bra', ket=i)
print("iuui sum:\n", iu.product('bra', ket=ui).summation(), "\n")
id = i.Euclidean_product('bra', ket=d)
di = d.Euclidean_product('bra', ket=i)
print("iddi sum:\n", id.product('bra', ket=di).summation())
# In[28]:
Or = o.Euclidean_product('bra', ket=r)
ro = r.Euclidean_product('bra', ket=o)
print("orro:\n", Or.product('bra', ket=ro).summation(), "\n")
oL = o.Euclidean_product('bra', ket=L)
Lo = L.Euclidean_product('bra', ket=o)
print("oLLo:\n", oL.product('bra', ket=Lo).summation(), "\n")
ir = i.Euclidean_product('bra', ket=r)
ri = r.Euclidean_product('bra', ket=i)
print("irri:\n", ir.product('bra', ket=ri).summation(), "\n")
iL = i.Euclidean_product('bra', ket=L)
Li = L.Euclidean_product('bra', ket=i)
print("iLLi:\n", iL.product('bra', ket=Li).summation())
# There is an important technical detail in this calculation I should point out. In the <bra|ket> form, the bra gets conjugated. Notice though that if one does two of these, < i | L >< L | i >, then there has to be a product formed between the two brackets. In practice, < i | L >* < L | i > gives the wrong result:
# In[29]:
print("iL*Li:\n", iL.Euclidean_product('bra', ket=Li).summation())
| {
"content_hash": "9d8b5e3722f6605334f84bb73758c537",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 624,
"avg_line_length": 43.502262443438916,
"alnum_prop": 0.6953401289785729,
"repo_name": "dougsweetser/ipq",
"id": "f0695da76ac8c5c1acc91dfd7d4c1041692de0bb",
"size": "10580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q_notebooks/quaternion_qm_chapter_1_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25706"
},
{
"name": "HTML",
"bytes": "5632014"
},
{
"name": "JavaScript",
"bytes": "335"
},
{
"name": "Jupyter Notebook",
"bytes": "2415705"
},
{
"name": "Mathematica",
"bytes": "447691"
},
{
"name": "Python",
"bytes": "393605"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import unittest
# pylint: disable=W0403,W0212,W0612
from weatheralerts import WeatherAlerts
class Test_WeatherAlerts(unittest.TestCase):
    """Smoke tests for the WeatherAlerts API.

    NOTE(review): these tests hit the live NWS alert feed, so results
    depend on whatever alerts happen to be active when they run.
    """

    def setUp(self):
        # Fresh instance per test; downloads the current national feed.
        self.nws = WeatherAlerts()

    def test_almost_everything(self):
        print("Alerts currently in feed {0}".format(len(self.nws.alerts)))

    def test_event_state_counties(self):
        self.nws.event_state_counties()

    def test_samecode_alerts_method(self):
        # '016027' is a SAME/FIPS code used elsewhere in this suite for
        # Canyon County, ID -- TODO confirm.
        self.nws.samecode_alerts('016027')

    def test_refresh(self):
        self.nws.refresh()

    def test_refresh_forced(self):
        self.nws.refresh(force=True)

    def test_county_state_alerts(self):
        self.nws.county_state_alerts('canyon', 'ID')

    def test_alert_attributes(self):
        # Touch every public attribute; a missing one raises AttributeError.
        for alert in self.nws.alerts:
            x = alert.title
            x = alert.summary
            x = alert.areadesc
            x = alert.event
            x = alert.samecodes
            x = alert.zonecodes
            x = alert.expiration
            x = alert.updated
            x = alert.effective
            x = alert.published
            x = alert.severity
            x = alert.category
            x = alert.urgency

    def test_passing_samecodes(self):
        # Alerts by a Samecode: a single code, an explicit list, and the
        # full set of known codes must all be accepted.
        testobjs = []
        testobjs.append(WeatherAlerts(samecodes='016027'))
        testobjs.append(WeatherAlerts(samecodes=['016027', '016001', '016073', '016075']))
        samecodes = list(self.nws.geo.samecodes.keys())  # will return dict_keys obj in py3
        testobjs.append(WeatherAlerts(samecodes=samecodes))  # use them for testing
        for nws in testobjs:
            for alert in nws.alerts:
                x = alert.title
                x = alert.summary
                x = alert.areadesc
                x = alert.event
                x = alert.samecodes
                x = alert.zonecodes
                x = alert.expiration
                x = alert.updated
                x = alert.effective
                x = alert.published
                x = alert.severity
                x = alert.category
                x = alert.urgency

    def test_passing_state(self):
        # Filtering by state must also yield fully-populated alerts.
        nws = WeatherAlerts(state='ID')
        for alert in nws.alerts:
            x = alert.title
            x = alert.summary
            x = alert.areadesc
            x = alert.event
            x = alert.samecodes
            x = alert.zonecodes
            x = alert.expiration
            x = alert.updated
            x = alert.effective
            x = alert.published
            x = alert.severity
            x = alert.category
            x = alert.urgency

    def test_break_on_samecodes(self):
        '''break if you pass in non str/list samecodes'''
        # The constructor must reject a non-str/list samecodes argument.
        try:
            nws = WeatherAlerts(samecodes=1)
        except Exception:
            pass
        else:
            raise Exception("That shouldn't have worked")
if __name__ == '__main__':
    # Run the suite directly: python test_weatheralerts.py
    unittest.main()
| {
"content_hash": "a5112be2e4a4522945cd55fa5d5ade1c",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 91,
"avg_line_length": 30.353535353535353,
"alnum_prop": 0.5507487520798668,
"repo_name": "zebpalmer/WeatherAlerts",
"id": "3117cc7594306a1a8cd7f8ed55e8a58f1984fd0f",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_weatheralerts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41573"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
# NLP data analysis
import nltk
nltk.download()
from nltk.corpus import stopwords
# Interact with user machine
from enum import Enum
import re
import sys
from sys import argv
import os
# INCLUDE HOW MANY WISHES WERE MADE
# INCLUDE LONGEST STREAK OF TEXT MESSAGES
# Time In Milliseconds
class TIM(Enum):
    """Time spans expressed in milliseconds.

    BUG FIX: the original wrote "SECOND = 1000," etc.; the trailing
    commas turned every value into a tuple, and "SECOND * 60" then
    produced a 60-element tuple by repetition instead of 60000.
    """
    SECOND = 1000
    MINUTE = SECOND * 60
    HOUR = MINUTE * 60
    DAY = HOUR * 24
    WEEK = DAY * 7
    # Approximations: 30-day months, 365-day years.
    MONTH = WEEK * 4 + DAY * 2
    YEAR = MONTH * 12 + DAY * 5
class NLPAnalyze:
    # REWORK IMMEDIATELY
    # REWORK IMMEDIATELY
    # REWORK IMMEDIATELY
    '''all NLP analysis starts here'''
    # NOTE(review): this class is known-broken (see the REWORK markers):
    #   * the patterns below use nested '[...]', which the re module
    #     parses as character classes rather than groups, so they will
    #     not match the words they appear to target ("haha", "love", ...);
    #   * __init__ references an undefined name `text`, so constructing
    #     an instance raises NameError.
    def __init__(self):
        # Some basic regex
        self.laugh = re.compile('[h[e|a]]{2,}')
        self.love = re.compile('[l+[u|o]+v+e*]')
        self.you = re.compile('[[y+o+u+]|u+]')
        self.swear = re.compile('[f+[a|u]+[c|k]+] | [s+h+i+t+] | [c+u+n+t+]')
        self.babey = re.compile('[b+a+b+[e+|y+]]')
        self.heart = re.compile('<+3+')
        # Remove stop words
        cachedStopWords = stopwords.words('english')
        # NOTE(review): `text` is undefined here -- NameError at runtime.
        [word for word in text if word not in cachedStopWords]
        # text4.dispersion_plot(["citizens", "democracy", "freedom", "duties", "America"])
        # Really cool feature to examine changes through time
class ConvParty:
    """Store data for a conversation participant and their associated data.

    Holds one ConvData series per tracked metric, keyed by metric name.
    """

    def __init__(self, partyName):
        self.name = partyName
        self.dataSet = {
            'sms': ConvData('sms'),
            'length': ConvData('length'),
            'responseTime': ConvData('responseTime'),
            'timeToNext': ConvData('timeToNext')
        }

    def __getitem__(self, key):
        """Look up a metric series; 'name' returns the party name.

        Unknown keys yield None, matching the original contract.
        """
        if key == 'name':
            return self.name
        # BUG FIX: the original tested `key in self.data`, but the
        # attribute is called dataSet (AttributeError on every access).
        return self.dataSet.get(key)

    def __setitem__(self, key, value):
        # BUG FIX: the original referenced an undefined name `key`
        # (its parameter was `idx`) and tested the missing self.data.
        if key in self.dataSet:
            self.dataSet[key] = value

    def __str__(self):
        return 'DATA FOR ' + self.name

    def addSMS(self, sms):
        """Fold one SMS into every metric series for this party.

        Zero response/gap times mean "unknown" and are not recorded.
        """
        self['sms'] += sms
        self['length'] += sms['length']
        if sms['responseTime'] > 0:
            self['responseTime'] += sms['responseTime']
        if sms['timeToNext'] > 0:
            self['timeToNext'] += sms['timeToNext']

    def analyze(self):
        print('Analyzing data for ' + self.name + '...')
        # BUG FIX: iterating the dict directly yields key strings; the
        # ConvData objects live in the values.
        for series in self.dataSet.values():
            series.analyze()
        # Do NLTK analysis here
class ConvData:
    """A single metric series (one participant, one data type).

    Observations accumulate via `+`/`+=`; analyze() fills self.stats
    with summary statistics.
    """

    def __init__(self, title):
        # e.g. 'responseTime', 'message', 'length', etc.
        self.title = title
        self.data = []    # raw observations, in arrival order
        self.count = 0    # number of observations folded in
        self.stats = {}   # summary statistics, filled in by analyze()

    def __add__(self, other):
        # BUG FIX: `self.data += other` only works when `other` is
        # iterable; observations are single values, so append them.
        self.data.append(other)
        self.count += 1
        return self

    def __iadd__(self, other):
        self.data.append(other)
        self.count += 1
        return self

    def __str__(self):
        # BUG FIX: the original concatenated str + int (TypeError).
        return '{} WITH {}'.format(self.title, self.count)

    def analyze(self, other=None):
        """Compute summary statistics once; no-op when already computed
        or when there is no data (the statistics functions raise on
        empty input)."""
        # BUG FIX: mean/median_grouped/mode/stdev were never imported.
        from statistics import mean, median_grouped, mode, stdev
        if not self.stats and self.data:
            self.stats['average'] = mean(self.data)
            self.stats['median'] = median_grouped(self.data)
            self.stats['mode'] = mode(self.data)
            # stdev needs at least two observations.
            self.stats['stdev'] = stdev(self.data) if self.count > 1 else 0.0
class Conversation:
    """Store data for a conversation given two participants.

    Keeps one ConvParty per participant plus a combined 'total' party.
    """

    def __init__(self, party1, party2):
        # BUG FIX: the original referenced the undefined names
        # partyName1/partyName2 instead of its own parameters.
        self.parties = {
            'party1': ConvParty(party1),
            'party2': ConvParty(party2),
            'total': ConvParty('total')}

    def __getitem__(self, key):
        # Unknown keys yield None, matching the original contract.
        return self.parties.get(key)

    def __str__(self):
        # BUG FIX: the original referenced bare party1/party2 names
        # that do not exist at method scope.
        return ('Conversation between ' + self.parties['party1'].name +
                ' and ' + self.parties['party2'].name)

    def addSMS(self, sms):
        """Route an SMS to its sender's ConvParty and to the totals.

        BUG FIX: the original wrote `self[sms[party]]`, using an
        undefined name and indexing self.parties by participant *name*
        while its keys are 'party1'/'party2'/'total'.
        """
        for slot in ('party1', 'party2'):
            if self.parties[slot].name == sms['party']:
                self.parties[slot].addSMS(sms)
                break
        self.parties['total'].addSMS(sms)

    def analyze(self):
        # BUG FIX: iterating the dict yields key strings; analyze the
        # ConvParty values instead.
        for party in self.parties.values():
            party.analyze()
class SMS:
    """Store data for a single SMS and its derived metrics."""

    def __init__(self, date, party, message):
        self.data = {
            'date': date,            # timestamp in milliseconds
            'message': message,
            'length': len(message),
            'party': party,          # sender's name
            'responseTime': 0,       # ms since the other party's last SMS
            'timeToNext': 0,         # ms until this party's next SMS
            'wish': False            # sent at 11:11 / 23:11?
        }
        self._checkWish()

    def __getitem__(self, key):
        # Unknown keys yield None, matching the original contract.
        return self.data.get(key)

    def __setitem__(self, idx, value):
        self.data[idx] = value

    def __str__(self):
        # BUG FIX: the original read self.message / self.party / ... but
        # every field lives in the self.data dict (AttributeError).
        return ('[' + str(self['message']) + '] '
                'FROM [' + str(self['party']) + '] '
                'AT [' + str(self['date']) + '] '
                'IN [' + str(self['responseTime']) + ']')

    def _checkWish(self):
        '''check if a wish was made around 11:11/23:11 with this SMS'''
        # BUG FIX: defined without `self` although called as a bound
        # method from __init__ (TypeError).  Still unimplemented.
        pass
def transcribe(root, conversation):
    """Parse an SMS-backup XML tree into *conversation*.

    For each message, also back-fills timing metrics:
      * timeToNext   -- ms until the same party's next SMS
      * responseTime -- ms since the most recent SMS from the other party
    """
    print('Parsing messages from XML file...')
    for sms in root.findall('sms'):
        # Input time as milliseconds
        date = int(sms.attrib['date'])
        # Determine which party sent the message:
        # type '2' = sent (party1), type '1' = received (party2).
        if sms.attrib['type'] == '2':
            party = conversation['party1']['name']
        elif sms.attrib['type'] == '1':
            party = conversation['party2']['name']
        else:
            # Skip drafts/other message types; the original left `party`
            # unbound here and would have raised NameError.
            continue
        # Include UTF-8 and Emoji support in later revisions
        message = str(sms.attrib['body']).encode('ascii', 'ignore')
        newSMS = SMS(date, party, message)
        # BUG FIX: the original walked a nonexistent conversation.messages
        # list and used unquoted keys (timeToNext, date, ...); walk the
        # SMS series kept on the 'total' party instead, newest first.
        previous_messages = conversation['total']['sms'].data
        for previousSMS in reversed(previous_messages):
            if previousSMS['party'] == newSMS['party']:
                # Set the time between consecutive SMSs from one party;
                # stop once we hit a message that is already linked.
                if not previousSMS['timeToNext']:
                    previousSMS['timeToNext'] = newSMS['date'] - previousSMS['date']
                else:
                    break
            else:
                # Set the time it took to respond to the other party
                # (only from the most recent such message).
                if not newSMS['responseTime']:
                    newSMS['responseTime'] = newSMS['date'] - previousSMS['date']
        conversation.addSMS(newSMS)
    # BUG FIX: the original concatenated str + int (TypeError).
    print('Successfully parsed {} messages!'.format(
        conversation['total']['sms'].count))
def main(party1, party2):
    '''Build a Conversation from sms.xml and run the analysis.'''
    # Initialize conversation; it constructs its own ConvParty objects
    # (the original also built two unused ConvParty instances here).
    convo = Conversation(party1, party2)
    # Parse messages into conversation from ET-parsed XML file.
    # BUG FIX: transcribe() returns None; the original bound it to an
    # unused `messages` variable.
    transcribe(ET.parse('sms.xml').getroot(), convo)
    # Perform analysis on the gathered SMS data
    convo.analyze()
    # Initialize graphics output
if __name__ == '__main__':
    # Usage: python info-txt.py <your name> <contact name>
    if (len(argv) < 3):
        raise Exception('Please enter your name and then your contact\'s name')
    main(argv[1], argv[2])
| {
"content_hash": "22605339134d2973c2d0b93a4dbd8e12",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 84,
"avg_line_length": 26.783898305084747,
"alnum_prop": 0.6568580920740389,
"repo_name": "2nd47/info-txt",
"id": "b25a89c14a925ce94e15802e7772bc49ae9e0bb9",
"size": "6335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "info-txt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6335"
}
],
"symlink_target": ""
} |
import unittest
import config
import thread_cert
from pktverify.consts import MLE_CHILD_ID_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_CHILD_UPDATE_REQUEST, MLE_CHILD_UPDATE_RESPONSE, MODE_TLV
from pktverify.packet_verifier import PacketVerifier
LEADER = 1
ED = 2
class Cert_6_5_3_ChildResetSynchronize(thread_cert.TestCase):
    """Thread cert test 6.5.3: an End Device resynchronizes with its
    parent after a reset shorter than the child timeout."""

    # Two-node topology: a full Leader and an MTD End Device that
    # allowlist only each other.
    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'panid': 0xface,
            'allowlist': [ED]
        },
        ED: {
            'name': 'ED',
            'is_mtd': True,
            'mode': 'rn',
            'panid': 0xface,
            'timeout': config.DEFAULT_CHILD_TIMEOUT,
            'allowlist': [LEADER]
        },
    }

    def _setUpEd(self):
        # Restore the ED's allowlist, which a reset clears.
        self.nodes[ED].add_allowlist(self.nodes[LEADER].get_addr64())
        self.nodes[ED].enable_allowlist()

    def test(self):
        # Bring up the Leader first.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        # Attach the ED as a child.
        self.nodes[ED].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED].get_state(), 'child')

        # Reset the ED for less than the child timeout, then restart;
        # it must come back as a child.
        self.nodes[ED].reset()
        self._setUpEd()
        self.simulator.go(1)
        self.nodes[ED].start()
        self.simulator.go(1)
        self.assertEqual(self.nodes[ED].get_state(), 'child')

        # The ED must answer pings on its link-local address.
        addrs = self.nodes[ED].get_addrs()
        for addr in addrs:
            if addr[0:4] == 'fe80':
                self.assertTrue(self.nodes[LEADER].ping(addr))

    def verify(self, pv):
        """Check the captured packets against the cert test steps."""
        pkts = pv.pkts
        pv.summary.show()

        LEADER = pv.vars['LEADER']
        ED = pv.vars['ED']
        _leader_pkts = pkts.filter_wpan_src64(LEADER)
        _ed_pkts = pkts.filter_wpan_src64(ED)

        # Step 2: Reset the DUT for a time shorter than
        # the Child Timeout Duration.
        # Step 3: Send MLE Child Update Request to Leader
        _ed_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next()
        _leader_pkts.range(_ed_pkts.index).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
        _ed_pkts.filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).must_next().must_verify(
            lambda p: {MODE_TLV} < set(p.mle.tlv.type))

        # Step 4: Leader send an MLE Child Update Response
        _leader_pkts.range(_ed_pkts.index).filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next()
        _ed_pkts.range(_leader_pkts.index).filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_not_next()

        # Step 5: The DUT MUST respond with ICMPv6 Echo Reply
        _ed_pkts.filter_ping_reply().filter(lambda p: p.wpan.src64 == ED and p.wpan.dst64 == LEADER).must_next()
# Allow running this certification test directly from the command line via
# the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "b7f20ecd2222ae439896c5d53ff52605",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 135,
"avg_line_length": 32.548780487804876,
"alnum_prop": 0.5878606219557887,
"repo_name": "bukepo/openthread",
"id": "203d2018178ee79aec0246c443f33f4837292131",
"size": "4274",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_6_5_03_ChildResetSynchronize.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "50"
},
{
"name": "C",
"bytes": "1080565"
},
{
"name": "C++",
"bytes": "5839893"
},
{
"name": "CMake",
"bytes": "95509"
},
{
"name": "Dockerfile",
"bytes": "6286"
},
{
"name": "M4",
"bytes": "36443"
},
{
"name": "Makefile",
"bytes": "161153"
},
{
"name": "Python",
"bytes": "3379923"
},
{
"name": "Shell",
"bytes": "134708"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Scattersmith(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "scattersmith"
_valid_props = {
"cliponaxis",
"connectgaps",
"customdata",
"customdatasrc",
"fill",
"fillcolor",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hoveron",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"imag",
"imagsrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"marker",
"meta",
"metasrc",
"mode",
"name",
"opacity",
"real",
"realsrc",
"selected",
"selectedpoints",
"showlegend",
"stream",
"subplot",
"text",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
}
# cliponaxis
# ----------
@property
def cliponaxis(self):
"""
Determines whether or not markers and text nodes are clipped
about the subplot axes. To show markers and text nodes above
axis lines and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
The 'cliponaxis' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cliponaxis"]
@cliponaxis.setter
def cliponaxis(self, val):
self["cliponaxis"] = val
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". scattersmith has a subset of the options
available to scatter. "toself" connects the endpoints of the
trace (or each segment of the trace if it has gaps) into a
closed shape. "tonext" fills the space between two traces if
one completely encloses the other (eg consecutive contour
lines), and behaves like "toself" if there is no trace before
it. "tonext" should not be used if one trace does not enclose
the other.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself', 'tonext']
Returns
-------
Any
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['real', 'imag', 'text', 'name'] joined with '+' characters
(e.g. 'real+imag')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
Returns
-------
plotly.graph_objs.scattersmith.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hoveron
# -------
@property
def hoveron(self):
"""
Do the hover effects highlight individual points (markers or
line points) or do they highlight filled regions? If the fill
is "toself" or "tonext" and there are no markers or text, then
the default is "fills", otherwise it is "points".
The 'hoveron' property is a flaglist and may be specified
as a string containing:
- Any combination of ['points', 'fills'] joined with '+' characters
(e.g. 'points+fills')
Returns
-------
Any
"""
return self["hoveron"]
@hoveron.setter
def hoveron(self, val):
self["hoveron"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. The variables available in `hovertemplate`
are the ones emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (x,y) pair. If a
single string, the same string appears over all the data
points. If an array of string, the items are mapped in order to
the this trace's (x,y) coordinates. To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# imag
# ----
@property
def imag(self):
"""
Sets the imaginary component of the data, in units of
normalized impedance such that real=1, imag=0 is the center of
the chart.
The 'imag' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["imag"]
@imag.setter
def imag(self, val):
self["imag"] = val
# imagsrc
# -------
@property
def imagsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `imag`.
The 'imagsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["imagsrc"]
@imagsrc.setter
def imagsrc(self, val):
self["imagsrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.scattersmith.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
`*reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# legendwidth
# -----------
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
backoff
Sets the line back off from the end point of
the nth line segment (in px). This option is
useful e.g. to avoid overlap with arrowhead
markers. With "auto" the lines would trim
before markers if `marker.angleref` is set to
"previous".
backoffsrc
Sets the source reference on Chart Studio Cloud
for `backoff`.
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the
lines are drawn using spline interpolation. The
other available values correspond to step-wise
line shapes.
smoothing
Has an effect only if `shape` is set to
"spline" Sets the amount of smoothing. 0
corresponds to no smoothing (equivalent to a
"linear" shape).
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattersmith.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
angle
Sets the marker angle in respect to `angleref`.
angleref
Sets the reference for marker angle. With
"previous", angle 0 points along the line from
the previous point to this one. With "up",
angle 0 points toward the top of the screen.
anglesrc
Sets the source reference on Chart Studio Cloud
for `angle`.
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.scattersmith.marke
r.ColorBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
gradient
:class:`plotly.graph_objects.scattersmith.marke
r.Gradient` instance or dict with compatible
properties
line
:class:`plotly.graph_objects.scattersmith.marke
r.Line` instance or dict with compatible
properties
maxdisplayed
Sets a maximum number of points to be drawn on
the graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color` is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
standoff
Moves the marker away from the data point in
the direction of `angle` (in px). This can be
useful for example if you have another marker
at this location and you want to point an
arrowhead marker at it.
standoffsrc
Sets the source reference on Chart Studio Cloud
for `standoff`.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for `symbol`.
Returns
-------
plotly.graph_objs.scattersmith.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover. If there are less than 20 points and the trace is not
stacked then the default is "lines+markers". Otherwise,
"lines".
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["mode"]
@mode.setter
def mode(self, val):
self["mode"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# real
# ----
@property
def real(self):
"""
Sets the real component of the data, in units of normalized
impedance such that real=1, imag=0 is the center of the chart.
The 'real' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["real"]
@real.setter
def real(self, val):
self["real"] = val
# realsrc
# -------
@property
def realsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `real`.
The 'realsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["realsrc"]
@realsrc.setter
def realsrc(self, val):
self["realsrc"] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattersmith.selec
ted.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattersmith.selec
ted.Textfont` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattersmith.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.scattersmith.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# subplot
# -------
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
smith subplot. If "smith" (the default value), the data refer
to `layout.smith`. If "smith2", the data refer to
`layout.smith2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'smith', that may be specified as the string 'smith'
optionally followed by an integer >= 1
(e.g. 'smith', 'smith1', 'smith2', 'smith3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair. If a single
string, the same string appears over all the data points. If an
array of string, the items are mapped in order to the this
trace's (x,y) coordinates. If trace `hoverinfo` contains a
"text" flag and "hovertext" is not set, these elements will be
seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
    """
    Sets the text font.

    The 'textfont' property is an instance of Textfont that may be
    specified as:
      - An instance of :class:`plotly.graph_objs.scattersmith.Textfont`
      - A dict of string/value properties that will be passed to the
        Textfont constructor.  Supported keys: `color`, `colorsrc`,
        `family`, `familysrc`, `size`, `sizesrc` (the ``*src`` keys
        name Chart Studio Cloud source references for the matching
        attribute; `family` is an HTML font family applied by the
        web browser when available).

    Returns
    -------
    plotly.graph_objs.scattersmith.Textfont
    """
    return self["textfont"]

@textfont.setter
def textfont(self, value):
    self["textfont"] = value
# textposition
# ------------
@property
def textposition(self):
    """
    Sets the positions of the `text` elements with respects to the
    (x,y) coordinates.

    The 'textposition' property is an enumeration that may be specified as:
      - One of the following enumeration values:
        ['top left', 'top center', 'top right', 'middle left',
        'middle center', 'middle right', 'bottom left', 'bottom
        center', 'bottom right']
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["textposition"]

@textposition.setter
def textposition(self, value):
    self["textposition"] = value
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for
    `textposition`.

    The 'textpositionsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["textpositionsrc"]

@textpositionsrc.setter
def textpositionsrc(self, value):
    self["textpositionsrc"] = value
# textsrc
# -------
@property
def textsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `text`.

    The 'textsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["textsrc"]

@textsrc.setter
def textsrc(self, value):
    self["textsrc"] = value
# texttemplate
# ------------
@property
def texttemplate(self):
    """
    Template string used for rendering the information text that
    appear on points.  Note that this will override `textinfo`.

    Variables are inserted using %{variable}, for example "y: %{y}".
    Numbers are formatted with d3-format syntax
    (%{variable:d3-format}, e.g. "Price: %{y:$.2f}"; see
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format) and dates
    with d3-time-format syntax (%{variable|d3-time-format}, e.g.
    "Day: %{2019-01-01|%A}"; see https://github.com/d3/d3-time-
    format/tree/v2.2.3#locale_format).  Every attribute that can be
    specified per-point (the ones that are `arrayOk: true`) is
    available, plus the variables `real`, `imag` and `text`.

    The 'texttemplate' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["texttemplate"]

@texttemplate.setter
def texttemplate(self, value):
    self["texttemplate"] = value
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
    """
    Sets the source reference on Chart Studio Cloud for
    `texttemplate`.

    The 'texttemplatesrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["texttemplatesrc"]

@texttemplatesrc.setter
def texttemplatesrc(self, value):
    self["texttemplatesrc"] = value
# uid
# ---
@property
def uid(self):
    """
    Assign an id to this trace.  Use this to provide object
    constancy between traces during animations and transitions.

    The 'uid' property is a string and must be specified as:
      - A string
      - A number that will be converted to a string

    Returns
    -------
    str
    """
    return self["uid"]

@uid.setter
def uid(self, value):
    self["uid"] = value
# uirevision
# ----------
@property
def uirevision(self):
    """
    Controls persistence of some user-driven changes to the trace:
    `constraintrange` in `parcoords` traces, as well as some
    `editable: true` modifications such as `name` and
    `colorbar.title`.  Defaults to `layout.uirevision`.

    Other user-driven trace attribute changes are controlled by
    `layout` attributes: `trace.visible` by
    `layout.legend.uirevision`, `selectedpoints` by
    `layout.selectionrevision`, and `colorbar.(x|y)` (accessible
    with `config: {editable: true}`) by `layout.editrevision`.
    Trace changes are tracked by `uid`, which only falls back on
    trace index if no `uid` is provided, so giving each trace a
    `uid` that stays with it as it moves preserves user-driven
    changes even if your app adds/removes traces before the end of
    the `data` array.

    The 'uirevision' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, value):
    self["uirevision"] = value
# unselected
# ----------
@property
def unselected(self):
    """
    Styling applied to points that are NOT selected.

    The 'unselected' property is an instance of Unselected
    that may be specified as:
      - An instance of :class:`plotly.graph_objs.scattersmith.Unselected`
      - A dict of string/value properties that will be passed to the
        Unselected constructor.  Supported keys: `marker`
        (:class:`plotly.graph_objects.scattersmith.unselected.Marker`
        instance or compatible dict) and `textfont`
        (:class:`plotly.graph_objects.scattersmith.unselected.Textfont`
        instance or compatible dict).

    Returns
    -------
    plotly.graph_objs.scattersmith.Unselected
    """
    return self["unselected"]

@unselected.setter
def unselected(self, value):
    self["unselected"] = value
# visible
# -------
@property
def visible(self):
    """
    Determines whether or not this trace is visible.  If
    "legendonly", the trace is not drawn, but can appear as a
    legend item (provided that the legend itself is visible).

    The 'visible' property is an enumeration that may be specified as:
      - One of the following enumeration values:
        [True, False, 'legendonly']

    Returns
    -------
    Any
    """
    return self["visible"]

@visible.setter
def visible(self, value):
    self["visible"] = value
# type
# ----
@property
def type(self):
    """
    The trace type identifier.

    Read-only: the constructor stores the literal "scattersmith" in
    the private `_props` mapping and no setter is defined, so this
    always reflects that literal.

    Returns
    -------
    str
    """
    return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Human-readable descriptions of every settable property on this
    # trace type.  Consumed by the base plotly type when building help
    # text and validation error messages.  This text is generated and
    # must stay in sync with the individual property docstrings — do
    # not edit it by hand.
    return """\
        cliponaxis
            Determines whether or not markers and text nodes are
            clipped about the subplot axes. To show markers and
            text nodes above axis lines and tick labels, make sure
            to set `xaxis.layer` and `yaxis.layer` to *below
            traces*.
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the provided data arrays are connected.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        fill
            Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". scattersmith has a subset of
            the options available to scatter. "toself" connects the
            endpoints of the trace (or each segment of the trace if
            it has gaps) into a closed shape. "tonext" fills the
            space between two traces if one completely encloses the
            other (eg consecutive contour lines), and behaves like
            "toself" if there is no trace before it. "tonext"
            should not be used if one trace does not enclose the
            other.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.scattersmith.Hoverlabel`
            instance or dict with compatible properties
        hoveron
            Do the hover effects highlight individual points
            (markers or line points) or do they highlight filled
            regions? If the fill is "toself" or "tonext" and there
            are no markers or text, then the default is "fills",
            otherwise it is "points".
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each (x,y)
            pair. If a single string, the same string appears over
            all the data points. If an array of string, the items
            are mapped in order to the this trace's (x,y)
            coordinates. To be seen, trace `hoverinfo` must contain
            a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        imag
            Sets the imaginary component of the data, in units of
            normalized impedance such that real=1, imag=0 is the
            center of the chart.
        imagsrc
            Sets the source reference on Chart Studio Cloud for
            `imag`.
        legendgroup
            Sets the legend group for this trace. Traces part of
            the same legend group hide/show at the same time when
            toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.scattersmith.Legendgroupti
            tle` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with `*reversed* `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.scattersmith.Line`
            instance or dict with compatible properties
        marker
            :class:`plotly.graph_objects.scattersmith.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        mode
            Determines the drawing mode for this scatter trace. If
            the provided `mode` includes "text" then the `text`
            elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover. If there are less than
            20 points and the trace is not stacked then the default
            is "lines+markers". Otherwise, "lines".
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        real
            Sets the real component of the data, in units of
            normalized impedance such that real=1, imag=0 is the
            center of the chart.
        realsrc
            Sets the source reference on Chart Studio Cloud for
            `real`.
        selected
            :class:`plotly.graph_objects.scattersmith.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.scattersmith.Stream`
            instance or dict with compatible properties
        subplot
            Sets a reference between this trace's data coordinates
            and a smith subplot. If "smith" (the default value),
            the data refer to `layout.smith`. If "smith2", the data
            refer to `layout.smith2`, and so on.
        text
            Sets text elements associated with each (x,y) pair. If
            a single string, the same string appears over all the
            data points. If an array of string, the items are
            mapped in order to the this trace's (x,y) coordinates.
            If trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textfont
            Sets the text font.
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            `textposition`.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. variables `real`, `imag` and `text`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.scattersmith.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
    self,
    arg=None,
    cliponaxis=None,
    connectgaps=None,
    customdata=None,
    customdatasrc=None,
    fill=None,
    fillcolor=None,
    hoverinfo=None,
    hoverinfosrc=None,
    hoverlabel=None,
    hoveron=None,
    hovertemplate=None,
    hovertemplatesrc=None,
    hovertext=None,
    hovertextsrc=None,
    ids=None,
    idssrc=None,
    imag=None,
    imagsrc=None,
    legendgroup=None,
    legendgrouptitle=None,
    legendrank=None,
    legendwidth=None,
    line=None,
    marker=None,
    meta=None,
    metasrc=None,
    mode=None,
    name=None,
    opacity=None,
    real=None,
    realsrc=None,
    selected=None,
    selectedpoints=None,
    showlegend=None,
    stream=None,
    subplot=None,
    text=None,
    textfont=None,
    textposition=None,
    textpositionsrc=None,
    textsrc=None,
    texttemplate=None,
    texttemplatesrc=None,
    uid=None,
    uirevision=None,
    unselected=None,
    visible=None,
    **kwargs,
):
    """
    Construct a new Scattersmith object

    The scattersmith trace type encompasses line charts, scatter
    charts, text charts, and bubble charts in smith coordinates.
    The data visualized as scatter point or lines is set in `real`
    and `imag` (imaginary) coordinates Text (appearing either on
    the chart or on hover only) is via `text`. Bubble charts are
    achieved by setting `marker.size` and/or `marker.color` to
    numerical arrays.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of :class:`plotly.graph_objs.Scattersmith`
    cliponaxis, connectgaps, customdata, customdatasrc, fill,
    fillcolor, hoverinfo, hoverinfosrc, hoverlabel, hoveron,
    hovertemplate, hovertemplatesrc, hovertext, hovertextsrc,
    ids, idssrc, imag, imagsrc, legendgroup, legendgrouptitle,
    legendrank, legendwidth, line, marker, meta, metasrc, mode,
    name, opacity, real, realsrc, selected, selectedpoints,
    showlegend, stream, subplot, text, textfont, textposition,
    textpositionsrc, textsrc, texttemplate, texttemplatesrc,
    uid, uirevision, unselected, visible
        Trace attributes.  Each keyword accepts exactly the same
        values as the identically named property on this class;
        see the property docstrings for accepted values and
        semantics.  ``None`` leaves the attribute unset.

    Returns
    -------
    Scattersmith
    """
    super(Scattersmith, self).__init__("scattersmith")

    # Internal fast path used when building an object graph: adopt the
    # supplied parent and skip all per-property processing.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Validate arg
    # ------------
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Copy so the pops below never mutate the caller's dict.
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.Scattersmith
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scattersmith`"""
        )

    # Handle skip_invalid
    # -------------------
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate data dict with properties
    # ----------------------------------
    # For each property, an explicit keyword argument takes precedence
    # over the same key inside `arg`; the `arg` entry is always popped
    # so it is not reprocessed as an unknown kwarg below.  ``None``
    # values are skipped entirely (attribute stays unset).
    for prop_name, kwarg_value in (
        ("cliponaxis", cliponaxis),
        ("connectgaps", connectgaps),
        ("customdata", customdata),
        ("customdatasrc", customdatasrc),
        ("fill", fill),
        ("fillcolor", fillcolor),
        ("hoverinfo", hoverinfo),
        ("hoverinfosrc", hoverinfosrc),
        ("hoverlabel", hoverlabel),
        ("hoveron", hoveron),
        ("hovertemplate", hovertemplate),
        ("hovertemplatesrc", hovertemplatesrc),
        ("hovertext", hovertext),
        ("hovertextsrc", hovertextsrc),
        ("ids", ids),
        ("idssrc", idssrc),
        ("imag", imag),
        ("imagsrc", imagsrc),
        ("legendgroup", legendgroup),
        ("legendgrouptitle", legendgrouptitle),
        ("legendrank", legendrank),
        ("legendwidth", legendwidth),
        ("line", line),
        ("marker", marker),
        ("meta", meta),
        ("metasrc", metasrc),
        ("mode", mode),
        ("name", name),
        ("opacity", opacity),
        ("real", real),
        ("realsrc", realsrc),
        ("selected", selected),
        ("selectedpoints", selectedpoints),
        ("showlegend", showlegend),
        ("stream", stream),
        ("subplot", subplot),
        ("text", text),
        ("textfont", textfont),
        ("textposition", textposition),
        ("textpositionsrc", textpositionsrc),
        ("textsrc", textsrc),
        ("texttemplate", texttemplate),
        ("texttemplatesrc", texttemplatesrc),
        ("uid", uid),
        ("uirevision", uirevision),
        ("unselected", unselected),
        ("visible", visible),
    ):
        from_arg = arg.pop(prop_name, None)
        chosen = kwarg_value if kwarg_value is not None else from_arg
        if chosen is not None:
            self[prop_name] = chosen

    # Read-only literals
    # ------------------
    self._props["type"] = "scattersmith"
    arg.pop("type", None)

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
| {
"content_hash": "a0005e00d2330ef829979420d172d4fb",
"timestamp": "",
"source": "github",
"line_count": 2319,
"max_line_length": 90,
"avg_line_length": 36.58171625700733,
"alnum_prop": 0.5603361899260901,
"repo_name": "plotly/plotly.py",
"id": "45e6c96eb20c831bd5ea7ab215b8a88ee82e287a",
"size": "84833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/_scattersmith.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
__doc__="""
Runs the preflightg - G for Glyphs - shell script from your chosen project folder
"""
__copyright__ = 'Copyright (c) 2018, SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'Nicolas Spalinger'
import GlyphsApp
from subprocess import Popen, PIPE
def runAppleScript(scpt, args=None):
    """Execute an AppleScript source string via ``osascript``.

    Parameters:
        scpt: AppleScript source code (str, or bytes already encoded).
        args: optional list of extra ``osascript`` command-line arguments.

    Returns:
        The raw stdout bytes produced by the script.

    Any stderr output from ``osascript`` is printed as an error.
    """
    # Fixed: the original used a mutable default argument (args=[]),
    # which is shared across calls.
    if args is None:
        args = []
    # '-' tells osascript to read the script from stdin.
    p = Popen(['osascript', '-'] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # communicate() requires bytes on Python 3; encode str input.
    # (On Python 2 a plain str passes through unchanged.)
    if isinstance(scpt, str):
        scpt = scpt.encode('utf-8')
    stdout, stderr = p.communicate(scpt)
    if stderr:
        # print() call form works on both Python 2 and Python 3; the
        # original used Python-2-only print statements.
        print("AppleScript Error:")
        print(stderr.decode('utf-8'))
    return stdout
runpreflightg = """
tell application "Finder"
activate
set frontmost to true
set projectRoot to quoted form of POSIX path of (choose folder with prompt "Please select the project folder root, e.g. font-gentium" with invisibles)
tell application "Terminal"
activate
tell window 1
do script "cd " & projectRoot & "; ./preflightg"
end tell
end tell
end tell
tell application "Finder"
display notification "Running preflightg on your project, watch for errors in the output, when done you can close the window" with title "Preflightg" sound name "default"
end tell
"""
save = runAppleScript( runpreflightg )
| {
"content_hash": "4f15fa3b10db1a8fba1903c5e6fecc19",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 171,
"avg_line_length": 22.272727272727273,
"alnum_prop": 0.7183673469387755,
"repo_name": "n7s/scripts-for-glyphs",
"id": "dfe4cbd1f623948b40e586374ae6a3f440db50c6",
"size": "1293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run-preflightg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24325"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
import asyncio
import binascii
import cgi
import collections
import datetime
import http.cookies
import io
import json
import math
import time
import warnings
import enum
from email.utils import parsedate
from types import MappingProxyType
from urllib.parse import urlsplit, parse_qsl, unquote
from . import hdrs
from .helpers import reify
from .multidict import (CIMultiDictProxy,
CIMultiDict,
MultiDictProxy,
MultiDict)
from .protocol import Response as ResponseImpl, HttpVersion10, HttpVersion11
from .streams import EOF_MARKER
__all__ = ('ContentCoding', 'Request', 'StreamResponse', 'Response')
# Unique marker meaning "no cached value yet"; None cannot serve here
# because None is a legitimate raw Content-Type value (missing header).
sentinel = object()
class HeadersMixin:
    """Mixin providing parsed ``Content-Type`` / ``Content-Length``
    accessors on top of a ``headers`` multidict supplied by the host
    class.

    Parse results are cached; ``sentinel`` marks "not parsed yet" so
    that a raw value of None (header absent) can be cached too.
    """
    _content_type = None
    _content_dict = None
    _stored_content_type = sentinel
    def _parse_content_type(self, raw):
        """Parse *raw* Content-Type text and cache type + parameters."""
        self._stored_content_type = raw
        if raw is None:
            # RFC 2616 default when the header is missing.
            self._content_type = 'application/octet-stream'
            self._content_dict = {}
            return
        self._content_type, self._content_dict = cgi.parse_header(raw)
    @property
    def content_type(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of content part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if raw != self._stored_content_type:
            self._parse_content_type(raw)
        return self._content_type
    @property
    def charset(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of charset part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if raw != self._stored_content_type:
            self._parse_content_type(raw)
        return self._content_dict.get('charset')
    @property
    def content_length(self, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        """The value of Content-Length HTTP header, as int or None."""
        raw_length = self.headers.get(_CONTENT_LENGTH)
        return None if raw_length is None else int(raw_length)
FileField = collections.namedtuple('Field', 'name filename file content_type')
class ContentCoding(enum.Enum):
    """HTTP content codings supported for response compression.

    Member order matters: iteration over this enum determines the
    preference order when negotiating against Accept-Encoding.
    """
    # The content codings that we have support for.
    #
    # Additional registered codings are listed at:
    # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
    deflate = 'deflate'
    gzip = 'gzip'
    identity = 'identity'
############################################################
# HTTP Request
############################################################
class Request(dict, HeadersMixin):
    """HTTP request received by the server.

    Wraps the low-level protocol ``message`` plus its ``payload`` stream
    and exposes read-only accessors for method, path, headers, cookies
    and body.  Also acts as a dict for per-request storage.
    """
    # Methods whose bodies are parsed by post().
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
                    hdrs.METH_TRACE, hdrs.METH_DELETE}
    def __init__(self, app, message, payload, transport, reader, writer, *,
                 _HOST=hdrs.HOST, secure_proxy_ssl_header=None):
        self._app = app
        self._version = message.version
        self._transport = transport
        self._reader = reader
        self._writer = writer
        self._method = message.method
        self._host = message.headers.get(_HOST)
        self._path_qs = message.path
        # Lazily-filled caches for parsed POST data / uploaded files.
        self._post = None
        self._post_files_cache = None
        self._headers = CIMultiDictProxy(message.headers)
        # Keep-alive is impossible before HTTP/1.0; otherwise follow the
        # parsed Connection semantics.
        if self._version < HttpVersion10:
            self._keep_alive = False
        else:
            self._keep_alive = not message.should_close
        # matchdict, route_name, handler
        # or information about traversal lookup
        self._match_info = None  # initialized after route resolving
        self._payload = payload
        self._cookies = None
        self._read_bytes = None
        self._has_body = not payload.at_eof()
        self._secure_proxy_ssl_header = secure_proxy_ssl_header
    @reify
    def scheme(self):
        """A string representing the scheme of the request.
        'http' or 'https'.
        """
        if self._transport.get_extra_info('sslcontext'):
            return 'https'
        # Trust a configured (header, value) pair set by a TLS-terminating
        # proxy, e.g. ('X-Forwarded-Proto', 'https').
        secure_proxy_ssl_header = self._secure_proxy_ssl_header
        if secure_proxy_ssl_header is not None:
            header, value = secure_proxy_ssl_header
            if self._headers.get(header) == value:
                return 'https'
        return 'http'
    @property
    def method(self):
        """Read only property for getting HTTP method.
        The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
        """
        return self._method
    @property
    def version(self):
        """Read only property for getting HTTP version of request.
        Returns aiohttp.protocol.HttpVersion instance.
        """
        return self._version
    @property
    def host(self):
        """Read only property for getting *HOST* header of request.
        Returns str or None if HTTP request has no HOST header.
        """
        return self._host
    @property
    def path_qs(self):
        """The URL including PATH_INFO and the query string.
        E.g, /app/blog?id=10
        """
        return self._path_qs
    @reify
    def _splitted_path(self):
        # Build a full URL so urlsplit can separate path from query.
        url = '{}://{}{}'.format(self.scheme, self.host, self._path_qs)
        return urlsplit(url)
    @property
    def raw_path(self):
        """ The URL including raw *PATH INFO* without the host or scheme.
        Warning, the path is unquoted and may contains non valid URL characters
        E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
        """
        return self._splitted_path.path
    @reify
    def path(self):
        """The URL including *PATH INFO* without the host or scheme.
        E.g., ``/app/blog``
        """
        return unquote(self.raw_path)
    @reify
    def query_string(self):
        """The query string in the URL.
        E.g., id=10
        """
        return self._splitted_path.query
    @reify
    def GET(self):
        """A multidict with all the variables in the query string.
        Lazy property.
        """
        return MultiDictProxy(MultiDict(parse_qsl(self.query_string,
                                                  keep_blank_values=True)))
    @reify
    def POST(self):
        """A multidict with all the variables in the POST parameters.
        post() methods has to be called before using this attribute.
        """
        if self._post is None:
            raise RuntimeError("POST is not available before post()")
        return self._post
    @property
    def headers(self):
        """A case-insensitive multidict proxy with all headers."""
        return self._headers
    @property
    def if_modified_since(self, _IF_MODIFIED_SINCE=hdrs.IF_MODIFIED_SINCE):
        """The value of If-Modified-Since HTTP header, or None.
        This header is represented as a `datetime` object.
        """
        httpdate = self.headers.get(_IF_MODIFIED_SINCE)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None
    @property
    def keep_alive(self):
        """Is keepalive enabled by client?"""
        return self._keep_alive
    @property
    def match_info(self):
        """Result of route resolving."""
        return self._match_info
    @property
    def app(self):
        """Application instance."""
        return self._app
    @property
    def transport(self):
        """Transport used for request processing."""
        return self._transport
    @property
    def cookies(self):
        """Return request cookies.
        A read-only dictionary-like object.
        """
        if self._cookies is None:
            raw = self.headers.get(hdrs.COOKIE, '')
            parsed = http.cookies.SimpleCookie(raw)
            self._cookies = MappingProxyType(
                {key: val.value for key, val in parsed.items()})
        return self._cookies
    @property
    def payload(self):
        """Return raw payload stream."""
        warnings.warn('use Request.content instead', DeprecationWarning)
        return self._payload
    @property
    def content(self):
        """Return raw payload stream."""
        return self._payload
    @property
    def has_body(self):
        """Return True if request has HTTP BODY, False otherwise."""
        return self._has_body
    @asyncio.coroutine
    def release(self):
        """Release request.
        Eat unread part of HTTP BODY if present.
        """
        # Drain the payload; stops only once readany() returns the
        # (falsy) EOF marker itself.
        chunk = yield from self._payload.readany()
        while chunk is not EOF_MARKER or chunk:
            chunk = yield from self._payload.readany()
    @asyncio.coroutine
    def read(self):
        """Read request body if present.
        Returns bytes object with full request content.
        """
        # Cache the body so repeated read()/text()/json() calls work.
        if self._read_bytes is None:
            body = bytearray()
            while True:
                chunk = yield from self._payload.readany()
                body.extend(chunk)
                if chunk is EOF_MARKER:
                    break
            self._read_bytes = bytes(body)
        return self._read_bytes
    @asyncio.coroutine
    def text(self):
        """Return BODY as text using encoding from .charset."""
        bytes_body = yield from self.read()
        encoding = self.charset or 'utf-8'
        return bytes_body.decode(encoding)
    @asyncio.coroutine
    def json(self, *, loader=json.loads):
        """Return BODY as JSON."""
        body = yield from self.text()
        return loader(body)
    @asyncio.coroutine
    def post(self):
        """Return POST parameters."""
        if self._post is not None:
            return self._post
        # Non-POST-like methods and unsupported content types yield an
        # empty (cached) multidict rather than an error.
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post
        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            self._post = MultiDictProxy(MultiDict())
            return self._post
        body = yield from self.read()
        content_charset = self.charset or 'utf-8'
        # cgi.FieldStorage expects a CGI-style environ describing the body.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}
        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)
        # Decoders for Content-Transfer-Encoding values we can reverse.
        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }
        out = MultiDict()
        _count = 1
        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                # File upload: wrap in FileField and keep the original
                # cgi field alive in a cache (key includes a counter so
                # duplicate field names don't collide).
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                self._post_files_cache[field.name+str(_count)] = field
                _count += 1
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)
        self._post = MultiDictProxy(out)
        return self._post
    def __repr__(self):
        return "<{} {} {} >".format(self.__class__.__name__,
                                    self.method, self.path)
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
    """Streaming HTTP response.

    Headers, cookies, compression and chunking are configured before
    start(); afterwards the body is emitted with write()/write_eof().
    """
    def __init__(self, *, status=200, reason=None, headers=None):
        self._body = None
        self._keep_alive = None
        self._chunked = False
        self._chunk_size = None
        self._compression = False
        self._compression_force = False
        self._headers = CIMultiDict()
        self._cookies = http.cookies.SimpleCookie()
        self.set_status(status, reason)
        # Filled in by start(): the originating request and the
        # low-level response implementation.
        self._req = None
        self._resp_impl = None
        self._eof_sent = False
        if headers is not None:
            self._headers.extend(headers)
    def _copy_cookies(self):
        # Render each cookie as a Set-Cookie header value; output() with
        # an empty header prefix leaves a leading space, hence the [1:].
        for cookie in self._cookies.values():
            value = cookie.output(header='')[1:]
            self.headers.add(hdrs.SET_COOKIE, value)
    @property
    def started(self):
        # True once start() has created the low-level response.
        return self._resp_impl is not None
    @property
    def status(self):
        return self._status
    @property
    def chunked(self):
        return self._chunked
    @property
    def compression(self):
        return self._compression
    @property
    def reason(self):
        return self._reason
    def set_status(self, status, reason=None):
        """Set status code and (optionally derived) reason phrase."""
        self._status = int(status)
        if reason is None:
            reason = ResponseImpl.calc_reason(status)
        self._reason = reason
    @property
    def keep_alive(self):
        return self._keep_alive
    def force_close(self):
        self._keep_alive = False
    def enable_chunked_encoding(self, chunk_size=None):
        """Enables automatic chunked transfer encoding."""
        self._chunked = True
        self._chunk_size = chunk_size
    def enable_compression(self, force=None):
        """Enables response compression encoding."""
        # Backwards compatibility for when force was a bool <0.17.
        if type(force) == bool:
            force = ContentCoding.deflate if force else ContentCoding.identity
        self._compression = True
        self._compression_force = force
    @property
    def headers(self):
        return self._headers
    @property
    def cookies(self):
        return self._cookies
    def set_cookie(self, name, value, *, expires=None,
                   domain=None, max_age=None, path='/',
                   secure=None, httponly=None, version=None):
        """Set or update response cookie.
        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        # A previously-deleted cookie (empty coded value from
        # del_cookie) is dropped before being re-set.
        old = self._cookies.get(name)
        if old is not None and old.coded_value == '':
            # deleted cookie
            self._cookies.pop(name, None)
        self._cookies[name] = value
        c = self._cookies[name]
        if expires is not None:
            c['expires'] = expires
        if domain is not None:
            c['domain'] = domain
        if max_age is not None:
            c['max-age'] = max_age
        elif 'max-age' in c:
            del c['max-age']
        c['path'] = path
        if secure is not None:
            c['secure'] = secure
        if httponly is not None:
            c['httponly'] = httponly
        if version is not None:
            c['version'] = version
    def del_cookie(self, name, *, domain=None, path='/'):
        """Delete cookie.
        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        self._cookies.pop(name, None)
        self.set_cookie(name, '', max_age=0, domain=domain, path=path)
    @property
    def content_length(self):
        # Just a placeholder for adding setter
        return super().content_length
    @content_length.setter
    def content_length(self, value):
        if value is not None:
            value = int(value)
            # TODO: raise error if chunked enabled
            self.headers[hdrs.CONTENT_LENGTH] = str(value)
        else:
            self.headers.pop(hdrs.CONTENT_LENGTH, None)
    @property
    def content_type(self):
        # Just a placeholder for adding setter
        return super().content_type
    @content_type.setter
    def content_type(self, value):
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()
    @property
    def charset(self):
        # Just a placeholder for adding setter
        return super().charset
    @charset.setter
    def charset(self, value):
        ctype = self.content_type  # read header values if needed
        if ctype == 'application/octet-stream':
            raise RuntimeError("Setting charset for application/octet-stream "
                               "doesn't make sense, setup content_type first")
        if value is None:
            self._content_dict.pop('charset', None)
        else:
            self._content_dict['charset'] = str(value).lower()
        self._generate_content_type_header()
    @property
    def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
        """The value of Last-Modified HTTP header, or None.
        This header is represented as a `datetime` object.
        """
        httpdate = self.headers.get(_LAST_MODIFIED)
        if httpdate is not None:
            timetuple = parsedate(httpdate)
            if timetuple is not None:
                return datetime.datetime(*timetuple[:6],
                                         tzinfo=datetime.timezone.utc)
        return None
    @last_modified.setter
    def last_modified(self, value):
        # Accepts None (removes header), unix timestamp, datetime, or a
        # pre-formatted string.
        if value is None:
            if hdrs.LAST_MODIFIED in self.headers:
                del self.headers[hdrs.LAST_MODIFIED]
        elif isinstance(value, (int, float)):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
        elif isinstance(value, datetime.datetime):
            self.headers[hdrs.LAST_MODIFIED] = time.strftime(
                "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
        elif isinstance(value, str):
            self.headers[hdrs.LAST_MODIFIED] = value
    def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
        # Re-assemble Content-Type from cached type + parameters.
        params = '; '.join("%s=%s" % i for i in self._content_dict.items())
        if params:
            ctype = self._content_type + '; ' + params
        else:
            ctype = self._content_type
        self.headers[CONTENT_TYPE] = ctype
    def _start_pre_check(self, request):
        # Guard against starting the same response for two different
        # requests; returns the existing impl for an idempotent start().
        if self._resp_impl is not None:
            if self._req is not request:
                raise RuntimeError(
                    'Response has been started with different request.')
            else:
                return self._resp_impl
        else:
            return None
    def _start_compression(self, request):
        def start(coding):
            if coding != ContentCoding.identity:
                self.headers[hdrs.CONTENT_ENCODING] = coding.value
                self._resp_impl.add_compression_filter(coding.value)
                # Length is unknown after compression.
                self.content_length = None
        if self._compression_force:
            start(self._compression_force)
        else:
            # Pick the first supported coding (enum order) that the
            # client's Accept-Encoding mentions.
            accept_encoding = request.headers.get(
                hdrs.ACCEPT_ENCODING, '').lower()
            for coding in ContentCoding:
                if coding.value in accept_encoding:
                    start(coding)
                    return
    def start(self, request):
        """Bind to *request* and send status line + headers."""
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl
        self._req = request
        keep_alive = self._keep_alive
        if keep_alive is None:
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive
        resp_impl = self._resp_impl = ResponseImpl(
            request._writer,
            self._status,
            request.version,
            not keep_alive,
            self._reason)
        self._copy_cookies()
        if self._compression:
            self._start_compression(request)
        if self._chunked:
            # Chunked transfer encoding requires HTTP/1.1.
            if request.version != HttpVersion11:
                raise RuntimeError("Using chunked encoding is forbidden "
                                   "for HTTP/{0.major}.{0.minor}".format(
                                       request.version))
            resp_impl.enable_chunked_encoding()
            if self._chunk_size:
                resp_impl.add_chunking_filter(self._chunk_size)
        headers = self.headers.items()
        for key, val in headers:
            resp_impl.add_header(key, val)
        resp_impl.send_headers()
        return resp_impl
    def write(self, data):
        """Write a chunk of body data; start() must have been called."""
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            'data argument must be byte-ish (%r)' % type(data)
        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._resp_impl is None:
            raise RuntimeError("Cannot call write() before start()")
        if data:
            return self._resp_impl.write(data)
        else:
            return ()
    @asyncio.coroutine
    def drain(self):
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.transport.drain()
    @asyncio.coroutine
    def write_eof(self):
        # Idempotent: a second call is a no-op.
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.write_eof()
        self._eof_sent = True
    def __repr__(self):
        if self.started:
            info = "{} {} ".format(self._req.method, self._req.path)
        else:
            info = "not started"
        return "<{} {} {}>".format(self.__class__.__name__,
                                   self.reason, info)
class Response(StreamResponse):
    """A response whose entire payload is held in memory.

    The payload may be supplied either as *body* (bytes) or as *text*
    (str); giving both raises ValueError.  *content_type* overrides the
    default Content-Type ('text/plain; charset=utf-8' for text input).
    """
    def __init__(self, *, body=None, status=200,
                 reason=None, text=None, headers=None, content_type=None):
        super().__init__(status=status, reason=reason, headers=headers)
        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together.")
        if text is not None:
            if hdrs.CONTENT_TYPE not in self.headers:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError('text argument must be str (%r)' %
                                    type(text))
                if content_type is None:
                    content_type = 'text/plain'
                self.headers[hdrs.CONTENT_TYPE] = (
                    content_type + '; charset=utf-8')
                # Prime the HeadersMixin cache directly so the header set
                # above is not re-parsed on first access.
                self._content_type = content_type
                self._content_dict = {'charset': 'utf-8'}
                self.body = text.encode('utf-8')
            else:
                self.text = text
        else:
            if content_type:
                self.content_type = content_type
            if body is not None:
                self.body = body
            else:
                self.body = None
    @property
    def body(self):
        """Payload as bytes, or None if unset."""
        return self._body
    @body.setter
    def body(self, body):
        # Keeps the Content-Length header in sync with the payload.
        if body is not None and not isinstance(body, bytes):
            raise TypeError('body argument must be bytes (%r)' % type(body))
        self._body = body
        if body is not None:
            self.content_length = len(body)
        else:
            self.content_length = 0
    @property
    def text(self):
        """Payload decoded with .charset (utf-8 by default), or None."""
        if self._body is None:
            return None
        return self._body.decode(self.charset or 'utf-8')
    @text.setter
    def text(self, text):
        if text is not None and not isinstance(text, str):
            raise TypeError('text argument must be str (%r)' % type(text))
        if text is None:
            # BUGFIX: the type check above explicitly allows None, but the
            # original fell through to text.encode(...) and raised
            # AttributeError.  Assigning None now clears the body.
            self.body = None
            return
        if self.content_type == 'application/octet-stream':
            self.content_type = 'text/plain'
        if self.charset is None:
            self.charset = 'utf-8'
        self.body = text.encode(self.charset)
    @asyncio.coroutine
    def write_eof(self):
        """Send the stored body (if any), then finish the response."""
        body = self._body
        if body is not None:
            self.write(body)
        yield from super().write_eof()
| {
"content_hash": "f587949631e8ded7c9b09db6183ec177",
"timestamp": "",
"source": "github",
"line_count": 778,
"max_line_length": 91,
"avg_line_length": 31.318766066838045,
"alnum_prop": 0.5570056636296479,
"repo_name": "vedun/aiohttp",
"id": "120a312e02a002b4d0ddf606824824e4b0130e37",
"size": "24366",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "aiohttp/web_reqrep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1404"
},
{
"name": "PowerShell",
"bytes": "3361"
},
{
"name": "Python",
"bytes": "903126"
}
],
"symlink_target": ""
} |
"""Produce MQM scores from MQM ratings tsv file."""
import collections
import csv
import json
from absl import app
from absl import flags
import glob
# Command-line flags: input/output paths, severity->weight specs, and
# options controlling Unbabel-format handling and contiguity checking.
flags.DEFINE_string('input', '/dev/stdin', 'Input MQM ratings tsv file.')
flags.DEFINE_string('output', '/dev/stdout', 'Output MQM score file.')
flags.DEFINE_string(
    'weights', 'Major:5 Minor:1 Neutral:0 '
    'Major/Non-translation!:25 Minor/Fluency/Punctuation:0.1',
    'List of weight specs, in format: "severity[/category[/subcategory]]:wt". '
    'The most specific match is applied to each error.')
flags.DEFINE_bool('unbabel', False, 'Input tsv is in Unbabel format.')
flags.DEFINE_bool(
    'recompute_unbabel', False,
    'Apply Google-style weights to Unbabel ratings rather than reading scores '
    'directly from mqm field in last column of tsv.')
flags.DEFINE_bool(
    'force_contiguous', True,
    'Raise an error if annotated segments within a doc aren\'t contiguous')
FLAGS = flags.FLAGS
def Score(weights, items):
    """Return the weight of the most specific matching spec for an error.

    *items* is [severity, category, subcategory, ...]; keys in *weights*
    are '/'-joined prefixes of such lists.  The longest matching prefix
    wins; an error matching no spec scores 0.
    """
    for end in range(len(items), 0, -1):
        key = '/'.join(items[:end])
        if key in weights:
            return weights[key]
    return 0
def main(argv):
    """Aggregate MQM ratings from FLAGS.input into per-segment scores.

    Writes one line per (system, doc, doc_id) to FLAGS.output:
    system<TAB>doc<TAB>doc_id<TAB>global_score followed by
    rater=score columns, one per rater, in sorted rater order.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Parse "spec:weight" pairs into a lookup table for Score().
    weights = {}
    for spec in FLAGS.weights.split():
        name, weight = spec.split(':')
        weights[name] = float(weight)
    scores = {}  # sys -> doc -> doc_id -> rater -> [score]
    quoting = csv.QUOTE_MINIMAL if FLAGS.unbabel else csv.QUOTE_NONE
    with open(FLAGS.input) as f:
        reader = csv.DictReader(f, delimiter='\t', quoting=quoting)
        for row in reader:
            system, doc, doc_id = row['system'], row['doc'], int(row['doc_id'])
            if FLAGS.unbabel and not FLAGS.recompute_unbabel:
                # Pre-computed segment score carried in the misc column.
                score = json.loads(row['misc'])['mqm']
            else:
                score = Score(weights,
                              [row['severity']] + row['category'].split('/'))
            per_doc = scores.setdefault(system, {}).setdefault(doc, {})
            rater_lists = per_doc.setdefault(doc_id,
                                             collections.defaultdict(list))
            rater_lists[row['rater']].append(score)
    if FLAGS.force_contiguous:
        # Annotated segment ids within each doc must form an unbroken run.
        for system, docs in scores.items():
            for doc, by_id in docs.items():
                ids = sorted(by_id)
                if ids != list(range(min(ids), max(ids) + 1)):
                    raise ValueError(f'Non-contiguous segments for {system}/{doc}')
    with open(FLAGS.output, 'w') as f:
        for system, docs in scores.items():
            for doc, by_id in docs.items():
                for doc_id in sorted(by_id):
                    # Per-rater score: mean for native Unbabel scores,
                    # sum of weights otherwise.
                    rater_scores = {}
                    for rater, vals in by_id[doc_id].items():
                        if FLAGS.unbabel and not FLAGS.recompute_unbabel:
                            rater_scores[rater] = sum(vals) / len(vals)
                        else:
                            rater_scores[rater] = sum(vals)
                    global_score = sum(rater_scores.values()) / len(rater_scores)
                    if not FLAGS.unbabel or FLAGS.recompute_unbabel:
                        # Weighted penalties are negated: higher = better.
                        global_score *= -1
                    f.write(f'{system}\t{doc}\t{doc_id}\t{global_score}')
                    for rater in sorted(rater_scores):
                        f.write(f'\t{rater}={rater_scores[rater]}')
                    f.write('\n')
# Script entry point: absl handles flag parsing before calling main().
if __name__ == '__main__':
  app.run(main)
| {
"content_hash": "c0f9822dec68ba807ed4e9141ffcb2cd",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 35.82022471910113,
"alnum_prop": 0.6264115432873275,
"repo_name": "google-research/mt-metrics-eval",
"id": "d442bcb417dd8ac3c1031218ec25d681a0228d9e",
"size": "3777",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mt_metrics_eval/score_mqm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42072"
},
{
"name": "Python",
"bytes": "96436"
}
],
"symlink_target": ""
} |
import shutil
import tempfile
import pytest
@pytest.fixture(scope='session')
def clean_jedi_cache(request):
    """Point ``jedi.settings.cache_directory`` at a fresh temporary
    directory for the whole test session.

    The original value is restored (and the temporary directory removed)
    at session teardown.  The built-in `tmpdir` and `monkeypatch`
    fixtures cannot be used here because they are function-scoped,
    while this fixture is session-scoped.

    This fixture is activated in tox.ini.
    """
    import jedi
    settings = jedi.settings
    original_dir = settings.cache_directory
    tmp_dir = tempfile.mkdtemp(prefix='jedi-test-')
    settings.cache_directory = tmp_dir
    def restore():
        # Undo the redirection and clean up the scratch directory.
        settings.cache_directory = original_dir
        shutil.rmtree(tmp_dir)
    request.addfinalizer(restore)
| {
"content_hash": "4b8543bda617db7049455db2e7451f1e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.6898550724637681,
"repo_name": "danrharms/harmsway",
"id": "9e631922196ffc8308db7bf7e04246c2ea9aa14f",
"size": "690",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".emacs.d/elisp/emacs-jedi/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9021"
},
{
"name": "C",
"bytes": "4316"
},
{
"name": "C++",
"bytes": "3040"
},
{
"name": "CMake",
"bytes": "764"
},
{
"name": "Emacs Lisp",
"bytes": "14969539"
},
{
"name": "GDB",
"bytes": "18679"
},
{
"name": "HTML",
"bytes": "368"
},
{
"name": "M4",
"bytes": "2304"
},
{
"name": "Makefile",
"bytes": "43387"
},
{
"name": "NSIS",
"bytes": "8158"
},
{
"name": "Perl",
"bytes": "61714"
},
{
"name": "Python",
"bytes": "33538"
},
{
"name": "Roff",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "139102"
},
{
"name": "TeX",
"bytes": "316"
}
],
"symlink_target": ""
} |
import os
import sys
# Make the project root importable so Sphinx extensions (autodoc) can
# import the documented package.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.doctest',
    'sphinx.ext.githubpages',
    'sphinxcontrib.programoutput',
]
# Include both class and __init__ docstrings in autodoc class docs.
autoclass_content = 'both'
# Cross-reference targets that nitpicky mode should not warn about.
nitpick_ignore = [
    ('py:exc', 'NoResultFound'),
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'P-Blog'
copyright = '2017, Nicolas Appriou'
author = 'Nicolas Appriou'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'P-Blogdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'P-Blog.tex', 'P-Blog Documentation',
     'Nicolas Appriou', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'p-blog', 'P-Blog Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'P-Blog', 'P-Blog Documentation',
     author, 'P-Blog', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "7f6f5b44a34c17e855284306d58bbab5",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 29.231292517006803,
"alnum_prop": 0.6572026995578311,
"repo_name": "Nicals/pblog",
"id": "aee850eac0113dd0eae37679ac8362a01945dc23",
"size": "4979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6237"
},
{
"name": "HTML",
"bytes": "2544"
},
{
"name": "Python",
"bytes": "76184"
}
],
"symlink_target": ""
} |
from groupModule.models import Group
from django import forms
from django.contrib.admin import widgets
from django.db import connection
class GroupCreateForm(forms.ModelForm):
    """ModelForm for creating a Group: name, type and description."""
    class Meta:
        model = Group
        fields = ['groupName' , 'groupType' , 'description']
#class ViewAllGroups():
class GroupSettings(forms.Form):
    """Admin settings form for a group.

    Builds its fields dynamically from raw SQL against the joins/user/
    group tables for the group id passed in as the ``group`` kwarg:
    active members (for removal), pending join requests, group type and
    description.
    """
    #TODO
    #change group type
    #kick user
    #DONE
    def __init__(self, *args, **kwargs):
        # ``group`` is popped before delegating so the base Form never
        # sees the extra kwarg.
        groupId = kwargs.pop("group")
        super(GroupSettings, self).__init__(*args, **kwargs)
        cursor = connection.cursor()
        # Active ('A') members of the group; '-' is a no-op placeholder.
        GROUP_MEMBERS = (('-', '-------'), )
        memberssql = "SELECT j.userId_id, u.name FROM groupModule_joins j,userModule_user u WHERE j.groupId_id=%s and j.status='A' and j.userId_id=u.userId"
        cursor.execute(memberssql , [groupId ,])
        GROUP_MEMBERS += cursor.fetchall()
        self.fields['removeMembers'] = forms.ChoiceField(choices=GROUP_MEMBERS) #change to checkbox and handle the case of admin removing himself
        # Pending ('P') join requests awaiting approval.
        PENDING_REQUEST = (('-', '-------'), )
        pendingsql = "SELECT userId_id, name FROM groupModule_joins JOIN userModule_user ON userId_id=userId WHERE groupModule_joins.status='P' and groupId_id=%s"
        cursor.execute(pendingsql , [groupId ,])
        PENDING_REQUEST += cursor.fetchall()
        self.fields['pendingRequests'] = forms.ChoiceField(choices = PENDING_REQUEST)
        # Current group type pre-selects the choice field.
        GROUPTYPE = (('O', 'Open'), ('C', 'Closed'),)
        groupTypeSql = "SELECT groupType,CASE groupType WHEN 'O' THEN 'Open' ELSE 'Closed' END FROM groupModule_group WHERE groupId=%s"
        cursor.execute(groupTypeSql , [groupId ,])
        # NOTE(review): .encode('ascii') returns bytes on Python 3, so
        # `initial` would be b'O'/b'C' rather than 'O'/'C' — verify this
        # still matches the choice keys on the Python version in use.
        GROUP_TYPE = cursor.fetchone()[0].encode('ascii')
        self.fields['groupType'] = forms.ChoiceField(choices=GROUPTYPE,initial=GROUP_TYPE)
        # Current description pre-fills the textarea.
        groupDescriptionsql = "SELECT description FROM groupModule_group WHERE groupId=%s"
        cursor.execute(groupDescriptionsql , [groupId ,])
        DESCRIPTION = cursor.fetchall()
        self.fields['description'] = forms.CharField(widget = forms.Textarea,initial = DESCRIPTION[0][0])
class Groups(forms.Form):
    """Form listing every group the given user is an active member of."""

    def __init__(self, *args, **kwargs):
        user_id = kwargs.pop("user")
        super(Groups, self).__init__(*args, **kwargs)
        cursor = connection.cursor()
        # Groups where this user has an active ('A') membership.
        groupssql = "SELECT groupId_id,groupName FROM groupModule_joins NATURAL JOIN groupModule_group WHERE groupId=groupId_id and userId_id=%s and status='A'"
        cursor.execute(groupssql, [user_id])
        group_choices = cursor.fetchall()
        self.fields['Groups'] = forms.ChoiceField(choices=group_choices)
| {
"content_hash": "9a9d2a734640fb691290cdea98cded2f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 162,
"avg_line_length": 44.766666666666666,
"alnum_prop": 0.6392405063291139,
"repo_name": "dipkakwani/wee_app",
"id": "020c628eb5358aacd4705ce4d4c85d17cc241c93",
"size": "2736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/groupModule/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12188"
},
{
"name": "Python",
"bytes": "60892"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import *
from edb import errors
from edb.common import verutils
from edb import edgeql
from edb.edgeql import ast as qlast
from edb.edgeql import compiler as qlcompiler
from edb.edgeql import qltypes as ft
from edb.edgeql import parser as qlparser
from edb.edgeql import utils as qlutils
from edb.edgeql import qltypes
from . import abc as s_abc
from . import annos as s_anno
from . import delta as sd
from . import expr as s_expr
from . import functions as s_func
from . import inheriting
from . import name as sn
from . import objects as so
from . import types as s_types
from . import pseudo as s_pseudo
from . import referencing
from . import utils
if TYPE_CHECKING:
from edb.common import parsing as c_parsing
from edb.schema import schema as s_schema
T = TypeVar('T')
def _assert_not_none(value: Optional[T]) -> T:
if value is None:
raise TypeError("A value is expected")
return value
def merge_constraint_params(
    constraint: Constraint,
    supers: List[Constraint],
    field_name: str,
    *,
    ignore_local: bool,
    schema: s_schema.Schema,
) -> Any:
    """Field merge function for the ``params`` field of constraints.

    Abstract constraints keep their own params (their consistency is
    validated in CreateConstraint.validate_create); concrete constraints
    cannot redefine params and always take them from the first base.
    """
    if constraint.get_subject(schema) is not None:
        # Concrete constraint: inherit from the first super.
        source = supers[0]
    else:
        # Abstract constraint: use its own explicit value.
        source = constraint
    return source.get_explicit_field_value(schema, field_name, None)
def constraintname_from_fullname(name: sn.Name) -> sn.QualName:
    """Derive the refdict key name for a constraint from its full name.

    The key drops the first qualifier, making it independent of where
    the constraint is declared.
    """
    assert isinstance(name, sn.QualName)
    base = sn.shortname_from_fullname(name)
    extra_quals = sn.quals_from_fullname(name)[1:]
    specialized = sn.get_specialized_name(base, *extra_quals)
    return sn.QualName(name=specialized, module='__')
def _constraint_object_key(schema: s_schema.Schema, o: so.Object) -> sn.Name:
    """Key function for the constraint index: declaration-independent name."""
    full_name = o.get_name(schema)
    return constraintname_from_fullname(full_name)
class ObjectIndexByConstraintName(
    so.ObjectIndexBase[sn.Name, so.Object_T],
    key=_constraint_object_key,
):
    """Index of constraints keyed by their declaration-independent name."""

    @classmethod
    def get_key_for_name(
        cls, schema: s_schema.Schema, name: sn.Name,
    ) -> sn.Name:
        """Map a full constraint name to its index key."""
        return constraintname_from_fullname(name)
class Constraint(
    referencing.ReferencedInheritingObject,
    s_func.CallableObject, s_abc.Constraint,
    qlkind=ft.SchemaObjectClass.CONSTRAINT,
    data_safe=True,
):
    """A schema constraint: abstract (generic) or attached to a subject.

    A constraint is *generic* (abstract) when it has no subject;
    concrete constraints are attached to a type or pointer and inherit
    most of their definition from a generic ancestor.
    """

    # Parameters of the constraint callable.  Concrete constraints
    # cannot redefine them (see merge_constraint_params).
    params = so.SchemaField(
        s_func.FuncParameterList,
        coerce=True,
        compcoef=0.4,
        default=so.DEFAULT_CONSTRUCTOR,
        inheritable=True,
        merge_fn=merge_constraint_params,
    )

    # The boolean check expression of the (generic) constraint.
    expr = so.SchemaField(
        s_expr.Expression, default=None, compcoef=0.909,
        coerce=True)

    # The optional ON (...) expression of a concrete constraint.
    subjectexpr = so.SchemaField(
        s_expr.Expression,
        default=None, compcoef=0.833, coerce=True,
        ddl_identity=True)

    # The fully expanded, compiled check expression.
    finalexpr = so.SchemaField(
        s_expr.Expression,
        default=None, compcoef=0.909, coerce=True)

    # The optional EXCEPT (...) expression of a concrete constraint.
    except_expr = so.SchemaField(
        s_expr.Expression,
        default=None,
        coerce=True,
        compcoef=0.909,
        ddl_identity=True,
    )

    # The type or pointer the concrete constraint is attached to;
    # None for abstract constraints.
    subject = so.SchemaField(
        so.Object, default=None, inheritable=False)

    # Concrete argument expressions passed to the constraint.
    args = so.SchemaField(
        s_expr.ExpressionList,
        default=None, coerce=True, inheritable=False,
        compcoef=0.875, ddl_identity=True)

    # DELEGATED constraints defer enforcement to subclasses.
    delegated = so.SchemaField(
        bool,
        default=False,
        inheritable=False,
        special_ddl_syntax=True,
        compcoef=0.9,
    )

    # User-facing error message template (see format_error_message).
    errmessage = so.SchemaField(
        str, default=None, compcoef=0.971, allow_ddl_set=True)

    is_aggregate = so.SchemaField(
        bool, default=False, compcoef=0.971, allow_ddl_set=False)

    def get_name_impacting_ancestors(
        self, schema: s_schema.Schema,
    ) -> List[Constraint]:
        """Return ancestors whose rename would affect this object's name."""
        if self.generic(schema):
            return []
        else:
            # A concrete constraint's name derives from its nearest
            # generic ancestor.
            return [self.get_nearest_generic_parent(schema)]

    def get_constraint_origins(
        self, schema: s_schema.Schema) -> List[Constraint]:
        """Return the non-delegated base constraints this one originates
        from, or [self] when there are none."""
        origins: List[Constraint] = []
        for base in self.get_bases(schema).objects(schema):
            if not base.generic(schema) and not base.get_delegated(schema):
                origins.extend(
                    x for x in base.get_constraint_origins(schema)
                    if x not in origins
                )

        return [self] if not origins else origins

    def is_independent(self, schema: s_schema.Schema) -> bool:
        """True when this constraint neither inherits enforcement from a
        base nor delegates it to descendants."""
        return (
            not self.descendants(schema)
            and self.get_constraint_origins(schema) == [self]
        )

    def get_verbosename(
        self,
        schema: s_schema.Schema,
        *,
        with_parent: bool=False
    ) -> str:
        """Return a human-readable name, optionally including the subject."""
        vn = super().get_verbosename(schema)
        if self.generic(schema):
            return f'abstract {vn}'
        else:
            if with_parent:
                subject = self.get_subject(schema)
                assert subject is not None
                pvn = subject.get_verbosename(
                    schema, with_parent=True)
                return f'{vn} of {pvn}'
            else:
                return vn

    def generic(self, schema: s_schema.Schema) -> bool:
        """True for abstract constraints (no subject)."""
        return self.get_subject(schema) is None

    def get_subject(self, schema: s_schema.Schema) -> ConsistencySubject:
        # NOTE(review): may actually return None for generic constraints
        # (callers assert non-None) — the annotation is optimistic.
        return cast(
            ConsistencySubject,
            self.get_field_value(schema, 'subject'),
        )

    def format_error(
        self,
        schema: s_schema.Schema,
    ) -> str:
        """Render the violation message using the subject's title
        annotation when present, else its short name."""
        subject = self.get_subject(schema)
        titleattr = subject.get_annotation(schema, sn.QualName('std', 'title'))

        if not titleattr:
            subjname = subject.get_shortname(schema)
            subjtitle = subjname.name
        else:
            # NOTE(review): get_annotation's return is used directly as
            # the title here — confirm it yields a string.
            subjtitle = titleattr

        return self.format_error_message(schema, subjtitle)

    def format_error_message(
        self,
        schema: s_schema.Schema,
        subjtitle: str,
    ) -> str:
        """Expand the errmessage template with the subject title and any
        inlined constraint arguments."""
        errmsg = self.get_errmessage(schema)
        args = self.get_args(schema)
        if args:
            # Rebuild the parameter->argument mapping so argument text
            # can be interpolated into the message.
            args_ql: List[qlast.Base] = [
                qlast.Path(steps=[qlast.ObjectRef(name=subjtitle)]),
            ]
            args_ql.extend(arg.qlast for arg in args)
            constr_base: Constraint = schema.get(
                self.get_name(schema), type=type(self))
            index_parameters = qlutils.index_parameters(
                args_ql,
                parameters=constr_base.get_params(schema),
                schema=schema,
            )
            expr = constr_base.get_field_value(schema, 'expr')
            expr_ql = qlparser.parse(expr.text)
            qlutils.inline_parameters(expr_ql, index_parameters)
            args_map = {name: edgeql.generate_source(val, pretty=False)
                        for name, val in index_parameters.items()}
        else:
            args_map = {'__subject__': subjtitle}

        assert errmsg is not None
        formatted = errmsg.format(**args_map)

        return formatted

    def as_alter_delta(
        self,
        other: Constraint,
        *,
        self_schema: s_schema.Schema,
        other_schema: s_schema.Schema,
        confidence: float,
        context: so.ComparisonContext,
    ) -> sd.ObjectCommand[Constraint]:
        return super().as_alter_delta(
            other,
            self_schema=self_schema,
            other_schema=other_schema,
            confidence=confidence,
            context=context,
        )

    def as_delete_delta(
        self,
        *,
        schema: s_schema.Schema,
        context: so.ComparisonContext,
    ) -> sd.ObjectCommand[Constraint]:
        return super().as_delete_delta(schema=schema, context=context)

    def get_ddl_identity(
        self,
        schema: s_schema.Schema,
    ) -> Optional[Dict[str, str]]:
        """Like the base implementation, but drop an inherited
        subjectexpr — it is not part of this object's own identity."""
        ddl_identity = super().get_ddl_identity(schema)

        if (
            ddl_identity is not None
            and self.field_is_inherited(schema, 'subjectexpr')
        ):
            ddl_identity.pop('subjectexpr', None)

        return ddl_identity

    @classmethod
    def get_root_classes(cls) -> Tuple[sn.QualName, ...]:
        return (
            sn.QualName(module='std', name='constraint'),
        )

    @classmethod
    def get_default_base_name(cls) -> sn.QualName:
        # Fixed: first parameter of a @classmethod is conventionally
        # named ``cls``, not ``self``.
        return sn.QualName('std', 'constraint')
class ConsistencySubject(
    so.QualifiedObject,
    so.InheritingObject,
    s_anno.AnnotationSubject,
):
    """Mixin for schema objects that can carry constraints."""

    # Refdict wiring: constraints live under the 'constraints' attr.
    constraints_refs = so.RefDict(
        attr='constraints',
        ref_cls=Constraint)

    constraints = so.SchemaField(
        ObjectIndexByConstraintName[Constraint],
        inheritable=False, ephemeral=True, coerce=True, compcoef=0.887,
        default=so.DEFAULT_CONSTRUCTOR
    )

    def add_constraint(
        self,
        schema: s_schema.Schema,
        constraint: Constraint,
        replace: bool = False,
    ) -> s_schema.Schema:
        """Attach *constraint* to this object, returning the new schema."""
        return self.add_classref(
            schema, 'constraints', constraint, replace=replace)

    def can_accept_constraints(self, schema: s_schema.Schema) -> bool:
        """Whether constraints may be declared on this object; subclasses
        override to forbid them."""
        return True
class ConsistencySubjectCommandContext:
    """Marker mixin for command contexts of constraint-bearing objects."""
class ConsistencySubjectCommand(
    inheriting.InheritingObjectCommand[so.InheritingObjectT],
):
    """Base command class for objects that may carry constraints."""
class ConstraintCommandContext(sd.ObjectCommandContext[Constraint],
                               s_anno.AnnotationSubjectCommandContext):
    """Command context for operations on a single constraint."""
class ConstraintCommand(
    referencing.ReferencedInheritingObjectCommand[Constraint],
    s_func.CallableCommand[Constraint],
    context_class=ConstraintCommandContext,
    referrer_context_class=ConsistencySubjectCommandContext,
):
    """Shared machinery for all DDL commands operating on constraints."""

    @classmethod
    def _validate_subcommands(
        cls,
        astnode: qlast.DDLOperation,
    ) -> None:
        """Reject SET subcommands on fields that are not annotations."""
        # check that 'subject' and 'subjectexpr' are not set as annotations
        for command in astnode.commands:
            if isinstance(command, qlast.SetField):
                cname = command.name
                if cname in {'subject', 'subjectexpr'}:
                    raise errors.InvalidConstraintDefinitionError(
                        f'{cname} is not a valid constraint annotation',
                        context=command.context)

    @classmethod
    def _classname_quals_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.NamedDDL,
        base_name: sn.Name,
        referrer_name: sn.QualName,
        context: sd.CommandContext,
    ) -> Tuple[str, ...]:
        """Compute name qualifiers for a concrete constraint from its
        argument, ON and EXCEPT expressions (abstract constraints get
        none)."""
        if isinstance(astnode, qlast.CreateConstraint):
            return ()
        exprs = []
        args = cls._constraint_args_from_ast(schema, astnode, context)
        for arg in args:
            exprs.append(arg.text)

        assert isinstance(astnode, qlast.ConcreteConstraintOp)
        if astnode.subjectexpr:
            # use the normalized text directly from the expression
            expr = s_expr.Expression.from_ast(
                astnode.subjectexpr, schema, context.modaliases)
            exprs.append(expr.text)
        if astnode.except_expr:
            # use the normalized text directly from the expression
            expr = s_expr.Expression.from_ast(
                astnode.except_expr, schema, context.modaliases)
            # but mangle it a bit, so that we can distinguish between
            # on and except when only one is present
            exprs.append('!' + expr.text)

        return (cls._name_qual_from_exprs(schema, exprs),)

    @classmethod
    def _classname_quals_from_name(
        cls,
        name: sn.QualName
    ) -> Tuple[str, ...]:
        """Extract the expression-hash qualifier from a full name."""
        quals = sn.quals_from_fullname(name)
        return (quals[-1],)

    @classmethod
    def _constraint_args_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.NamedDDL,
        context: sd.CommandContext,
    ) -> List[s_expr.Expression]:
        """Collect constraint argument expressions from the AST node."""
        args = []
        assert isinstance(astnode, qlast.ConcreteConstraintOp)

        if astnode.args:
            for arg in astnode.args:
                arg_expr = s_expr.Expression.from_ast(
                    arg, schema, context.modaliases)
                args.append(arg_expr)

        return args

    @classmethod
    def as_inherited_ref_ast(
        cls,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        name: sn.Name,
        parent: so.Object,
    ) -> qlast.ObjectDDL:
        """Build the DDL AST for a constraint inherited from *parent*,
        reproducing its args and ON/EXCEPT clauses."""
        assert isinstance(parent, Constraint)
        astnode_cls = cls.referenced_astnode  # type: ignore

        nref = cls.get_inherited_ref_name(schema, context, parent, name)
        args = []

        parent_args = parent.get_args(schema)
        if parent_args:
            parent_args = parent.get_args(schema)
            assert parent_args is not None
            for arg_expr in parent_args:
                arg = edgeql.parse_fragment(arg_expr.text)
                args.append(arg)

        subj_expr = parent.get_subjectexpr(schema)
        if (
            subj_expr is None
            # Don't include subjectexpr if it was inherited from an
            # abstract constraint.
            or parent.get_nearest_generic_parent(
                schema).get_subjectexpr(schema) is not None
        ):
            subj_expr_ql = None
        else:
            subj_expr_ql = edgeql.parse_fragment(subj_expr.text)

        except_expr = parent.get_except_expr(schema)
        if except_expr:
            except_expr_ql = except_expr.qlast
        else:
            except_expr_ql = None

        astnode = astnode_cls(
            name=nref, args=args, subjectexpr=subj_expr_ql,
            except_expr=except_expr_ql)

        return cast(qlast.ObjectDDL, astnode)

    def compile_expr_field(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        field: so.Field[Any],
        value: s_expr.Expression,
        track_schema_ref_exprs: bool=False,
    ) -> s_expr.Expression:
        """Compile an expression field.

        Concrete constraints compile ON/EXCEPT/final expressions anchored
        on their subject; abstract constraints compile against their own
        parameter list.
        """
        from . import pointers as s_pointers

        base: Optional[so.Object] = None
        if isinstance(self, AlterConstraint):
            base = self.scls.get_subject(schema)
        else:
            referrer_ctx = self.get_referrer_context(context)
            if referrer_ctx:
                base = referrer_ctx.op.scls

        if base is not None:
            assert isinstance(base, (s_types.Type, s_pointers.Pointer))
            # Concrete constraint
            if field.name == 'expr':
                # Concrete constraints cannot redefine the base check
                # expressions, and so the only way we should get here
                # is through field inheritance, so check that the
                # value is compiled and move on.
                if not value.is_compiled():
                    mcls = self.get_schema_metaclass()
                    dn = mcls.get_schema_class_displayname()
                    raise errors.InternalServerError(
                        f'uncompiled expression in the {field.name!r} field of'
                        f' {dn} {self.classname!r}'
                    )
                return value
            elif field.name in {'subjectexpr', 'finalexpr', 'except_expr'}:
                return s_expr.Expression.compiled(
                    value,
                    schema=schema,
                    options=qlcompiler.CompilerOptions(
                        modaliases=context.modaliases,
                        anchors={qlast.Subject().name: base},
                        path_prefix_anchor=qlast.Subject().name,
                        singletons=frozenset([base]),
                        allow_generic_type_output=True,
                        schema_object_context=self.get_schema_metaclass(),
                        apply_query_rewrites=False,
                        track_schema_ref_exprs=track_schema_ref_exprs,
                    ),
                )
            else:
                return super().compile_expr_field(
                    schema, context, field, value)

        elif field.name in ('expr', 'subjectexpr'):
            # Abstract constraint.
            params = self._get_params(schema, context)

            param_anchors = s_func.get_params_symtable(
                params,
                schema,
                inlined_defaults=False,
            )

            return s_expr.Expression.compiled(
                value,
                schema=schema,
                options=qlcompiler.CompilerOptions(
                    modaliases=context.modaliases,
                    anchors=param_anchors,
                    func_params=params,
                    allow_generic_type_output=True,
                    schema_object_context=self.get_schema_metaclass(),
                    apply_query_rewrites=not context.stdmode,
                    track_schema_ref_exprs=track_schema_ref_exprs,
                ),
            )
        else:
            return super().compile_expr_field(
                schema, context, field, value, track_schema_ref_exprs)

    def get_dummy_expr_field_value(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        field: so.Field[Any],
        value: Any,
    ) -> Optional[s_expr.Expression]:
        """Return a trivially-compilable placeholder for expr fields."""
        if field.name in {'expr', 'subjectexpr', 'finalexpr'}:
            return s_expr.Expression(text='SELECT false')
        else:
            raise NotImplementedError(f'unhandled field {field.name!r}')

    @classmethod
    def get_inherited_ref_name(
        cls,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        parent: so.Object,
        name: sn.Name,
    ) -> qlast.ObjectRef:
        """AST name reference for an inherited constraint (short name)."""
        bn = sn.shortname_from_fullname(name)
        return utils.name_to_ast_ref(bn)

    def get_ref_implicit_base_delta(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        refcls: Constraint,
        implicit_bases: List[Constraint],
    ) -> inheriting.BaseDelta_T[Constraint]:
        """Compute base-list changes needed to align *refcls* with its
        implicit bases."""
        child_bases = refcls.get_bases(schema).objects(schema)

        return inheriting.delta_bases(
            [b.get_name(schema) for b in child_bases],
            [b.get_name(schema) for b in implicit_bases],
            t=Constraint,
        )

    def get_ast_attr_for_field(
        self,
        field: str,
        astnode: Type[qlast.DDLOperation],
    ) -> Optional[str]:
        """Map schema fields to like-named AST attributes where present."""
        if field in ('subjectexpr', 'args', 'except_expr'):
            return field
        elif (
            field == 'delegated'
            and astnode is qlast.CreateConcreteConstraint
        ):
            return field
        else:
            return super().get_ast_attr_for_field(field, astnode)

    def get_ddl_identity_fields(
        self,
        context: sd.CommandContext,
    ) -> Tuple[so.Field[Any], ...]:
        """DDL-identity fields, omitting subjectexpr when absent and args
        for abstract constraints."""
        id_fields = super().get_ddl_identity_fields(context)
        omit_fields = set()
        if not self.has_ddl_identity('subjectexpr'):
            omit_fields.add('subjectexpr')
        if self.get_referrer_context(context) is None:
            omit_fields.add('args')

        if omit_fields:
            return tuple(f for f in id_fields if f.name not in omit_fields)
        else:
            return id_fields

    @classmethod
    def localnames_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> Set[str]:
        """Names local to the constraint body (its parameters), so that
        expression normalization does not qualify them."""
        localnames = super().localnames_from_ast(
            schema, astnode, context
        )
        # Set up the constraint parameters as part of names to be
        # ignored in expression normalization.
        if isinstance(astnode, qlast.CreateConstraint):
            localnames |= {param.name for param in astnode.params}
        elif isinstance(astnode, qlast.AlterConstraint):
            # ALTER ABSTRACT CONSTRAINT doesn't repeat the params,
            # but we can get them from the schema.
            objref = astnode.name

            # Merge the context modaliases and the command modaliases.
            modaliases = dict(context.modaliases)
            modaliases.update(
                cls._modaliases_from_ast(schema, astnode, context))
            # Get the original constraint.
            constr = schema.get(
                utils.ast_ref_to_name(objref),
                module_aliases=modaliases,
                type=Constraint,
            )

            localnames |= {param.get_parameter_name(schema) for param in
                           constr.get_params(schema).objects(schema)}

        return localnames

    def _populate_concrete_constraint_attrs(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        subject_obj: Optional[so.Object],
        *,
        name: sn.QualName,
        subjectexpr: Optional[s_expr.Expression] = None,
        subjectexpr_inherited: bool = False,
        sourcectx: Optional[c_parsing.ParserContext] = None,
        args: Any = None,
        **kwargs: Any
    ) -> None:
        """Derive and validate the attributes of a concrete constraint
        from its generic base: inline args into the check expression,
        compile it, enforce bool result / immutability / path-shape
        rules, then record everything via set_attribute_value.
        """
        from edb.ir import ast as ir_ast
        from edb.ir import utils as ir_utils
        from . import pointers as s_pointers
        from . import links as s_links
        from . import objtypes as s_objtypes
        from . import scalars as s_scalars

        bases = self.get_resolved_attribute_value(
            'bases', schema=schema, context=context,
        )
        if not bases:
            bases = self.scls.get_bases(schema)
        constr_base = bases.objects(schema)[0]
        # If we have a concrete base, then we should inherit all of
        # these attrs through the normal inherit_fields() mechanisms,
        # and populating them ourselves will just mess up
        # inherited_fields.
        if not constr_base.generic(schema):
            return

        orig_subjectexpr = subjectexpr
        orig_subject = subject_obj
        base_subjectexpr = constr_base.get_field_value(schema, 'subjectexpr')
        if subjectexpr is None:
            subjectexpr = base_subjectexpr
        elif (base_subjectexpr is not None
                and subjectexpr.text != base_subjectexpr.text):
            raise errors.InvalidConstraintDefinitionError(
                f'subjectexpr is already defined for {name}',
                context=sourcectx,
            )

        if (isinstance(subject_obj, s_scalars.ScalarType)
                and constr_base.get_is_aggregate(schema)):
            raise errors.InvalidConstraintDefinitionError(
                f'{constr_base.get_verbosename(schema)} may not '
                f'be used on scalar types',
                context=sourcectx,
            )

        if (
            subjectexpr is None
            and isinstance(subject_obj, s_objtypes.ObjectType)
        ):
            raise errors.InvalidConstraintDefinitionError(
                "constraints on object types must have an 'on' clause",
                context=sourcectx,
            )

        # The compilation anchor: either the ON expression's AST or the
        # subject object itself.
        if subjectexpr is not None:
            subject_ql = subjectexpr.qlast
            subject = subject_ql
        else:
            subject = subject_obj

        expr: s_expr.Expression = constr_base.get_field_value(schema, 'expr')
        if not expr:
            raise errors.InvalidConstraintDefinitionError(
                f'missing constraint expression in {name}')

        # Re-parse instead of using expr.qlast, because we mutate
        # the AST below.
        expr_ql = qlparser.parse(expr.text)

        if not args:
            args = constr_base.get_field_value(schema, 'args')

        attrs = dict(kwargs)
        inherited = dict()
        if orig_subjectexpr is not None:
            attrs['subjectexpr'] = orig_subjectexpr
            inherited['subjectexpr'] = subjectexpr_inherited
        else:
            base_subjectexpr = constr_base.get_subjectexpr(schema)
            if base_subjectexpr is not None:
                attrs['subjectexpr'] = base_subjectexpr
                inherited['subjectexpr'] = True

        errmessage = attrs.get('errmessage')
        if not errmessage:
            errmessage = constr_base.get_errmessage(schema)
            inherited['errmessage'] = True
        attrs['errmessage'] = errmessage

        if subject is not orig_subject:
            # subject has been redefined
            assert isinstance(subject, qlast.Base)
            qlutils.inline_anchors(
                expr_ql, anchors={qlast.Subject().name: subject})
            subject = orig_subject

        if args:
            # Substitute concrete argument ASTs for the parameter
            # references inside the check expression.
            args_ql: List[qlast.Base] = [
                qlast.Path(steps=[qlast.Subject()]),
            ]
            args_ql.extend(arg.qlast for arg in args)
            args_map = qlutils.index_parameters(
                args_ql,
                parameters=constr_base.get_params(schema),
                schema=schema,
            )
            qlutils.inline_parameters(expr_ql, args_map)
            attrs['args'] = args

        if subject_obj:
            assert isinstance(subject_obj, (s_types.Type, s_pointers.Pointer))
            singletons = frozenset({subject_obj})
        else:
            singletons = frozenset()

        assert subject is not None
        final_expr = s_expr.Expression.compiled(
            s_expr.Expression.from_ast(expr_ql, schema, {}),
            schema=schema,
            options=qlcompiler.CompilerOptions(
                anchors={qlast.Subject().name: subject},
                path_prefix_anchor=qlast.Subject().name,
                singletons=singletons,
                apply_query_rewrites=False,
                schema_object_context=self.get_schema_metaclass(),
            ),
        )

        assert isinstance(final_expr.irast, ir_ast.Statement)

        # A constraint expression must evaluate to std::bool.
        bool_t = schema.get('std::bool', type=s_scalars.ScalarType)
        expr_type = final_expr.irast.stype
        expr_schema = final_expr.irast.schema
        if not expr_type.issubclass(expr_schema, bool_t):
            raise errors.InvalidConstraintDefinitionError(
                f'{name} constraint expression expected '
                f'to return a bool value, got '
                f'{expr_type.get_verbosename(expr_schema)}',
                context=sourcectx
            )

        except_expr = attrs.get('except_expr')
        if except_expr:
            if isinstance(subject, s_pointers.Pointer):
                raise errors.InvalidConstraintDefinitionError(
                    "only object constraints may use EXCEPT",
                    context=sourcectx
                )

        if subjectexpr is not None:
            options = qlcompiler.CompilerOptions(
                anchors={qlast.Subject().name: subject},
                path_prefix_anchor=qlast.Subject().name,
                singletons=singletons,
                apply_query_rewrites=False,
                schema_object_context=self.get_schema_metaclass(),
            )

            final_subjectexpr = s_expr.Expression.compiled(
                subjectexpr, schema=schema, options=options
            )
            assert isinstance(final_subjectexpr.irast, ir_ast.Statement)
            refs = ir_utils.get_longest_paths(final_expr.irast)

            final_except_expr = None
            if except_expr:
                final_except_expr = s_expr.Expression.compiled(
                    except_expr, schema=schema, options=options
                )
                assert isinstance(final_except_expr.irast, ir_ast.Statement)
                refs |= ir_utils.get_longest_paths(final_except_expr.irast)

            # Walk every referenced path: detect MULTI pointers and
            # reject paths that stray more than one hop from the subject.
            has_multi = False
            for ref in refs:
                assert subject_obj
                while ref.rptr:
                    rptr = ref.rptr
                    if rptr.dir_cardinality.is_multi():
                        has_multi = True

                    # We don't need to look further than the subject,
                    # which is always valid. (And which is a singleton
                    # in a constraint expression if it is itself a
                    # singleton, regardless of other parts of the path.)
                    if (
                        isinstance(rptr.ptrref, ir_ast.PointerRef)
                        and rptr.ptrref.id == subject_obj.id
                    ):
                        break
                    if (not isinstance(rptr.ptrref,
                                       ir_ast.TupleIndirectionPointerRef)
                            and rptr.ptrref.source_ptr is None
                            and rptr.source.rptr is not None):
                        if isinstance(subject, s_links.Link):
                            raise errors.InvalidConstraintDefinitionError(
                                "link constraints may not access "
                                "the link target",
                                context=sourcectx
                            )
                        else:
                            raise errors.InvalidConstraintDefinitionError(
                                "constraints cannot contain paths with more "
                                "than one hop",
                                context=sourcectx
                            )

                    ref = rptr.source

            if has_multi and len(refs) > 1:
                raise errors.InvalidConstraintDefinitionError(
                    "cannot reference multiple links or properties in a "
                    "constraint where at least one link or property is MULTI",
                    context=sourcectx
                )

            if has_multi and ir_utils.contains_set_of_op(
                    final_subjectexpr.irast):
                raise errors.InvalidConstraintDefinitionError(
                    "cannot use aggregate functions or operators "
                    "in a non-aggregating constraint",
                    context=sourcectx
                )

            if (
                final_subjectexpr.irast.volatility
                != qltypes.Volatility.Immutable
            ):
                raise errors.InvalidConstraintDefinitionError(
                    f'constraint expressions must be immutable',
                    context=final_subjectexpr.irast.context,
                )

            if final_except_expr:
                assert isinstance(final_except_expr.irast, ir_ast.Statement)
                if (
                    final_except_expr.irast.volatility
                    != qltypes.Volatility.Immutable
                ):
                    raise errors.InvalidConstraintDefinitionError(
                        f'constraint expressions must be immutable',
                        context=final_except_expr.irast.context,
                    )

        if final_expr.irast.volatility != qltypes.Volatility.Immutable:
            raise errors.InvalidConstraintDefinitionError(
                f'constraint expressions must be immutable',
                context=sourcectx,
            )

        attrs['finalexpr'] = final_expr
        attrs['params'] = constr_base.get_params(schema)
        inherited['params'] = True
        attrs['abstract'] = False

        for k, v in attrs.items():
            self.set_attribute_value(k, v, inherited=bool(inherited.get(k)))
class CreateConstraint(
ConstraintCommand,
s_func.CreateCallableObject[Constraint],
referencing.CreateReferencedInheritingObject[Constraint],
):
astnode = [qlast.CreateConcreteConstraint, qlast.CreateConstraint]
referenced_astnode = qlast.CreateConcreteConstraint
@classmethod
def _get_param_desc_from_ast(
cls,
schema: s_schema.Schema,
modaliases: Mapping[Optional[str], str],
astnode: qlast.ObjectDDL,
*,
param_offset: int=0
) -> List[s_func.ParameterDesc]:
if not isinstance(astnode, qlast.CallableObjectCommand):
# Concrete constraint.
return []
params = super()._get_param_desc_from_ast(
schema, modaliases, astnode, param_offset=param_offset + 1)
params.insert(0, s_func.ParameterDesc(
num=param_offset,
name=sn.UnqualName('__subject__'),
default=None,
type=s_pseudo.PseudoTypeShell(name=sn.UnqualName('anytype')),
typemod=ft.TypeModifier.SingletonType,
kind=ft.ParameterKind.PositionalParam,
))
return params
def validate_create(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
super().validate_create(schema, context)
if self.get_referrer_context(context) is not None:
# The checks below apply only to abstract constraints.
return
base_params: Optional[s_func.FuncParameterList] = None
base_with_params: Optional[Constraint] = None
bases = self.get_resolved_attribute_value(
'bases',
schema=schema,
context=context,
)
for base in bases.objects(schema):
params = base.get_params(schema)
if params and len(params) > 1:
# All constraints have __subject__ parameter
# auto-injected, hence the "> 1" check.
if base_params is not None:
raise errors.InvalidConstraintDefinitionError(
f'{self.get_verbosename()} '
f'extends multiple constraints '
f'with parameters',
context=self.source_context,
)
base_params = params
base_with_params = base
if base_params:
assert base_with_params is not None
params = self._get_params(schema, context)
if not params or len(params) == 1:
# All constraints have __subject__ parameter
# auto-injected, hence the "== 1" check.
raise errors.InvalidConstraintDefinitionError(
f'{self.get_verbosename()} '
f'must define parameters to reflect parameters of '
f'the {base_with_params.get_verbosename(schema)} '
f'it extends',
context=self.source_context,
)
if len(params) < len(base_params):
raise errors.InvalidConstraintDefinitionError(
f'{self.get_verbosename()} '
f'has fewer parameters than the '
f'{base_with_params.get_verbosename(schema)} '
f'it extends',
context=self.source_context,
)
# Skipping the __subject__ param
for base_param, param in zip(base_params.objects(schema)[1:],
params.objects(schema)[1:]):
param_name = param.get_parameter_name(schema)
base_param_name = base_param.get_parameter_name(schema)
if param_name != base_param_name:
raise errors.InvalidConstraintDefinitionError(
f'the {param_name!r} parameter of the '
f'{self.get_verbosename()} '
f'must be renamed to {base_param_name!r} '
f'to match the signature of the base '
f'{base_with_params.get_verbosename(schema)} ',
context=self.source_context,
)
param_type = param.get_type(schema)
base_param_type = base_param.get_type(schema)
if (
not base_param_type.is_polymorphic(schema)
and param_type.is_polymorphic(schema)
):
raise errors.InvalidConstraintDefinitionError(
f'the {param_name!r} parameter of the '
f'{self.get_verbosename()} cannot '
f'be of generic type because the corresponding '
f'parameter of the '
f'{base_with_params.get_verbosename(schema)} '
f'it extends has a concrete type',
context=self.source_context,
)
if (
not base_param_type.is_polymorphic(schema) and
not param_type.is_polymorphic(schema) and
not param_type.implicitly_castable_to(
base_param_type, schema)
):
raise errors.InvalidConstraintDefinitionError(
f'the {param_name!r} parameter of the '
f'{self.get_verbosename()} has type of '
f'{param_type.get_displayname(schema)} that '
f'is not implicitly castable to the '
f'corresponding parameter of the '
f'{base_with_params.get_verbosename(schema)} with '
f'type {base_param_type.get_displayname(schema)}',
context=self.source_context,
)
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
schema = super()._create_begin(schema, context)
return schema
subject = referrer_ctx.scls
assert isinstance(subject, ConsistencySubject)
if not subject.can_accept_constraints(schema):
raise errors.UnsupportedFeatureError(
f'constraints cannot be defined on '
f'{subject.get_verbosename(schema)}',
context=self.source_context,
)
if not context.canonical:
props = self.get_attributes(schema, context)
props.pop('name')
props.pop('subject', None)
fullname = self.classname
shortname = sn.shortname_from_fullname(fullname)
assert isinstance(shortname, sn.QualName), \
"expected qualified name"
self._populate_concrete_constraint_attrs(
schema,
context,
subject_obj=subject,
name=shortname,
subjectexpr_inherited=self.is_attribute_inherited(
'subjectexpr'),
sourcectx=self.source_context,
**props,
)
self.set_attribute_value('subject', subject)
return super()._create_begin(schema, context)
@classmethod
def as_inherited_ref_cmd(
cls,
*,
schema: s_schema.Schema,
context: sd.CommandContext,
astnode: qlast.ObjectDDL,
bases: List[Constraint],
referrer: so.Object,
) -> sd.ObjectCommand[Constraint]:
cmd = super().as_inherited_ref_cmd(
schema=schema,
context=context,
astnode=astnode,
bases=bases,
referrer=referrer,
)
args = cls._constraint_args_from_ast(schema, astnode, context)
if args:
cmd.set_attribute_value('args', args)
subj_expr = bases[0].get_subjectexpr(schema)
if subj_expr is not None:
cmd.set_attribute_value('subjectexpr', subj_expr, inherited=True)
params = bases[0].get_params(schema)
if params is not None:
cmd.set_attribute_value('params', params, inherited=True)
return cmd
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> CreateConstraint:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, qlast.CreateConcreteConstraint):
if astnode.delegated:
cmd.set_attribute_value('delegated', astnode.delegated)
args = cls._constraint_args_from_ast(schema, astnode, context)
if args:
cmd.set_attribute_value('args', args)
elif isinstance(astnode, qlast.CreateConstraint):
params = cls._get_param_desc_from_ast(
schema, context.modaliases, astnode)
for param in params:
if param.get_kind(schema) is ft.ParameterKind.NamedOnlyParam:
raise errors.InvalidConstraintDefinitionError(
'named only parameters are not allowed '
'in this context',
context=astnode.context)
if param.get_default(schema) is not None:
raise errors.InvalidConstraintDefinitionError(
'constraints do not support parameters '
'with defaults',
context=astnode.context)
if cmd.get_attribute_value('return_type') is None:
cmd.set_attribute_value(
'return_type',
schema.get('std::bool'),
)
if cmd.get_attribute_value('return_typemod') is None:
cmd.set_attribute_value(
'return_typemod',
ft.TypeModifier.SingletonType,
)
assert isinstance(astnode, (qlast.CreateConstraint,
qlast.CreateConcreteConstraint))
# 'subjectexpr' can be present in either astnode type
if astnode.subjectexpr:
orig_text = cls.get_orig_expr_text(schema, astnode, 'subjectexpr')
if (
orig_text is not None
and context.compat_ver_is_before(
(1, 0, verutils.VersionStage.ALPHA, 6)
)
):
# Versions prior to a6 used a different expression
# normalization strategy, so we must renormalize the
# expression.
expr_ql = qlcompiler.renormalize_compat(
astnode.subjectexpr,
orig_text,
schema=schema,
localnames=context.localnames,
)
else:
expr_ql = astnode.subjectexpr
subjectexpr = s_expr.Expression.from_ast(
expr_ql,
schema,
context.modaliases,
context.localnames,
)
cmd.set_attribute_value(
'subjectexpr',
subjectexpr,
)
if (
isinstance(astnode, qlast.CreateConcreteConstraint)
and astnode.except_expr
):
except_expr = s_expr.Expression.from_ast(
astnode.except_expr,
schema,
context.modaliases,
context.localnames,
)
cmd.set_attribute_value('except_expr', except_expr)
cls._validate_subcommands(astnode)
assert isinstance(cmd, CreateConstraint)
return cmd
def _skip_param(self, props: Dict[str, Any]) -> bool:
    """Report whether a parameter should be omitted from DDL output.

    The implicit ``__subject__`` parameter is an internal artifact of
    constraint compilation and is never rendered.
    """
    return (
        s_func.Parameter.paramname_from_fullname(props['name'])
        == '__subject__'
    )
def _get_params_ast(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    node: qlast.DDLOperation,
) -> List[Tuple[int, qlast.FuncParam]]:
    """Return the parameter ASTs for this constraint's DDL node.

    Only abstract constraint definitions (``CREATE ABSTRACT CONSTRAINT``)
    declare parameters; concrete constraint nodes carry none.
    """
    if not isinstance(node, qlast.CreateConstraint):
        # Concrete constraints have no parameter declarations of their own.
        return []
    return super()._get_params_ast(schema, context, node)
def _apply_field_ast(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    node: qlast.DDLOperation,
    op: sd.AlterObjectProperty,
) -> None:
    """Render a single schema field change onto the DDL AST node.

    The ``args`` field needs special handling: each stored expression is
    unpacked back into its qlast form and attached to the node directly;
    every other field is handled by the generic machinery.
    """
    is_concrete_node = isinstance(
        node,
        (qlast.CreateConcreteConstraint, qlast.AlterConcreteConstraint),
    )
    if op.property != 'args' or not is_concrete_node:
        super()._apply_field_ast(schema, context, node, op)
        return

    assert isinstance(op.new_value, s_expr.ExpressionList)
    arg_asts = []
    for stored_arg in op.new_value:
        arg_ast = stored_arg.qlast
        assert isinstance(arg_ast, qlast.Expr), "expected qlast.Expr"
        arg_asts.append(arg_ast)
    node.args = arg_asts
@classmethod
def _classbases_from_ast(
    cls,
    schema: s_schema.Schema,
    astnode: qlast.ObjectDDL,
    context: sd.CommandContext,
) -> List[so.ObjectShell[Constraint]]:
    """Determine the base constraints implied by the DDL AST.

    A concrete constraint always inherits from the abstract constraint
    whose short name it shares; anything else falls back to the generic
    base resolution.
    """
    if not isinstance(astnode, qlast.CreateConcreteConstraint):
        return super()._classbases_from_ast(schema, astnode, context)

    fullname = cls._classname_from_ast(schema, astnode, context)
    shortname = sn.shortname_from_fullname(fullname)
    assert isinstance(shortname, sn.QualName), \
        "expected qualified name"
    base_shell = utils.ast_objref_to_object_shell(
        qlast.ObjectRef(
            module=shortname.module,
            name=shortname.name,
        ),
        metaclass=Constraint,
        schema=schema,
        modaliases=context.modaliases,
    )
    return [base_shell]
class RenameConstraint(
    ConstraintCommand,
    s_func.RenameCallableObject[Constraint],
    referencing.RenameReferencedInheritingObject[Constraint],
):
    """Rename a constraint, propagating the rename to references."""

    @classmethod
    def _classname_quals_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.NamedDDL,
        base_name: sn.Name,
        referrer_name: sn.QualName,
        context: sd.CommandContext,
    ) -> Tuple[str, ...]:
        # A rename keeps the qualifiers of the parent (pre-rename) name:
        # the quals encode the subject/args, which a rename does not change.
        parent_op = cls.get_parent_op(context)
        assert isinstance(parent_op.classname, sn.QualName)
        return cls._classname_quals_from_name(parent_op.classname)

    def _alter_begin(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        schema = super()._alter_begin(schema, context)
        # Renaming an abstract constraint must also rename the concrete
        # constraints derived from it; canonical replays skip this because
        # the propagated commands are already recorded.
        if not context.canonical and self.scls.get_abstract(schema):
            self._propagate_ref_rename(schema, context, self.scls)
        return schema
class AlterConstraintOwned(
    referencing.AlterOwned[Constraint],
    ConstraintCommand,
    field='owned',
    referrer_context_class=ConsistencySubjectCommandContext,
):
    """Alter the ``owned`` field of a constraint.

    All behavior comes from the ``AlterOwned`` generic, parameterized via
    the ``field`` and ``referrer_context_class`` class arguments.
    """
    pass
class AlterConstraint(
    ConstraintCommand,
    referencing.AlterReferencedInheritingObject[Constraint],
):
    """Handle ``ALTER [ABSTRACT] CONSTRAINT``."""

    astnode = [qlast.AlterConcreteConstraint, qlast.AlterConstraint]
    referenced_astnode = qlast.AlterConcreteConstraint

    def _alter_begin(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        referrer_ctx = self.get_referrer_context(context)
        if referrer_ctx is None:
            # Abstract (non-referenced) constraint: nothing extra to do.
            schema = super()._alter_begin(schema, context)
            return schema
        subject = referrer_ctx.scls
        assert isinstance(subject, ConsistencySubject)
        if not context.canonical:
            # Recompute the derived (concrete) constraint attributes from
            # the altered field values.  Name/subject/expr are recomputed
            # by _populate_concrete_constraint_attrs itself, so they are
            # stripped from the pass-through props first.
            props = self.get_attributes(schema, context)
            props.pop('name', None)
            props.pop('subject', None)
            props.pop('expr', None)
            args = props.pop('args', None)
            if not args:
                # No new args in this ALTER: reuse the stored ones.
                args = self.scls.get_args(schema)
            subjectexpr = props.pop('subjectexpr', None)
            subjectexpr_inherited = self.is_attribute_inherited('subjectexpr')
            if not subjectexpr:
                # subjectexpr unchanged: pull the current value (and its
                # inheritance status) from the schema object.
                subjectexpr_inherited = self.scls.field_is_inherited(
                    schema, 'subjectexpr')
                subjectexpr = self.scls.get_subjectexpr(schema)
            fullname = self.classname
            shortname = sn.shortname_from_fullname(fullname)
            assert isinstance(shortname, sn.QualName), \
                "expected qualified name"
            self._populate_concrete_constraint_attrs(
                schema,
                context,
                subject_obj=subject,
                name=shortname,
                subjectexpr=subjectexpr,
                subjectexpr_inherited=subjectexpr_inherited,
                args=args,
                sourcectx=self.source_context,
                **props,
            )
        return super()._alter_begin(schema, context)

    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> AlterConstraint:
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        assert isinstance(cmd, AlterConstraint)
        if isinstance(astnode, (qlast.CreateConcreteConstraint,
                                qlast.AlterConcreteConstraint)):
            if getattr(astnode, 'delegated', False):
                # Only CREATE nodes carry the `delegated` flag directly.
                assert isinstance(astnode, qlast.CreateConcreteConstraint)
                cmd.set_attribute_value('delegated', astnode.delegated)
            # If the subcommands include a rename, reflect the final name
            # on the outer command (the last rename wins).
            new_name = None
            for op in cmd.get_subcommands(type=RenameConstraint):
                new_name = op.new_name
            if new_name is not None:
                cmd.set_attribute_value('name', new_name)
        cls._validate_subcommands(astnode)
        return cmd

    def _get_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        *,
        parent_node: Optional[qlast.DDLOperation] = None,
    ) -> Optional[qlast.DDLOperation]:
        if self.scls.get_abstract(schema):
            return super()._get_ast(schema, context, parent_node=parent_node)
        # We need to make sure to include subjectexpr and args
        # in the AST, since they are really part of the name.
        op = self.as_inherited_ref_ast(
            schema, context, self.scls.get_name(schema),
            self.scls,
        )
        self._apply_fields_ast(schema, context, op)
        # Suppress empty ALTER blocks entirely.
        if (op is not None and hasattr(op, 'commands') and
                not op.commands):
            return None
        return op

    def validate_alter(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> None:
        super().validate_alter(schema, context)
        self_delegated = self.get_attribute_value('delegated')
        if not self_delegated:
            return
        # A constraint cannot become delegated if any concrete base
        # explicitly defines it as non-delegated.
        concrete_bases = [
            b for b in self.scls.get_bases(schema).objects(schema)
            if not b.generic(schema) and not b.get_delegated(schema)
        ]
        if concrete_bases:
            tgt_repr = self.scls.get_verbosename(schema, with_parent=True)
            bases_repr = ', '.join(
                b.get_subject(schema).get_verbosename(schema, with_parent=True)
                for b in concrete_bases
            )
            raise errors.InvalidConstraintDefinitionError(
                f'cannot redefine {tgt_repr} as delegated:'
                f' it is defined as non-delegated in {bases_repr}',
                context=self.source_context,
            )

    def canonicalize_alter_from_external_ref(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> None:
        if (
            not self.get_attribute_value('abstract')
            and (subjectexpr :=
                 self.get_attribute_value('subjectexpr')) is not None
        ):
            # To compute the new name, we construct an AST of the
            # constraint, since that is the infrastructure we have for
            # computing the classname.
            name = sn.shortname_from_fullname(self.classname)
            assert isinstance(name, sn.QualName), "expected qualified name"
            ast = qlast.CreateConcreteConstraint(
                name=qlast.ObjectRef(name=name.name, module=name.module),
                subjectexpr=subjectexpr.qlast,
                args=[],
            )
            quals = sn.quals_from_fullname(self.classname)
            new_name = self._classname_from_ast_and_referrer(
                schema, sn.QualName.from_string(quals[0]), ast, context)
            if new_name == self.classname:
                return
            # The computed name changed: emit a rename subcommand.
            rename = self.scls.init_delta_command(
                schema, sd.RenameObject, new_name=new_name)
            rename.set_attribute_value(
                'name', value=new_name, orig_value=self.classname)
            self.add(rename)

    def _get_params(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_func.FuncParameterList:
        # Parameters of an existing constraint come from the schema object.
        return self.scls.get_params(schema)
class DeleteConstraint(
    ConstraintCommand,
    referencing.DeleteReferencedInheritingObject[Constraint],
    s_func.DeleteCallableObject[Constraint],
):
    """Handle ``DROP [ABSTRACT] CONSTRAINT``."""

    astnode = [qlast.DropConcreteConstraint, qlast.DropConstraint]
    referenced_astnode = qlast.DropConcreteConstraint

    def _apply_field_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        node: qlast.DDLOperation,
        op: sd.AlterObjectProperty,
    ) -> None:
        """Render a field change on the DROP AST.

        The ``args`` field must be reflected on the node (the argument
        expressions are part of the constraint's identity); everything
        else is handled generically.
        """
        if op.property != 'args':
            super()._apply_field_ast(schema, context, node, op)
            return

        assert isinstance(op.old_value, s_expr.ExpressionList)
        assert isinstance(node, qlast.DropConcreteConstraint)
        arg_asts = []
        for old_arg in op.old_value:
            arg_asts.append(old_arg.qlast)
        node.args = arg_asts
class RebaseConstraint(
    ConstraintCommand,
    referencing.RebaseReferencedInheritingObject[Constraint],
):
    """Change the bases of a constraint."""

    def _get_bases_for_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        bases: Tuple[so.ObjectShell[Constraint], ...],
    ) -> Tuple[so.ObjectShell[Constraint], ...]:
        # Constraint bases are implied by the constraint name and are
        # never spelled out in DDL, so the AST gets no explicit bases.
        return ()
| {
"content_hash": "a12378a9301c7d54a855f97b93c0e9d7",
"timestamp": "",
"source": "github",
"line_count": 1562,
"max_line_length": 79,
"avg_line_length": 34.58066581306018,
"alnum_prop": 0.5611959640840507,
"repo_name": "edgedb/edgedb",
"id": "5f7716acfc1b7920af5d5f1d1bc19970fdf61679",
"size": "54696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edb/schema/constraints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "372837"
},
{
"name": "JavaScript",
"bytes": "7481"
},
{
"name": "Makefile",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "9860929"
},
{
"name": "Rust",
"bytes": "238373"
}
],
"symlink_target": ""
} |
"""
Common declarations.
"""
from contextlib import contextmanager
import os
import socket
import subprocess
import tempfile
import time
import psutil
def free_port():
    """
    Ask the operating system for a currently free TCP port and return it.
    """
    sock = socket.socket()
    try:
        # Binding to port 0 lets the OS pick an available ephemeral port.
        sock.bind(('', 0))
        port = sock.getsockname()[1]
    finally:
        sock.close()
    return port
def wait_for(func, expected_exceptions=(), retries=60):
    """
    Wait for a function to return a truthy value, possibly ignoring some
    exceptions if they are raised until the very last retry

    Parameters:
        func - the function to continually call until truthy
        expected_exceptions - list of exceptions to ignore, unless the final
            retry is reached (then any exceptions are reraised)
        retries - number of times to retry before giving up

    Return value:
        The return value of func the last time it was run, or None if
        retries was zero or negative and func was never called.
    """
    retries = int(retries)
    # Bug fix: previously return_value was only assigned inside the loop,
    # so retries <= 0 raised UnboundLocalError at the final return.
    return_value = None
    for retry in range(1, retries + 1):
        try:
            return_value = func()
            if return_value:
                break
        except expected_exceptions:
            # Swallow expected failures until the final attempt.
            if retry == retries:
                raise
        # Bug fix: don't sleep after the final attempt — there is nothing
        # left to wait for, and it added a pointless 1s delay.
        if retry < retries:
            time.sleep(1)
    return return_value
def wait_for_parent():
    """
    Use wait_for_pid to wait for your parent process
    """
    # os.getppid() is the PID of the caller's parent at call time.
    wait_for_pid(os.getppid())
def wait_for_pid(pid):
    """
    Wait for a given PID in the best way possible. If PID is a child, we use
    os.waitpid. Otherwise, we fall back to a polling approach.
    """
    try:
        # Try to wait for a child
        os.waitpid(pid, 0)
    except OSError:
        # Fallback to polling process status
        # (os.waitpid raises OSError when `pid` is not our child).
        try:
            proc = psutil.Process(pid)
            # Poll once per second until the process is gone or reaped.
            # NOTE(review): psutil.STATUS_ZOMBIE/STATUS_DEAD constants equal
            # these strings on POSIX — presumably that is why raw strings
            # are compared here; confirm against the psutil version in use.
            while proc.status() not in ('zombie', 'dead'):
                time.sleep(1)
        except psutil.NoSuchProcess:
            # Process already exited between the waitpid failure and here.
            pass
@contextmanager
def open_root_owned(source, *args, **kwargs):
    """
    Copy a file as root, open it for writing, then copy it back as root again
    when done
    """
    # Edit an unprivileged temp copy, then write it back via sudo, so the
    # caller never needs root to manipulate the file handle itself.
    with tempfile.NamedTemporaryFile(*args, **kwargs) as dest_fh:
        if os.path.isfile(source):
            # Seed the temp file with the current contents (if any).
            subprocess.check_call(['sudo', 'cp', source, dest_fh.name])
        yield dest_fh
        # NOTE(review): if the caller's block raises, this copy-back is
        # skipped (changes are discarded) — presumably intentional.
        subprocess.check_call(['sudo', 'cp', dest_fh.name, source])
def rm_tree_root_owned(path):
    """
    Do an equivalent of shutil.rmtree, but as root
    """
    # -f also suppresses the error when the path does not exist.
    subprocess.check_call(['sudo', 'rm', '-rf', path])
class ImproperlyConfigured(Exception):
    """
    The host is not properly configured for running Docker.
    """
# Python 3.3+ provides subprocess.DEVNULL; emulate it on older versions.
try:
    DEVNULL = subprocess.DEVNULL  # pylint:disable=no-member
except AttributeError:
    # NOTE(review): this opens os.devnull in read mode and never closes it;
    # if DEVNULL is ever used as a subprocess stdout/stderr target, a
    # write-mode handle would be needed — confirm against callers.
    DEVNULL = open(os.devnull)
| {
"content_hash": "246e3d464f0e965c79d914e77052de9b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 77,
"avg_line_length": 23.84070796460177,
"alnum_prop": 0.607275426874536,
"repo_name": "infoxchange/docker-forklift",
"id": "b211418847bc196f24ffc5fd70ba360c7b8bf100",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forklift/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143084"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    # Template factory for the crafted disease-area-stamina medpack
    # tangible object (SWG object template boilerplate).
    result = Tangible()
    result.template = "object/tangible/medicine/crafted/shared_medpack_disease_area_stamina_a.iff"
    result.attribute_template_id = 7
    result.stfName("medicine_name","medic_disease_area_stamina_a")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
"content_hash": "f03c1577d9114df16c2144c8e80c8579",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.7175792507204611,
"repo_name": "obi-two/Rebelion",
"id": "acb0529cb8b6736ce275448343fbaa914c265586",
"size": "492",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_disease_area_stamina_a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
__author__="UShareSoft"
from ussclicore.argumentParser import ArgumentParser, ArgumentParserError
from ussclicore.cmd import Cmd, CoreGlobal
from texttable import Texttable
from uforgecli.utils.org_utils import org_get
from ussclicore.utils import generics_utils, printer
from ussclicore.utils.generics_utils import order_list_object_by
from uforgecli.utils.uforgecli_utils import *
from uforgecli.utils.compare_utils import compare
from uforgecli.utils import *
from hurry.filesize import size
import shlex
# Try on 10.1.2.114
# Where are the appliances
# NOTE: this module is Python 2 (print statements below).
class Images_Cmd(Cmd, CoreGlobal):
    """Administer generated images for a user"""

    cmd_name="images"

    def __init__(self):
        super(Images_Cmd, self).__init__()

    def arg_list(self):
        # Build the argparse parser for the `list` subcommand.
        doParser = ArgumentParser(add_help = True, description="List all the images created by a user")
        mandatory = doParser.add_argument_group("Mandatory arguments")
        optional = doParser.add_argument_group("Optional arguments")
        mandatory.add_argument('--account', dest='account', type=str, required=True, help="The account on which you want to list the images")
        optional.add_argument('--os', dest='os', nargs='+', required=False, help="Only display images that have been built from the operating system(s). You can use Unix matching system (*,?,[seq],[!seq]) and multiple match separating by space.")
        optional.add_argument('--name', dest='name', nargs='+', required=False, help="Only display images that have the name matching this name. You can use Unix matching system (*,?,[seq],[!seq]) and multiple match separating by space.")
        optional.add_argument('--format', dest='format', nargs='+', required=False, help="Only display images that have been generated by the following format(s). You can use Unix matching system (*,?,[seq],[!seq]) and multiple match separating by space.")
        return doParser

    def do_list(self, args):
        # List a user's generated images, optionally filtered by
        # --name / --format / --os, as a text table.
        try:
            doParser = self.arg_list()
            doArgs = doParser.parse_args(shlex.split(args))
            # NOTE(review): `id` shadows the builtin of the same name.
            id = doArgs.account
            imagesList = self.api.Users(id).Images.Get()
            imagesList = imagesList.images.image
            appliancesList = self.api.Users(id).Appliances.Getall()
            appliancesList = appliancesList.appliances.appliance
            if imagesList is None or len(imagesList) == 0:
                printer.out("There is no images for user [" + doArgs.account + "].")
                return 0
            # Apply the optional filters in turn.
            if doArgs.name is not None:
                imagesList = compare(imagesList, doArgs.name, "name")
            if doArgs.format is not None:
                imagesList = compare(imagesList, doArgs.format, "format", "name")
            if doArgs.os is not None:
                # OS names live on the appliance, linked via applianceUri.
                imagesList = compare(list=imagesList, values=doArgs.os, attrName='distributionName', subattrName=None, otherList=appliancesList, linkProperties=['applianceUri', 'uri'])
            if len(imagesList) == 0:
                printer.out("There is no images for user [" + doArgs.account + "] with these filters.")
                return 0
            imagesListSorted = order_list_object_by(imagesList, "name")
            printer.out("List of images :", printer.OK)
            table = Texttable(200)
            table.set_cols_align(["c", "c", "c", "c", "c", "c", "c", "c", "c", "c"])
            table.header(["ID", "Name", "Version", "Rev", "OS", "Format", "Created", "Size", "Compressed", "Status"])
            count = 0
            error = 0
            for item in imagesListSorted:
                count = count + 1
                if not item.compress:
                    compressed = "No"
                else:
                    compressed = "Yes"
                if item.status.error:
                    status = "Error"
                    error = error + 1
                else:
                    status = "Done"
                timeCreated = item.created.strftime("%Y-%m-%d %H:%M:%S")
                # Resolve the image's OS from its linked appliance.
                # NOTE(review): this rescans all images on every row and
                # shadows the `os` module; if no appliance matches on the
                # first row, `os` is unbound below — confirm upstream data
                # always provides a match.
                for item3 in imagesList:
                    for item2 in appliancesList:
                        if item3.applianceUri == item2.uri:
                            os = "" + item2.distributionName + " " + item2.archName
                            break
                table.add_row([item.dbId, item.name, item.version, item.revision, os, item.format.name, timeCreated, size(item.size), compressed, status])
            print table.draw() + "\n"
            printer.out(str(count)+" images found.")
            if error != 0:
                printer.out(str(error)+" images with error status.")
            return 0
        except ArgumentParserError as e:
            printer.out("In Arguments: "+str(e), printer.ERROR)
            self.help_list()
        except Exception as e:
            return handle_uforge_exception(e)

    def help_list(self):
        doParser = self.arg_list()
        doParser.print_help()

    def arg_info(self):
        # Build the argparse parser for the `info` subcommand.
        doParser = ArgumentParser(add_help = True, description="Retrieve detailed information of a generated image")
        mandatory = doParser.add_argument_group("mandatory arguments")
        mandatory.add_argument('--id', dest='id', type=str, required=True, help="The unique identifier of the image to retrieve")
        mandatory.add_argument('--account', dest='account', type=str, required=True, help="The account on which you want to get the image info")
        return doParser

    def do_info(self, args):
        # Show one image's details, its status, and where it was published.
        try:
            doParser = self.arg_info()
            doArgs = doParser.parse_args(shlex.split(args))
            # NOTE(review): `id` shadows the builtin of the same name.
            id = doArgs.account
            imagesList = self.api.Users(id).Images.Get()
            imagesList = imagesList.images.image
            infoImage = None
            for item in imagesList:
                if str(item.dbId) == str(doArgs.id):
                    infoImage = item
            if infoImage is None:
                printer.out("The image with id \"" + doArgs.id + "\" doesn't exist.")
                return 0
            printer.out("Informations about [" + infoImage.name + "] :")
            table = Texttable(200)
            table.set_cols_align(["l", "l"])
            table.add_row(["Name", infoImage.name])
            table.add_row(["Format", infoImage.format.name])
            table.add_row(["Id", infoImage.dbId])
            table.add_row(["Version", infoImage.version])
            table.add_row(["Revision", infoImage.revision])
            table.add_row(["Uri", infoImage.uri])
            appliancesList = self.api.Users(id).Appliances.Getall()
            appliancesList = appliancesList.appliances.appliance
            # Find the appliance (template) the image was built from.
            # NOTE(review): `Exist` is unbound when appliancesList is empty,
            # and `template` is unbound when no appliance matches but is
            # used below — both would raise NameError; confirm upstream
            # data always provides a match.
            for item in appliancesList:
                Exist = False
                if infoImage.applianceUri == item.uri:
                    template = item
                    os = "" + item.distributionName + " " + item.archName
                    Exist = True
                    break
            if not Exist:
                os = "Unknown"
            table.add_row(["OS", os])
            table.add_row(["Template Id", template.dbId])
            table.add_row(["Created", infoImage.created.strftime("%Y-%m-%d %H:%M:%S")])
            table.add_row(["Size", size(infoImage.size)])
            if not infoImage.compress:
                compressed = "No"
            else:
                compressed = "Yes"
            table.add_row(["Compressed", compressed])
            table.add_row(["Description", template.description])
            print table.draw() + "\n"
            # Second table: generation status.
            table = Texttable(200)
            table.set_cols_align(["l", "l"])
            table.header(["Status Details", ""])
            if infoImage.status.error:
                status = "Error"
            else:
                status = "Done"
            table.add_row(["Status", status])
            if infoImage.status.error:
                table.add_row(["Error Message", infoImage.status.message])
                table.add_row(["Detailed Error Message", infoImage.status.errorMessage])
            else:
                table.add_row(["Message", infoImage.status.message])
            print table.draw() + "\n"
            # Third table: clouds the image was published to.
            pimagesList = self.api.Users(id).Pimages.Get()
            table = Texttable(200)
            table.set_cols_align(["l", "l"])
            table.header(["Published To Details", "Cloud Id"])
            if len(pimagesList.publishImages.publishImage) == 0:
                table.add_row(["# Published", "No published images"])
            else:
                for item2 in pimagesList.publishImages.publishImage:
                    if item2.imageUri == infoImage.uri:
                        table.add_row(["# Published","Format Id"+item2.format.dbId+"\nCloud Id : "+item2.cloudId])
            print table.draw() + "\n"
            return 0
        except ArgumentParserError as e:
            printer.out("In Arguments: "+str(e), printer.ERROR)
            self.help_info()
        except Exception as e:
            return handle_uforge_exception(e)

    def help_info(self):
        doParser = self.arg_info()
        doParser.print_help()
"content_hash": "bbdc6a5ef6aed03057946dc95fbe2766",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 264,
"avg_line_length": 52.94285714285714,
"alnum_prop": 0.46753013131858245,
"repo_name": "pedrolegold/uforge-cli",
"id": "815c9396e5ebd866d166fa9c0e4c3480fce9d585",
"size": "11118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/uforgecli/commands/images/images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Python",
"bytes": "382973"
}
],
"symlink_target": ""
} |
import angr
from angr.sim_type import SimTypeString, SimTypeInt
######################################
# puts
######################################
class puts(angr.SimProcedure):
    """Symbolic model of libc's puts(): write the string plus a newline
    to stdout (fd 1) and return an unconstrained value."""
    #pylint:disable=arguments-differ

    def run(self, string):
        self.argument_types = {0: self.ty_ptr(SimTypeString())}
        self.return_type = SimTypeInt(32, True)

        # TODO: use a write that's not a linux syscall
        write = angr.SIM_PROCEDURES['linux_kernel']['write']
        strlen = angr.SIM_PROCEDURES['libc']['strlen']

        # Measure the string, then emit it followed by '\n' (0x0a).
        length = self.inline_call(strlen, string).ret_expr
        self.inline_call(write, self.state.se.BVV(1, self.state.arch.bits), string, length)
        self.state.posix.write(1, self.state.se.BVV(0x0a, 8), 1)

        # TODO: return values
        # Real puts returns a nonnegative int on success; model it as
        # a fully unconstrained symbolic value.
        return self.state.se.Unconstrained('puts', self.state.arch.bits, key=('api', 'puts'))
| {
"content_hash": "4a18884fe507bddc7c50c3a220b5dba0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 93,
"avg_line_length": 36.375,
"alnum_prop": 0.5967926689576174,
"repo_name": "f-prettyland/angr",
"id": "e24c3ef7eb61a8a63a9bf055d586dbb0c9293acf",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/procedures/libc/puts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39375"
},
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "2934645"
}
],
"symlink_target": ""
} |
"""Tools used tool work with Avro files in the context of BigQuery.
Classes, constants and functions in this file are experimental and have no
backwards compatibility guarantees.
NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
"""
# BigQuery types as listed in
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
# with aliases (RECORD, BOOLEAN, FLOAT, INTEGER) as defined in
# https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/java/latest/com/google/api/services/bigquery/model/TableFieldSchema.html#setType-java.lang.String-
# Values are either a plain Avro type name or a dict describing an Avro
# logical type annotation.
BIG_QUERY_TO_AVRO_TYPES = {
    "STRUCT": "record",
    "RECORD": "record",
    "STRING": "string",
    "BOOL": "boolean",
    "BOOLEAN": "boolean",
    "BYTES": "bytes",
    "FLOAT64": "double",
    "FLOAT": "double",
    "INT64": "long",
    "INTEGER": "long",
    "TIME": {
        "type": "long",
        "logicalType": "time-micros",
    },
    "TIMESTAMP": {
        "type": "long",
        "logicalType": "timestamp-micros",
    },
    "DATE": {
        "type": "int",
        "logicalType": "date",
    },
    "DATETIME": "string",
    "NUMERIC": {
        "type": "bytes",
        "logicalType": "decimal",
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
        "precision": 38,
        "scale": 9,
    },
    "GEOGRAPHY": "string",
}
def get_record_schema_from_dict_table_schema(
    schema_name, table_schema, namespace="apache_beam.io.gcp.bigquery"):
    # type: (Text, Dict[Text, Any], Text) -> Dict[Text, Any]
    """Convert a table schema into an Avro schema.

    Args:
      schema_name (Text): The name of the record.
      table_schema (Dict[Text, Any]): A BigQuery table schema in dict form.
      namespace (Text): The namespace of the Avro schema.

    Returns:
      Dict[Text, Any]: The schema as an Avro RecordSchema.
    """
    # Nested fields live under "<namespace>.<schema_name>".
    child_namespace = ".".join((namespace, schema_name))
    return {
        "type": "record",
        "name": schema_name,
        "fields": [
            table_field_to_avro_field(field, child_namespace)
            for field in table_schema["fields"]
        ],
        "doc": "Translated Avro Schema for {}".format(schema_name),
        "namespace": namespace,
    }
def table_field_to_avro_field(table_field, namespace):
    # type: (Dict[Text, Any], str) -> Dict[Text, Any]
    """Convert a BigQuery field to an avro field.

    Args:
      table_field (Dict[Text, Any]): A BigQuery field in dict form.
      namespace (str): The Avro namespace used to qualify nested record
        types.

    Returns:
      Dict[Text, Any]: An equivalent Avro field in dict form.

    Raises:
      ValueError: If the field mode is not NULLABLE, REQUIRED or REPEATED.
    """
    assert "type" in table_field, \
        "Unable to get type for table field {}".format(table_field)
    assert table_field["type"] in BIG_QUERY_TO_AVRO_TYPES, \
        "Unable to map BigQuery field type {} to avro type".format(
            table_field["type"])

    avro_type = BIG_QUERY_TO_AVRO_TYPES[table_field["type"]]

    if avro_type == "record":
        # STRUCT/RECORD: recurse, extending the namespace with the field
        # name so nested record names stay unique.
        element_type = get_record_schema_from_dict_table_schema(
            table_field["name"],
            table_field,
            namespace=".".join((namespace, table_field["name"])))
    else:
        element_type = avro_type

    # Default mode per BigQuery semantics is NULLABLE.
    field_mode = table_field.get("mode", "NULLABLE")
    if field_mode in (None, "NULLABLE"):
        field_type = ["null", element_type]
    elif field_mode == "REQUIRED":
        field_type = element_type
    elif field_mode == "REPEATED":
        field_type = {"type": "array", "items": element_type}
    else:
        # Bug fix: error message previously misspelled "Unknown" as "Unkown".
        raise ValueError("Unknown BigQuery field mode: {}".format(field_mode))

    avro_field = {"type": field_type, "name": table_field["name"]}
    doc = table_field.get("description")
    if doc:
        avro_field["doc"] = doc
    return avro_field
| {
"content_hash": "a7a535e4ced2a6989e983c8ba2ec259b",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 180,
"avg_line_length": 30.025,
"alnum_prop": 0.6341937274493478,
"repo_name": "robertwb/incubator-beam",
"id": "85f600459056641bc43acec11667699151245d23",
"size": "4388",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/gcp/bigquery_avro_tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "59582"
},
{
"name": "Dart",
"bytes": "541526"
},
{
"name": "Dockerfile",
"bytes": "48191"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "4688736"
},
{
"name": "Groovy",
"bytes": "888171"
},
{
"name": "HCL",
"bytes": "101646"
},
{
"name": "HTML",
"bytes": "164685"
},
{
"name": "Java",
"bytes": "38649211"
},
{
"name": "JavaScript",
"bytes": "105966"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "209531"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "9785295"
},
{
"name": "SCSS",
"bytes": "312814"
},
{
"name": "Sass",
"bytes": "19336"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "336583"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "181369"
}
],
"symlink_target": ""
} |
"""Test suite for the deepreload module."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
from IPython.lib.deepreload import reload as dreload
#-----------------------------------------------------------------------------
# Test functions begin
#-----------------------------------------------------------------------------
@dec.skipif_not_numpy
def test_deepreload_numpy():
    "Test that NumPy can be deep reloaded."
    import numpy
    # Excluded modules are left untouched by the deep reload; reloading
    # them would break singletons the interpreter/test harness relies on.
    exclude = [
        # Standard exclusions:
        'sys', 'os.path', '__builtin__', '__main__',
        # Test-related exclusions:
        'unittest', 'UserDict',
    ]
    # The test passes if this completes without raising.
    dreload(numpy, exclude=exclude)
def test_deepreload():
    "Test that dreload does deep reloads and skips excluded modules."
    # Create two throwaway modules on sys.path: B imports A.
    with TemporaryDirectory() as tmpdir:
        with prepended_to_syspath(tmpdir):
            with open(os.path.join(tmpdir, 'A.py'), 'w') as f:
                f.write("class Object(object):\n    pass\n")
            with open(os.path.join(tmpdir, 'B.py'), 'w') as f:
                f.write("import A\n")
            import A
            import B

            # Test that A is not reloaded.
            # With A excluded, A.Object keeps its identity, so an instance
            # created before the reload still passes isinstance.
            obj = A.Object()
            dreload(B, exclude=['A'])
            nt.assert_true(isinstance(obj, A.Object))

            # Test that A is reloaded.
            # A deep reload of B replaces A.Object with a new class object,
            # so the pre-reload instance no longer matches.
            obj = A.Object()
            dreload(B)
            nt.assert_false(isinstance(obj, A.Object))
| {
"content_hash": "17b1115949d44af557dbea876b5e2234",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 33.90196078431372,
"alnum_prop": 0.4933487565066512,
"repo_name": "noslenfa/tdjangorest",
"id": "aa28dbfb0006069088de011a15e62a53bec2f768",
"size": "1753",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "uw/lib/python2.7/site-packages/IPython/lib/tests/test_deepreload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189930"
},
{
"name": "Groff",
"bytes": "7138"
},
{
"name": "HTML",
"bytes": "279754"
},
{
"name": "JavaScript",
"bytes": "1017625"
},
{
"name": "Makefile",
"bytes": "7062"
},
{
"name": "Python",
"bytes": "11886731"
},
{
"name": "Shell",
"bytes": "3741"
},
{
"name": "Smarty",
"bytes": "20972"
}
],
"symlink_target": ""
} |
"""
Base URL Configuration for the food-doctor project
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from config import views
from food.urls import urlpatterns as food_urls
# Route table: auth views, registration, admin, the food app, and
# social-auth OAuth endpoints.
urlpatterns = [
    # Django's built-in login view with a project-specific template.
    url(
        r'^login/$',
        auth_views.login,
        {'template_name': 'auth/login.html'},
        name='login'
    ),
    # Logout and redirect to the site root.
    url(
        r'^logout/$',
        auth_views.logout,
        {'next_page': '/'},
        name='logout'
    ),
    url(
        r'^register$',
        views.Register.as_view(),
        name='register'
    ),
    url(
        r'^admin/',
        admin.site.urls
    ),
    # The food app owns everything not matched above (mounted at root).
    url(
        r'',
        include(food_urls)
    ),
    # python-social-auth (social_django) OAuth flows.
    url(
        r'^oauth/',
        include('social_django.urls', namespace='social')
    ),
]
| {
"content_hash": "6d24fb6e7e3c722bc761750cb31dda9a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 57,
"avg_line_length": 20.24390243902439,
"alnum_prop": 0.5421686746987951,
"repo_name": "savijoki/food-doctor",
"id": "db78362572669fd250ef662e0dfece67f19f7be7",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3311"
},
{
"name": "HTML",
"bytes": "16470"
},
{
"name": "JavaScript",
"bytes": "8077"
},
{
"name": "Python",
"bytes": "17058"
}
],
"symlink_target": ""
} |
# SWIG-generated loader boilerplate: locate and import the compiled
# _rollsum extension module from this package's directory, falling back
# to a plain import on Python < 2.6 (no imp.find_module path search).
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            # Look for the extension next to this wrapper module first.
            fp, pathname, description = imp.find_module('_rollsum', [dirname(__file__)])
        except ImportError:
            import _rollsum
            return _rollsum
        if fp is not None:
            try:
                _mod = imp.load_module('_rollsum', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _rollsum = swig_import_helper()
    del swig_import_helper
else:
    import _rollsum
del version_info
# SWIG-generated compatibility helpers for proxy attribute access; these
# bridge Python attribute protocol onto the generated set/get method maps.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # 'thisown' / 'this' are SWIG-internal ownership attributes.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    # Dispatch to the generated setter if one exists for this attribute.
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Detect new-style class support (ancient Python compatibility).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
class Rollsum(_object):
    """SWIG proxy for the native Rollsum rolling-checksum object.

    Attribute access for s1/s2/window/wofs is routed through the C getters
    and setters exported by the _rollsum extension module.
    (SWIG-generated; do not hand-edit — regenerate with SWIG.)
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Rollsum, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Rollsum, name)
    __repr__ = _swig_repr
    __swig_setmethods__["s1"] = _rollsum.Rollsum_s1_set
    __swig_getmethods__["s1"] = _rollsum.Rollsum_s1_get
    if _newclass:s1 = _swig_property(_rollsum.Rollsum_s1_get, _rollsum.Rollsum_s1_set)
    __swig_setmethods__["s2"] = _rollsum.Rollsum_s2_set
    __swig_getmethods__["s2"] = _rollsum.Rollsum_s2_get
    if _newclass:s2 = _swig_property(_rollsum.Rollsum_s2_get, _rollsum.Rollsum_s2_set)
    __swig_setmethods__["window"] = _rollsum.Rollsum_window_set
    __swig_getmethods__["window"] = _rollsum.Rollsum_window_get
    if _newclass:window = _swig_property(_rollsum.Rollsum_window_get, _rollsum.Rollsum_window_set)
    __swig_setmethods__["wofs"] = _rollsum.Rollsum_wofs_set
    __swig_getmethods__["wofs"] = _rollsum.Rollsum_wofs_get
    if _newclass:wofs = _swig_property(_rollsum.Rollsum_wofs_get, _rollsum.Rollsum_wofs_set)
    def __init__(self):
        this = _rollsum.new_Rollsum()
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _rollsum.delete_Rollsum
    __del__ = lambda self : None;
    def add(self, *args): return _rollsum.Rollsum_add(self, *args)
    def roll(self, *args): return _rollsum.Rollsum_roll(self, *args)
    def digest(self): return _rollsum.Rollsum_digest(self)
    def bits(self): return _rollsum.Rollsum_bits(self)
    def on_split(self): return _rollsum.Rollsum_on_split(self)
Rollsum_swigregister = _rollsum.Rollsum_swigregister
Rollsum_swigregister(Rollsum)
# This file is compatible with both classic and new-style classes.
| {
"content_hash": "be48bd963fc7b0865bab0db7da9bc516",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 98,
"avg_line_length": 38.177083333333336,
"alnum_prop": 0.6311050477489768,
"repo_name": "lowks/camlipy",
"id": "0f20cd4adbbeec74cde298a848075b90da128e40",
"size": "3870",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "camlipy/rollsum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "128953"
},
{
"name": "Python",
"bytes": "68498"
}
],
"symlink_target": ""
} |
import ConfigParser

# Default config ships with the repo; the live config is read from CONFIG_NAME.
DEFAULT_CONFIG = "default.conf"
CONFIG_NAME = ".scrappyrc"

import logging
import os, os.path
import ssl
import sys
import thread, threading
import time

from irclib import client as ircclient

# Logging guidelines and levels
# DEBUG - Python debugging (e.g., printing list of connected servers on startup)
# INFO - Anything we would want to see during normal operation (e.g., startup/reboot messages)
# WARNING - Things we would rather not happen (e.g., using a non-SSL connection)
# ERROR - Deviations from normal operation (e.g., failed to connect to a server)
# CRITICAL - Scrappy gonna die! But not before spitting out a final goodbye! (e.g., SIGINT)
logging.basicConfig(level=logging.DEBUG)
#logging.basicConfig(level=5)
irc_logger = logging.getLogger("irclib.client")

# Change me if you want to see IRC DEBUG lines
irc_logger.setLevel(logging.INFO)
#irc_logger.setLevel(logging.DEBUG)
# State classes
class User(object):
    """A user visible on an IRC server, plus the channels they share with us."""

    def __init__(self, server, nick, host=None):
        self.server = server
        self.nick = nick
        self._host = host
        # Maps lowercased channel name -> Channel object for shared channels.
        self.channels = {}

    @property
    def host(self):
        """The user's hostname, lazily resolved via WHOIS.

        When unknown, sends a WHOIS and busy-waits up to ~1 second for the
        whoisuser reply handler to fill in the value; may still return None
        on timeout.
        """
        if self._host is None:
            self.server.whois((self.nick,))
            timeout = 1
            while self._host is None and timeout > 0:
                timeout -= .01
                time.sleep(.01)
        return self._host

    @host.setter
    def host(self, value):
        self._host = value

    def join(self, channel):
        """Record that this user shares `channel` with us."""
        channel.usercount += 1
        self.channels[channel.name] = channel

    def part(self, channel):
        """Forget `channel` for this user.

        BUG FIX: ``self.channels`` is a dict, so the previous list-style
        ``self.channels.remove(channel)`` always raised AttributeError.
        """
        channel.usercount -= 1
        del self.channels[channel.name]

    def quit(self):
        """User left the server: decrement counts on every shared channel."""
        for channel in self.channels:
            self.channels[channel].usercount -= 1

    def __str__(self):
        # Use _host directly so printing a user never triggers a WHOIS.
        return "%s@%s" % (self.nick, self._host)

    def __repr__(self):
        return self.__str__()
#TODO: use weakref dictionaries for the users
class Channel(object):
    """Tracks a single IRC channel and how many known users share it with us."""
    def __init__(self, server, channel_name):
        # Per-channel child logger, e.g. "scrappy.<server>.<channel>".
        self.logger = server.logger.getChild(channel_name)
        self.server = server
        # IRC channel names are case-insensitive; store lowercased.
        self.name = channel_name.lower()
        # Number of tracked users currently present in this channel.
        self.usercount = 0
class ServerState(ircclient.ServerConnection):
    """One connected IRC server plus the channel/user state tracked on it.

    Wraps an irclib ServerConnection: registers the bot's event dispatcher,
    wires the state-tracking callbacks below, and keeps ``self.users`` and
    ``self.channels`` in sync with JOIN/PART/KICK/QUIT traffic and with
    NAMES/WHOIS replies.
    """

    def __init__(self, name, config, bot, irclibobj):
        super(ServerState, self).__init__(irclibobj)
        irclibobj.add_connection(self)
        self.logger = bot.logger.getChild(name)
        self.server_name = name

        # Fan every IRC event out through the bot's dispatcher.
        self.add_global_handler("all_events", bot.process_event)

        # One second tick for timed functions
        self.execute_every(1, bot.on_tick, arguments=(self,))

        bot.register_event("server-%s" % name, "welcome", self.join_defaults)
        bot.register_event("server-%s" % name, "join", self.update_channel)
        bot.register_event("server-%s" % name, "part", self.update_channel)
        bot.register_event("server-%s" % name, "kick", self.update_channel)
        bot.register_event("server-%s" % name, "quit", self.update_channel)
        bot.register_event("server-%s" % name, "namreply", self.on_namereply)
        bot.register_event("server-%s" % name, "whoisuser", self.on_whoisreply)

        # Optional server password.
        if "password" in config:
            password = config["password"]
        else:
            password = None

        if config["ssl"].lower() == "yes":
            factory = ircclient.connection.Factory(wrapper=ssl.wrap_socket)
        else:
            self.logger.warning("Hey, we really don't like you not using SSL!")
            factory = ircclient.connection.Factory()

        try:
            self.connect(config["server"], int(config["port"]), config["nickname"],
                         password=password, username=config["username"],
                         ircname=config["realname"], connect_factory=factory)
        # FIX: the original used the Python-2-only "except X, err" syntax;
        # this form works on Python 2.6+ and 3.x alike (err was unused).
        except ircclient.ServerConnectionError:
            self.logger.exception("Failed to connect on port %s" % (config["port"]))

        self.cmdchar = config["cmdchar"]
        self.config = config
        self.channels = {}
        self.initial_channels = config["channels"].split()
        self.users = {}
        self.temp_state = {}

    def reply(self, event, message):
        """Reply to a PRIVMSG event: privately for privmsg, in-channel for pubmsg."""
        if event.type == "privmsg":
            self.privmsg(event.source.nick, message)
        elif event.type == "pubmsg":
            self.privmsg(event.target, message)

    def join_defaults(self, server, event, bot):
        """On the server welcome message, join the channels from the config."""
        if server == self:
            for channel in self.initial_channels:
                self.join(channel)

    def update_channel(self, server, event, bot):
        """Keep user/channel membership in sync for join/part/kick/quit events."""
        if server != self:
            return

        if event.type == "kick":
            # The kicked user's nick is the first argument; no host available.
            nick = event.arguments[0]
            host = None
        else:  # join / part / quit
            nick = event.source.nick
            host = event.source.host

        if nick not in self.users:
            self.users[nick] = User(self, nick, host)
        user = self.users[nick]

        if event.type == "quit":
            user.quit()
            # Forget channels that no longer have any tracked users.
            for channel in user.channels:
                if self.channels[channel].usercount == 0:
                    del self.channels[channel]
            del self.users[nick]
            return

        if event.target not in self.channels:
            self.channels[event.target] = Channel(self, event.target)
        # FIX: operate on the tracked channel object. The original built a
        # brand-new Channel here, so user counts were kept on throwaway
        # instances and the stored channel's count was never updated.
        channel = self.channels[event.target]

        if event.type == "join":
            user.join(channel)
        elif event.type == "part" or event.type == "kick":
            # TODO: Need to take care of the part when _I_ part a channel,
            # shouldn't track its state anymore
            user.part(channel)
            if len(user.channels) == 0:
                del self.users[user.nick]
            if channel.usercount == 0:
                # NOTE(review): self.channels is keyed by event.target while
                # channel.name is lowercased -- confirm irclib delivers
                # lowercase channel names, otherwise this can KeyError.
                del self.channels[channel.name]

    def on_namereply(self, server, event, bot):
        """Seed user/channel state from the NAMES reply sent after joining."""
        if server != self:
            return
        # Hoping that the namreply is only additive! In theory, we shouldn't
        # have missed any users leaving channels though
        if event.type == "namreply":
            channel_name = event.arguments[1]
            if channel_name not in self.channels:
                self.channels[channel_name] = Channel(self, channel_name)
            channel = self.channels[channel_name]
            nicks = event.arguments[2].strip()
            for nick in nicks.split(" "):
                # NOTE(review): nicks may carry mode prefixes (@/+) depending
                # on the irclib version -- confirm they are stripped upstream.
                if nick not in self.users:
                    self.users[nick] = User(self, nick)
                self.users[nick].join(channel)

    def on_whoisreply(self, server, event, bot):
        """Record the hostname from a WHOIS reply for a user we track."""
        if server != self:
            return
        if event.type == "whoisuser":
            nick = event.arguments[0]
            host = event.arguments[2]
            try:
                user = self.users[nick]
                user.host = host
            except KeyError:
                return  # ignore the reply, we don't know about this user
class scrappy:
    """This is our main bot class. Generally, everything past loading/unloading modules is provided
    by external python modules themselves.
    """
    def __init__(self):
        """Initialize the scrapster: read config, load core modules, start the bot."""
        self.logger = logging.getLogger("scrappy")
        self.logger.info("Scrappy bot started.")

        # Read config file at CONFIG_NAME
        if not os.path.exists(CONFIG_NAME):
            self.logger.critical("Error: Configuration file '%s' not found." % CONFIG_NAME)
            self.logger.critical("Copy %s to %s and modify as necessary." % (DEFAULT_CONFIG, CONFIG_NAME))
            sys.exit(1)
        self.config = ConfigParser.SafeConfigParser()
        self.config.read(CONFIG_NAME)

        self.modules = {}

        # Our event lists. Each module adds functions to be called to these
        # events; each event handler calls all of the functions within its
        # list. Event types are added as they are registered.
        self.events = {"privmsg": {},
                       "pubmsg": {},
                       "tick": {}}

        # Load these modules before any events occur, since core handles server welcome message
        self.load_module("core")
        self.load_module("modmanage")

        # Guards module load/unload against concurrent event-handler threads.
        self.lock = threading.Lock()

        # Start the bot (blocks inside the IRC event loop).
        self.__main()

    ########################################################################
    def __main(self):
        """The real work. Initialize our connection and register events."""
        # Save arguments for rebooting (see 'core' module)
        self.argv = sys.argv

        # Create a new socket
        self.ircsock = ircclient.IRC()

        # One ServerState (connection + tracked state) per config section.
        self.servers = {}
        for servername in self.config.sections():
            server = ServerState(servername, dict(self.config.items(servername)), self, self.ircsock)
            self.servers[servername] = server

        # Enter main event loop after this; Ctrl-C sends QUIT to every server.
        try:
            self.ircsock.process_forever()
        except KeyboardInterrupt:
            for server in self.servers:
                self.servers[server].quit("BAIL OUT!!")

    ########################################################################
    def shutdown(self, code=0):
        """Exit

        Keyword arguments:
        code -- Exit code (default 0)
        """
        sys.exit(code)

    ########################################################################
    ###################
    #Event Registering#
    ###################
    def register_event(self, modname, event_type, func):
        """Register a callback to an IRC event

        Call this with an event_type and a function to call when that event_type happens.

        Arguments:
        modname -- Modules shortname
        event_type -- Event type, see irclib/events.py for events
        func -- Callback function, needs to have the signature func(server, event, scrappy)
        """
        event_dict = self.events.setdefault(event_type, {})
        event_dict.setdefault(modname, set()).add(func)

        # TODO: Nuke this. Modules can decide if they want both.
        # "msg" is shorthand that subscribes to both private and public messages.
        if event_type == "msg":
            self.events["privmsg"].setdefault(modname, set()).add(func)
            self.events["pubmsg"].setdefault(modname, set()).add(func)

    def unregister_event(self, event_type, func):
        """Not Implemented - Unregister a callback to an IRC event
        """
        pass

    ########################################################################
    ##################
    # Event Handlers #
    ##################
    def process_event(self, conn, event):
        """Processes IRC event on connection

        Arguments:
        conn -- IRC connection
        event -- IRC event
        """
        if event.type in self.events:
            # Run every registered callback for this event type in its own thread.
            for module_events in self.events[event.type].values():
                for func in module_events:
                    thread.start_new_thread(func, (conn, event, self))
        # If the on_EVENT method doesn't exist, call a NOP-like function instead
        do_nothing = lambda c, e: None
        custom_handler = getattr(self, "on_" + event.type, do_nothing)
        custom_handler(conn, event)

    ########################################################################
    def on_tick(self, conn):
        """Calls the tick event callbacks

        Arguments:
        conn -- IRC connection
        """
        for module_events in self.events["tick"].values():
            for func in module_events:
                thread.start_new_thread(func, (conn, self))

    def load_module(self, name):
        """Loads module

        Arguments:
        name -- Name of module
        """
        self.logger.debug("Loading module '%s'." % name)
        if name in self.modules:
            self.logger.debug("Actually, module '%s' already loaded, reloading." % name)
            return self.reload_module(name)

        try:
            # from modules.core.core import core
            module = __import__("modules.%s.%s" % (name, name), globals(), locals(), [name])
            # Get the class (modules follow the name/name.py/class-name convention)
            cls = getattr(module, name)
        except AttributeError as err:
            self.logger.exception("Module '%s' not found, make sure %s/%s.py exists." % (name,name,name))
            raise err
        except ImportError as err:
            self.logger.exception("No such module '%s'." % name)
            raise err
        except Exception as err:
            self.logger.exception("Error loading module '%s': %s" % (name, err))
            raise err

        try:
            # Instantiate the module class; its __init__ registers its events.
            self.modules[name] = cls(self)
        except Exception as err:
            self.logger.exception("Module '%s' failed to initialize." % name)
            raise err

        return True

    def reload_module(self, name):
        """Reloads module

        Arguments:
        name -- Name of module"""
        self.unload_module(name)
        return self.load_module(name)

    def unload_module(self, name):
        """Unloads module

        May not work consistently, see:
        http://stackoverflow.com/questions/3105801/unload-a-module-in-python

        Arguments:
        name -- Name of module"""
        with self.lock:
            if name in self.modules:
                self.unregister_module(name)
                self.modules.pop(name)
                fullname = name+"."+name
                if fullname in sys.modules:
                    # Package name is kept in sys.modules, but it doesn't seem to care if I don't delete it
                    #del(sys.modules[name])
                    del(sys.modules[fullname])
            else:
                return False
        return True

    def unregister_module(self, name):
        """Unregisters module callbacks

        Arguments:
        name -- Name of module
        """
        for event in self.events.values():
            if name in event:
                event.pop(name)
if __name__ == "__main__":
    # Construct the bot; its __init__ enters the blocking IRC event loop.
    bot = scrappy()
| {
"content_hash": "247213b0dad48a290432ba0eb51299ec",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 126,
"avg_line_length": 34.65609756097561,
"alnum_prop": 0.5559856429023858,
"repo_name": "tcoppi/scrappy",
"id": "7d113dc9b6c6f681dd0a902433d2f4255bcf2183",
"size": "14431",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrappy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "241000"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
} |
"""" GAEO view helper module """ | {
"content_hash": "b645532ff9db398a33c777ab64e7466e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 32,
"alnum_prop": 0.625,
"repo_name": "naokits/adminkun_viewer_old",
"id": "764cf3098b2f6cb78225f9f0e428c2b3fe9244fe",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/gaeo/gaeo/view/helper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "17"
},
{
"name": "Objective-C",
"bytes": "188283"
},
{
"name": "Python",
"bytes": "739446"
},
{
"name": "Ruby",
"bytes": "45322"
}
],
"symlink_target": ""
} |
import collections
import datetime
import enum
import hashlib
import logging
import re
import time
import uuid
from concurrent.futures import as_completed
import six
from azure.graphrbac.models import DirectoryObject, GetObjectsParameters
from azure.keyvault import KeyVaultAuthentication, AccessToken
from azure.keyvault import KeyVaultClient, KeyVaultId
from azure.mgmt.managementgroups import ManagementGroupsAPI
from azure.mgmt.web.models import NameValuePair
from c7n_azure import constants
from c7n_azure.constants import RESOURCE_VAULT
from msrestazure.azure_active_directory import MSIAuthentication
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from netaddr import IPNetwork, IPRange, IPSet
from c7n.utils import chunks, local_session
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
class ResourceIdParser(object):
    """Static helpers that pull individual components out of an Azure resource id."""

    @staticmethod
    def get_namespace(resource_id):
        """Provider namespace; for child resources, 'namespace/rootType'."""
        parsed = parse_resource_id(resource_id)
        if not parsed.get('children'):
            return parsed.get('namespace')
        return '/'.join([parsed.get('namespace'), parsed.get('type')])

    @staticmethod
    def get_subscription_id(resource_id):
        """Subscription GUID portion of the id."""
        return parse_resource_id(resource_id).get('subscription')

    @staticmethod
    def get_resource_group(resource_id):
        """Resource group name; falls back to positional parsing for RG-level ids."""
        group = parse_resource_id(resource_id).get("resource_group")
        if group is not None:
            return group
        # parse_resource_id cannot parse ids that point at a resource group
        # itself, so grab the path segment after '/resourceGroups/' directly.
        return resource_id.split('/')[4]

    @staticmethod
    def get_resource_type(resource_id):
        """Resource type; nested child types are joined with '/'."""
        parsed = parse_resource_id(resource_id)
        # parse_resource_id exposes nested types as "child_type_<n>" entries;
        # the root 'type' only matters when there are no children.
        child_keys = sorted(k for k in parsed.keys() if k.find("child_type_") != -1)
        types = [parsed.get(k) for k in child_keys]
        if not types:
            types.append(parsed.get('type'))
        return '/'.join(types)

    @staticmethod
    def get_resource_name(resource_id):
        """Leaf resource name."""
        return parse_resource_id(resource_id).get('resource_name')

    @staticmethod
    def get_full_type(resource_id):
        """Combined 'namespace/resourceType' string."""
        namespace = ResourceIdParser.get_namespace(resource_id)
        resource_type = ResourceIdParser.get_resource_type(resource_id)
        return '/'.join([namespace, resource_type])
class StringUtils(object):
    """Assorted string helpers shared across the Azure provider."""

    @staticmethod
    def equal(a, b, case_insensitive=True):
        """Compare two strings after stripping surrounding whitespace.

        Non-string inputs always compare unequal.
        """
        if not (isinstance(a, six.string_types) and isinstance(b, six.string_types)):
            return False
        left, right = a.strip(), b.strip()
        if case_insensitive:
            return left.lower() == right.lower()
        return left == right

    @staticmethod
    def snake_to_camel(string):
        """Convert 'snake_case' to 'camelCase'."""
        parts = string.split('_')
        return parts[0] + ''.join(word.title() for word in parts[1:])

    @staticmethod
    def naming_hash(val, length=8):
        """Deterministic lowercase hex prefix of sha256(val); default 8 chars."""
        if isinstance(val, six.string_types):
            val = val.encode('utf8')
        digest = hashlib.sha256(val).hexdigest()
        return digest.lower()[:length]
def utcnow():
    """Naive ``datetime`` for the current time in UTC (no tzinfo attached)."""
    return datetime.datetime.utcnow()
def now(tz=None):
    """Current time as a ``datetime``: local time when ``tz`` is None,
    otherwise an aware datetime in the given timezone.
    (The previous docstring incorrectly claimed UTC.)
    """
    return datetime.datetime.now(tz=tz)
def azure_name_value_pair(name, value):
    """Wrap a name/value pair in the App Service ``NameValuePair`` model."""
    return NameValuePair(name=name, value=value)
send_logger = logging.getLogger('custodian.azure.utils.ServiceClient.send')


def custodian_azure_send_override(self, request, headers=None, content=None, **kwargs):
    """ Overrides ServiceClient.send() function to implement retries & log headers

    Retries up to 3 times on 413/429/503 responses when the Retry-After header
    is present and small enough; otherwise returns the error response as-is.
    """
    retries = 0
    max_retries = 3
    while retries < max_retries:
        # 'orig_send' is expected to be installed on the client as the real
        # ServiceClient.send before this override is patched in.
        response = self.orig_send(request, headers, content, **kwargs)

        send_logger.debug(response.status_code)
        for k, v in response.headers.items():
            # Surface Azure rate-limit counters to help debug throttling.
            if k.startswith('x-ms-ratelimit'):
                send_logger.debug(k + ':' + v)

        # Retry codes from urllib3/util/retry.py
        if response.status_code in [413, 429, 503]:
            retry_after = None
            # Header name casing varies; find 'retry-after' case-insensitively.
            for k in response.headers.keys():
                if StringUtils.equal('retry-after', k):
                    retry_after = int(response.headers[k])

            if retry_after is not None and retry_after < constants.DEFAULT_MAX_RETRY_AFTER:
                send_logger.warning('Received retriable error code %i. Retry-After: %i'
                                    % (response.status_code, retry_after))
                time.sleep(retry_after)
                retries += 1
            else:
                # Retry-After missing or too large: give up, return the error.
                send_logger.error("Received throttling error, retry time is %i"
                                  "(retry only if < %i seconds)."
                                  % (retry_after or 0, constants.DEFAULT_MAX_RETRY_AFTER))
                break
        else:
            break
    return response
class ThreadHelper:
    # Global switch (used by tests) that forces serial execution.
    disable_multi_threading = False

    @staticmethod
    def execute_in_parallel(resources, event, execution_method, executor_factory, log,
                            max_workers=constants.DEFAULT_MAX_THREAD_WORKERS,
                            chunk_size=constants.DEFAULT_CHUNK_SIZE,
                            **kwargs):
        """Run ``execution_method(resource_chunk, event, **kwargs)`` over chunks
        of ``resources`` in a thread pool.

        Returns (results, unique_exceptions); individual chunk failures are
        logged and collected, never raised.
        """
        futures = []
        results = []
        exceptions = []

        if ThreadHelper.disable_multi_threading:
            # Serial path: one call over the whole resource list.
            try:
                result = execution_method(resources, event, **kwargs)
                if result:
                    results.extend(result)
            except Exception as e:
                exceptions.append(e)
        else:
            with executor_factory(max_workers=max_workers) as w:
                for resource_set in chunks(resources, chunk_size):
                    futures.append(w.submit(execution_method, resource_set, event, **kwargs))

                for f in as_completed(futures):
                    if f.exception():
                        log.error(
                            "Execution failed with error: %s" % f.exception())
                        exceptions.append(f.exception())
                    else:
                        result = f.result()
                        if result:
                            results.extend(result)

        return results, list(set(exceptions))
class Math(object):
    """Small numeric helpers that ignore None entries in their input."""

    @staticmethod
    def mean(numbers):
        """Arithmetic mean of the non-None values; 0.0 for an empty/all-None input."""
        valid = [n for n in numbers if n is not None]
        if not valid:
            return 0.0
        return float(sum(valid)) / len(valid)

    @staticmethod
    def sum(numbers):
        """Sum of the non-None values, as a float."""
        return float(sum(n for n in numbers if n is not None))
class GraphHelper(object):
    log = logging.getLogger('custodian.azure.utils.GraphHelper')

    @staticmethod
    def get_principal_dictionary(graph_client, object_ids, raise_on_graph_call_error=False):
        """Retrieves Azure AD Objects for corresponding object ids passed.
        :param graph_client: A client for Microsoft Graph.
        :param object_ids: The object ids to retrieve Azure AD objects for.
        :param raise_on_graph_call_error: A boolean indicate whether an error should be
        raised if the underlying Microsoft Graph call fails.
        :return: A dictionary keyed by object id with the Azure AD object as the value.
        Note: empty Azure AD objects could be returned if not found in the graph.
        """
        if not object_ids:
            return {}

        object_params = GetObjectsParameters(
            include_directory_object_references=True,
            object_ids=object_ids)

        # Pre-fill every requested id with an empty DirectoryObject so callers
        # always get an entry, even when the lookup fails or finds no match.
        principal_dics = {object_id: DirectoryObject() for object_id in object_ids}

        aad_objects = graph_client.objects.get_objects_by_object_ids(object_params)
        try:
            for aad_object in aad_objects:
                principal_dics[aad_object.object_id] = aad_object
        except CloudError as e:
            # 403/401 mean the credentials simply lack Graph read permission.
            if e.status_code in [403, 401]:
                GraphHelper.log.warning(
                    'Credentials not authorized for access to read from Microsoft Graph. \n '
                    'Can not query on principalName, displayName, or aadType. \n')
            else:
                GraphHelper.log.error(
                    'Exception in call to Microsoft Graph. \n '
                    'Can not query on principalName, displayName, or aadType. \n'
                    'Error: {0}'.format(e))
            if raise_on_graph_call_error:
                raise
        return principal_dics

    @staticmethod
    def get_principal_name(graph_object):
        """Attempts to resolve a principal name.
        :param graph_object: the Azure AD Graph Object
        :return: The resolved value or an empty string if unsuccessful.
        """
        if hasattr(graph_object, 'user_principal_name'):
            return graph_object.user_principal_name
        elif hasattr(graph_object, 'service_principal_names'):
            # NOTE(review): assumes a non-empty list -- confirm the Graph API
            # guarantees this for service principals.
            return graph_object.service_principal_names[0]
        elif hasattr(graph_object, 'display_name'):
            return graph_object.display_name
        return ''
class PortsRangeHelper(object):
    """Parse, validate and compare NSG destination-port specifications."""

    PortsRange = collections.namedtuple('PortsRange', 'start end')

    @staticmethod
    def _get_port_range(range_str):
        """Parse '80', '80-120' or '*' into a PortsRange tuple."""
        if range_str == '*':
            return PortsRangeHelper.PortsRange(start=0, end=65535)
        bounds = range_str.split('-')
        low = int(bounds[0])
        high = int(bounds[1]) if len(bounds) == 2 else low
        return PortsRangeHelper.PortsRange(start=low, end=high)

    @staticmethod
    def _get_string_port_ranges(ports):
        """Split a comma-separated ports string into PortsRange tuples."""
        return [PortsRangeHelper._get_port_range(part)
                for part in ports.split(',') if part != '']

    @staticmethod
    def _get_rule_port_ranges(rule):
        """Extract destination port ranges from an NSG rule dictionary."""
        properties = rule['properties']
        if 'destinationPortRange' in properties:
            return [PortsRangeHelper._get_port_range(properties['destinationPortRange'])]
        return [PortsRangeHelper._get_port_range(r)
                for r in properties['destinationPortRanges']]

    @staticmethod
    def _port_ranges_to_set(ranges):
        """Expand PortsRange tuples into the full set of covered port numbers."""
        expanded = set()
        for r in ranges:
            expanded.update(range(r.start, r.end + 1))
        return expanded

    @staticmethod
    def validate_ports_string(ports):
        """True when `ports` is a well-formed ports/ranges string within 0-65535,
        with every range start <= end."""
        if re.match(r'^\d+(-\d+)?(,\d+(-\d+)?)*$', ports) is None:
            return False
        return all(r.start <= r.end and r.end <= 65535
                   for r in PortsRangeHelper._get_string_port_ranges(ports))

    @staticmethod
    def get_ports_set_from_string(ports):
        """Convert a ports range string to a set of integers.
            Example: "10-12, 20" -> {10, 11, 12, 20}
        """
        return PortsRangeHelper._port_ranges_to_set(
            PortsRangeHelper._get_string_port_ranges(ports))

    @staticmethod
    def get_ports_set_from_rule(rule):
        """Expand an NSG rule's destination ports into a set of integers."""
        return PortsRangeHelper._port_ranges_to_set(
            PortsRangeHelper._get_rule_port_ranges(rule))

    @staticmethod
    def get_ports_strings_from_list(data):
        """Collapse a sorted list of port numbers into range strings.
            Example: [10, 12, 13, 14, 15] -> ['10', '12-15']
        """
        if not data:
            return []

        spans = []
        first = 0
        # data[first] == data[it] - (it - first) holds exactly while the run
        # starting at index `first` is consecutive through index `it`.
        for it in range(1, len(data)):
            if data[first] == data[it] - (it - first):
                continue
            spans.append((data[first], data[it - 1]))
            first = it
        spans.append((data[first], data[-1]))

        return [str(lo) if lo == hi else "%i-%i" % (lo, hi) for lo, hi in spans]

    @staticmethod
    def build_ports_dict(nsg, direction_key, ip_protocol):
        """Map each port referenced by the NSG to True (allow) / False (deny),
        applying rules in priority order; unreferenced ports stay absent
        (i.e. default deny)."""
        ordered = sorted(nsg['properties']['securityRules'],
                         key=lambda rule: rule['properties']['priority'])
        ports = {}
        for rule in ordered:
            properties = rule['properties']
            # Skip rules for the other traffic direction.
            if not StringUtils.equal(direction_key, properties['direction']):
                continue
            # Protocols overlap when either side is '*' or they match exactly.
            protocol = properties['protocol']
            if not (StringUtils.equal(protocol, "*") or
                    StringUtils.equal(ip_protocol, "*") or
                    StringUtils.equal(protocol, ip_protocol)):
                continue
            is_allowed = StringUtils.equal(properties['access'], 'allow')
            for port in PortsRangeHelper.get_ports_set_from_rule(rule):
                # Higher-priority rules win; never overwrite existing entries.
                ports.setdefault(port, is_allowed)
        return ports
class IpRangeHelper(object):
    """Helpers for turning configuration IP strings into netaddr IPSet objects."""

    @staticmethod
    def parse_ip_ranges(data, key):
        '''
        Parses IP range or CIDR mask.
        :param data: Dictionary where to look for the value.
        :param key: Key for the value to be parsed.
        :return: Set of IP ranges and networks, or None when the key is absent.
        '''
        if key not in data:
            return None
        result = IPSet()
        for raw in data[key]:
            parts = [piece.strip() for piece in raw.split('-')]
            if len(parts) > 2:
                raise Exception('Invalid range. Use x.x.x.x-y.y.y.y or x.x.x.x or x.x.x.x/y.')
            if len(parts) == 2:
                result.add(IPRange(parts[0], parts[1]))
            else:
                result.add(IPNetwork(parts[0]))
        return result
class AppInsightsHelper(object):
    """Resolve an Application Insights instrumentation key from a c7n url."""

    log = logging.getLogger('custodian.azure.utils.AppInsightsHelper')

    @staticmethod
    def get_instrumentation_key(url):
        """Extract the instrumentation key from a '<scheme>://<key-or-path>' url.

        Accepts either a raw instrumentation key (GUID) or a
        '<resource_group>/<app_insights_name>' path resolved via the
        management API. Returns '' when the url is malformed or lookup fails.
        """
        data = url.split('//')[1]
        try:
            uuid.UUID(data)
        except ValueError:
            values = data.split('/')
            if len(values) != 2:
                AppInsightsHelper.log.warning("Bad format: '%s'" % url)
                # BUG FIX: previously fell through and raised IndexError for
                # inputs without a '/'; return '' like other failure paths.
                # (Inputs with extra '/' segments still use the first two,
                # preserving the original behavior.)
                if len(values) < 2:
                    return ''
            return AppInsightsHelper._get_instrumentation_key(values[0], values[1])
        return data

    @staticmethod
    def _get_instrumentation_key(resource_group_name, resource_name):
        """Look up the key via the App Insights management client; '' on failure."""
        from .session import Session
        s = local_session(Session)
        client = s.client('azure.mgmt.applicationinsights.ApplicationInsightsManagementClient')
        try:
            insights = client.components.get(resource_group_name, resource_name)
            return insights.instrumentation_key
        except Exception:
            AppInsightsHelper.log.warning("Failed to retrieve App Insights instrumentation key."
                                          "Resource Group name: %s, App Insights name: %s" %
                                          (resource_group_name, resource_name))
            return ''
class ManagedGroupHelper(object):
    """Resolve a management group into the subscription ids beneath it."""

    @staticmethod
    def get_subscriptions_list(managed_resource_group, credentials):
        """Return names of all '/subscriptions' entities under the management group."""
        client = ManagementGroupsAPI(credentials)
        query = 'name eq \'%s\'' % managed_resource_group
        entities = client.entities.list(filter=query)
        return [entity.name for entity in entities if entity.type == '/subscriptions']
def generate_key_vault_url(name):
    """Build the full https URL for a Key Vault from its short name."""
    template = constants.TEMPLATE_KEYVAULT_URL
    return template.format(name)
class RetentionPeriod(object):
    """ISO-8601 style retention periods restricted to a single designator."""

    PATTERN = re.compile("^P([1-9][0-9]*)([DWMY])$")

    @enum.unique
    class Units(enum.Enum):
        """Retention units; each member carries its ISO-8601 designator letter."""
        day = ('day', 'D')
        days = ('days', 'D')
        week = ('week', 'W')
        weeks = ('weeks', 'W')
        month = ('month', 'M')
        months = ('months', 'M')
        year = ('year', 'Y')
        years = ('years', 'Y')

        def __init__(self, str_value, iso8601_symbol):
            self.str_value = str_value
            self.iso8601_symbol = iso8601_symbol

        def __str__(self):
            return self.str_value

    @staticmethod
    def iso8601_duration(period, retention_period_unit):
        """Format e.g. (30, Units.days) as the ISO-8601 duration 'P30D'."""
        return "P%s%s" % (period, retention_period_unit.iso8601_symbol)

    @staticmethod
    def parse_iso8601_retention_period(iso8601_retention_period):
        """
        A simplified iso8601 duration parser that only accepts one duration designator.
        Returns (period, Units); raises ValueError for malformed input.
        """
        match = RetentionPeriod.PATTERN.match(iso8601_retention_period)
        if match is None:
            raise ValueError("Invalid iso8601_retention_period: {}. "
                             "This parser only accepts a single duration designator."
                             .format(iso8601_retention_period))
        period = int(match.group(1))
        symbol = match.group(2)
        # The regex guarantees the symbol maps to a Units member; the first
        # matching member (the singular form) is returned.
        for units in RetentionPeriod.Units:
            if units.iso8601_symbol == symbol:
                return period, units
@lru_cache()
def get_keyvault_secret(user_identity_id, keyvault_secret_id):
    """Fetch a Key Vault secret value using MSI credentials.

    :param user_identity_id: client id of a user-assigned identity, or falsy
        to authenticate with the system-assigned identity.
    :param keyvault_secret_id: full secret identifier URL (vault/name/version).
    Results are memoized for the process lifetime via lru_cache.
    """
    secret_id = KeyVaultId.parse_secret_id(keyvault_secret_id)
    access_token = None

    # Use UAI if client_id is provided
    if user_identity_id:
        msi = MSIAuthentication(
            client_id=user_identity_id,
            resource=RESOURCE_VAULT)
    else:
        msi = MSIAuthentication(
            resource=RESOURCE_VAULT)

    access_token = AccessToken(token=msi.token['access_token'])
    # KeyVaultAuthentication expects a callback; always hand back the same token.
    credentials = KeyVaultAuthentication(lambda _1, _2, _3: access_token)

    kv_client = KeyVaultClient(credentials)
    return kv_client.get_secret(secret_id.vault, secret_id.name, secret_id.version).value
| {
"content_hash": "3bbda470890493dac7d0ff48e96e88aa",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 100,
"avg_line_length": 36.04816955684008,
"alnum_prop": 0.6030787321609921,
"repo_name": "ewbankkit/cloud-custodian",
"id": "6904f2a57549d11096675043c79d16314856cac5",
"size": "19294",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "145643"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "4913354"
},
{
"name": "Shell",
"bytes": "7277"
}
],
"symlink_target": ""
} |
from __main__ import run
| {
"content_hash": "918b344d9389db7a5a4b57da38762c42",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "ibressler/pyqtgraph",
"id": "23b7cd58ffc3d09e1b17a076c639a49d945e1d45",
"size": "25",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "1868774"
}
],
"symlink_target": ""
} |
from libcloud.common.base import ConnectionKey, BaseDriver
from libcloud.common.types import LibcloudError
__all__ = [
'Member',
'LoadBalancer',
'Algorithm',
'Driver',
'DEFAULT_ALGORITHM'
]
class Member(object):
    """
    A single backend node attached to a load balancer.
    """

    def __init__(self, id, ip, port, balancer=None, extra=None):
        """
        :param id: Member ID.
        :type id: ``str``

        :param ip: IP address of this member.
        :type ip: ``str``

        :param port: Port of this member.
        :type port: ``str``

        :param balancer: Balancer this member is attached to. (optional)
        :type balancer: :class:`.LoadBalancer`

        :param extra: Provider specific attributes.
        :type extra: ``dict``
        """
        # Keep ids as strings for consistent comparison; falsy ids become None.
        self.id = None if not id else str(id)
        self.ip = ip
        self.port = port
        self.balancer = balancer
        self.extra = extra or {}

    def __repr__(self):
        return '<Member: id={0}, address={1}:{2}>'.format(self.id, self.ip, self.port)
class LoadBalancer(object):
    """
    A provider-agnostic representation of a load balancer; all mutating
    operations are delegated to the owning driver.
    """

    def __init__(self, id, name, state, ip, port, driver, extra=None):
        """
        :param id: Load balancer ID.
        :type id: ``str``

        :param name: Load balancer name.
        :type name: ``str``

        :param state: State this loadbalancer is in.
        :type state: :class:`libcloud.loadbalancer.types.State`

        :param ip: IP address of this loadbalancer.
        :type ip: ``str``

        :param port: Port of this loadbalancer.
        :type port: ``int``

        :param driver: Driver this loadbalancer belongs to.
        :type driver: :class:`.Driver`

        :param extra: Provider specific attributes. (optional)
        :type extra: ``dict``
        """
        # Normalize ids to strings; falsy ids collapse to None.
        self.id = None if not id else str(id)
        self.name = name
        self.state = state
        self.ip = ip
        self.port = port
        self.driver = driver
        self.extra = extra or {}

    def attach_compute_node(self, node):
        """Attach a compute node via the owning driver."""
        return self.driver.balancer_attach_compute_node(balancer=self, node=node)

    def attach_member(self, member):
        """Attach a member via the owning driver."""
        return self.driver.balancer_attach_member(balancer=self, member=member)

    def detach_member(self, member):
        """Detach a member via the owning driver."""
        return self.driver.balancer_detach_member(balancer=self, member=member)

    def list_members(self):
        """List this balancer's members via the owning driver."""
        return self.driver.balancer_list_members(balancer=self)

    def destroy(self):
        """Destroy this balancer via the owning driver."""
        return self.driver.destroy_balancer(balancer=self)

    def __repr__(self):
        return '<LoadBalancer: id={0}, name={1}, state={2}>'.format(
            self.id, self.name, self.state)
class Algorithm(object):
    """
    Represents a load balancing algorithm.
    """

    # Numeric constants identifying the supported balancing strategies.
    # Drivers translate these to provider-specific values via their
    # _ALGORITHM_TO_VALUE_MAP / _VALUE_TO_ALGORITHM_MAP dictionaries.
    RANDOM = 0
    ROUND_ROBIN = 1
    LEAST_CONNECTIONS = 2
    WEIGHTED_ROUND_ROBIN = 3
    WEIGHTED_LEAST_CONNECTIONS = 4

# Algorithm used when the caller does not specify one explicitly.
DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN
class Driver(BaseDriver):
    """
    A base Driver class to derive from

    This class is always subclassed by a specific driver.
    """

    # Human-readable driver name and provider website; set by subclasses.
    name = None
    website = None

    connectionCls = ConnectionKey

    # Mappings between Algorithm constants and the provider-specific
    # representation of each algorithm; populated by subclasses.
    _ALGORITHM_TO_VALUE_MAP = {}
    _VALUE_TO_ALGORITHM_MAP = {}

    def __init__(self, key, secret=None, secure=True, host=None,
                 port=None, **kwargs):
        super(Driver, self).__init__(key=key, secret=secret, secure=secure,
                                     host=host, port=port, **kwargs)

    def list_protocols(self):
        """
        Return a list of supported protocols.

        :rtype: ``list`` of ``str``
        """
        raise NotImplementedError(
            'list_protocols not implemented for this driver')

    def list_balancers(self):
        """
        List all loadbalancers

        :rtype: ``list`` of :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'list_balancers not implemented for this driver')

    def create_balancer(self, name, port, protocol, algorithm, members):
        """
        Create a new load balancer instance

        :param name: Name of the new load balancer (required)
        :type name: ``str``

        :param port: Port the load balancer should listen on, defaults to 80
        :type port: ``str``

        :param protocol: Loadbalancer protocol, defaults to http.
        :type protocol: ``str``

        :param members: list of Members to attach to balancer
        :type members: ``list`` of :class:`Member`

        :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN.
        :type algorithm: :class:`Algorithm`

        :rtype: :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'create_balancer not implemented for this driver')

    def destroy_balancer(self, balancer):
        """
        Destroy a load balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :return: ``True`` if the destroy was successful, otherwise ``False``.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'destroy_balancer not implemented for this driver')

    def get_balancer(self, balancer_id):
        """
        Return a :class:`LoadBalancer` object.

        :param balancer_id: id of a load balancer you want to fetch
        :type balancer_id: ``str``

        :rtype: :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'get_balancer not implemented for this driver')

    def update_balancer(self, balancer, **kwargs):
        """
        Sets the name, algorithm, protocol, or port on a load balancer.

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param name: New load balancer name
        :type name: ``str``

        :param algorithm: New load balancer algorithm
        :type algorithm: :class:`Algorithm`

        :param protocol: New load balancer protocol
        :type protocol: ``str``

        :param port: New load balancer port
        :type port: ``int``

        :rtype: :class:`LoadBalancer`
        """
        raise NotImplementedError(
            'update_balancer not implemented for this driver')

    def balancer_attach_compute_node(self, balancer, node):
        """
        Attach a compute node as a member to the load balancer.

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param node: Node to join to the balancer
        :type node: :class:`Node`

        :return: Member after joining the balancer.
        :rtype: :class:`Member`
        """
        # Default implementation: wrap the node's first public IP in a
        # Member on the balancer's port and delegate to
        # balancer_attach_member.
        member = Member(id=None, ip=node.public_ips[0], port=balancer.port)
        return self.balancer_attach_member(balancer, member)

    def balancer_attach_member(self, balancer, member):
        """
        Attach a member to balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param member: Member to join to the balancer
        :type member: :class:`Member`

        :return: Member after joining the balancer.
        :rtype: :class:`Member`
        """
        raise NotImplementedError(
            'balancer_attach_member not implemented for this driver')

    def balancer_detach_member(self, balancer, member):
        """
        Detach member from balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param member: Member which should be used
        :type member: :class:`Member`

        :return: ``True`` if member detach was successful, otherwise ``False``.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'balancer_detach_member not implemented for this driver')

    def balancer_list_members(self, balancer):
        """
        Return list of members attached to balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :rtype: ``list`` of :class:`Member`
        """
        raise NotImplementedError(
            'balancer_list_members not implemented for this driver')

    def list_supported_algorithms(self):
        """
        Return algorithms supported by this driver.

        :rtype: ``list`` of ``str``
        """
        return list(self._ALGORITHM_TO_VALUE_MAP.keys())

    def _value_to_algorithm(self, value):
        """
        Return :class:`Algorithm` based on the value.

        :param value: Algorithm name (e.g. http, tcp, ...).
        :type value: ``str``

        :rtype: :class:`Algorithm`
        """
        try:
            return self._VALUE_TO_ALGORITHM_MAP[value]
        except KeyError:
            raise LibcloudError(value='Invalid value: %s' % (value),
                                driver=self)

    def _algorithm_to_value(self, algorithm):
        """
        Return string value for the provided algorithm.

        :param algorithm: Algorithm enum.
        :type algorithm: :class:`Algorithm`

        :rtype: ``str``
        """
        try:
            return self._ALGORITHM_TO_VALUE_MAP[algorithm]
        except KeyError:
            raise LibcloudError(value='Invalid algorithm: %s' % (algorithm),
                                driver=self)
| {
"content_hash": "f488edbbd66428a56e46cd1ef4d8ff43",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 79,
"avg_line_length": 28.981873111782477,
"alnum_prop": 0.576566246221203,
"repo_name": "poojavade/Genomics_Docker",
"id": "b785af023239efd94361289d005ceb848b0c8319",
"size": "10375",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/apache_libcloud-0.15.1-py2.7.egg/libcloud/loadbalancer/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
} |
from sys import exit, stdout, argv
from os import environ, system, path
environ['KERAS_BACKEND'] = 'tensorflow'
# environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np
import signal
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.utils import plot_model
from keras import backend as K
from subtlenet import config
from subtlenet.generators.gen import make_coll, generate, get_dims
import subtlenet.generators.gen as generator
from subtlenet.backend.losses import sculpting_kl_penalty, emd
from subtlenet.backend.layers import Adversary
from subtlenet.backend.keras_layers import *
from subtlenet.backend.callbacks import ModelCheckpoint, PartialModelCheckpoint
from paths import basedir, figsdir
### some global definitions ###
# usage: train_*.py <truncate> <limit>  (both integers)
NEPOCH = 20
TRAIN_BASELINE = False
generator.truncate = int(argv[1])
config.limit = int(argv[2])
config.bin_decorr = False
# Version tag embedded in every output file name.
APOSTLE = 'v4_trunc%i_limit%i'%(generator.truncate, config.limit)
modeldir = 'cce_adversary/'
system('mkdir -p %s'%modeldir)
# Snapshot this script next to the models it produces, for reproducibility.
system('cp %s %s/train_%s.py'%(argv[0], modeldir, APOSTLE))

### instantiate data loaders ###
top = make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
# dims presumably = (n_samples, n_particles, n_features) -- TODO confirm
dims = get_dims(top)
### first build the classifier! ###

# set up data
opts = {
    'learn_mass' : True,
    'learn_pt' : True,
    'decorr_mass':False
}
classifier_train_gen = generate(data, partition='train', batch=1000, **opts)
classifier_validation_gen = generate(data, partition='validate', batch=10000, **opts)
classifier_test_gen = generate(data, partition='test', batch=10, **opts)
test_i, test_o, test_w = next(classifier_test_gen)

# build all inputs
# input_particles: per-event particle list; input_mass / input_pt:
# scalar kinematics fed in alongside the learned representation.
input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
input_mass = Input(shape=(1,), name='input_mass')
input_pt = Input(shape=(1,), name='input_pt')
inputs = [input_particles, input_mass, input_pt]

# now build the particle network: Conv1D feature extraction over the
# particle sequence, summarized by an LSTM.
h = BatchNormalization(momentum=0.6)(input_particles)
h = Conv1D(32, 2, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6)(h)
h = Conv1D(16, 4, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6)(h)
h = CuDNNLSTM(100)(h)
#h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6)(h)
h = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(h)
particles_final = BatchNormalization(momentum=0.6)(h)

# merge everything
to_merge = [particles_final, input_mass, input_pt]
h = concatenate(to_merge)
# four dense blocks; only the last one uses tanh instead of relu
for i in xrange(1,5):
    if i == 4:
        h = Dense(50, activation='tanh')(h)
    else:
        h = Dense(50, activation='relu')(h)
    h = BatchNormalization(momentum=0.6)(h)
y_hat = Dense(config.n_truth, activation='softmax')(h)
classifier = Model(inputs=inputs, outputs=[y_hat])
classifier.compile(optimizer=Adam(lr=0.005),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
### now the adversary ###
# Same generators, but now also yielding the decorrelation target.
opts['decorr_mass'] = True
adversary_train_gen = generate(data, partition='train', batch=1000, **opts)
adversary_validation_gen = generate(data, partition='validate', batch=10000, **opts)
adversary_test_gen = generate(data, partition='test', batch=10, **opts)
# Adversary predicts a binned quantity (presumably the jet mass, given
# decorr_mass -- TODO confirm) from the classifier output y_hat; its EMD
# loss enters with a large weight to enforce decorrelation.
kin_hats = Adversary(config.n_decorr_bins, n_outputs=1, scale=0.05)(y_hat)
adversary = Model(inputs=inputs,
                  outputs=[y_hat]+kin_hats)
adversary.compile(optimizer=Adam(lr=0.00025),
                  loss=['categorical_crossentropy'] + [emd for _ in kin_hats],
                  loss_weights=[0.05] + ([100] * len(kin_hats)))
print '########### CLASSIFIER ############'
adversary.summary()
plot_model(adversary, show_shapes=True, show_layer_names=False, to_file=figsdir+'/emd_adversary.png')
print '###################################'
### now train ###

# ctrl+C now triggers a graceful exit
# NOTE(review): the `signal` parameter below shadows the `signal` module
# inside save_and_exit; harmless here since the module is not used there.
def save_classifier(name='shallow', model=classifier):
    # Persist `model` as <modeldir>/<name>_<APOSTLE>.h5
    out = '%s/%s_%s.h5'%(modeldir, name, APOSTLE)
    print 'Saving to',out
    model.save(out)

def save_and_exit(signal=None, frame=None, name='shallow', model=classifier):
    save_classifier(name, model)
    exit(1)
signal.signal(signal.SIGINT, save_and_exit)

# Train the baseline classifier, or reload it from an earlier checkpoint.
if (not TRAIN_BASELINE) and path.isfile('%s/%s_%s_best.h5'%(modeldir,'baseline',APOSTLE)):
    tmp_ = load_model('%s/%s_%s_best.h5'%(modeldir,'baseline',APOSTLE))
    classifier.set_weights(tmp_.get_weights())
else:
    classifier.fit_generator(classifier_train_gen,
                             steps_per_epoch=3000,
                             epochs=50,
                             validation_data=classifier_validation_gen,
                             validation_steps=100,
                             callbacks = [ModelCheckpoint('%s/%s_%s_best.h5'%(modeldir,'baseline',APOSTLE),
                                                          save_best_only=True,
                                                          verbose=True)],
                            )
    save_classifier(name='baseline')

# Re-bind SIGINT so an interrupt now saves under the decorrelated name.
def save_and_exit(signal=None, frame=None, name='emd_decorrelated', model=classifier):
    save_classifier(name, model)
    exit(1)
signal.signal(signal.SIGINT, save_and_exit)

# Adversarial fine-tuning stage; checkpoints both the classifier alone and
# the full classifier+adversary stack.
adversary.fit_generator(adversary_train_gen,
                        steps_per_epoch=3000,
                        epochs=NEPOCH,
                        validation_data=adversary_validation_gen,
                        validation_steps=100,
                        callbacks = [
                            PartialModelCheckpoint(classifier,
                                                   '%s/%s_%s_best.h5'%(modeldir,'emd_decorrelated',APOSTLE),
                                                   save_best_only=True,
                                                   verbose=True),
                            ModelCheckpoint('%s/stack_%s_%s_best.h5'%(modeldir,'emd_decorrelated',APOSTLE),
                                            save_best_only=True,
                                            verbose=True)
                        ],
                        )
save_classifier(name='emd_decorrelated')
# def save_and_exit(signal=None, frame=None, name='shallow', model=classifier):
# save_classifier(name, model)
# exit(1)
#
# classifier.fit_generator(classifier_train_gen,
# steps_per_epoch=3000,
# epochs=NEPOCH,
# validation_data=classifier_validation_gen,
# validation_steps=100,
# callbacks = [ModelCheckpoint('%s/%s_%s_best.h5'%(modeldir,'baseline',APOSTLE),
# save_best_only=True,
# verbose=True)],
# )
# save_classifier(name='baseline')
| {
"content_hash": "3f27aeee83e950332aec4be7b5e58b5c",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 109,
"avg_line_length": 38.605555555555554,
"alnum_prop": 0.5957691754209239,
"repo_name": "sidnarayanan/BAdNet",
"id": "656f39709b4b8a2d38c6c630cf56426b9a307492",
"size": "6977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train/gen/adv/models/cce_adversary/train_v4_trunc4_limit50.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "326584"
},
{
"name": "Shell",
"bytes": "900"
}
],
"symlink_target": ""
} |
"""
This example shows how to graft a WSGI app onto mitmproxy. In this
instance, we're using the Flask framework (http://flask.pocoo.org/) to expose
a single simplest-possible page.
"""
from flask import Flask
app = Flask("proxapp")


@app.route('/')
def hello_world():
    """Serve the simplest-possible demo page."""
    return 'Hello World!'


# Register the app using the magic domain "proxapp" on port 80. Requests to
# this domain and port combination will now be routed to the WSGI app instance.
def start(context):
    """mitmproxy script entry point: register the WSGI app(s) on the proxy.

    :param context: mitmproxy script context exposing the app_registry.
    """
    context.app_registry.add(app, "proxapp", 80)

    # SSL works too, but the magic domain needs to be resolvable from the mitmproxy machine due to mitmproxy's design.
    # mitmproxy will connect to said domain and serve its certificate (unless --no-upstream-cert is set)
    # but won't send any data.
    context.app_registry.add(app, "example.com", 443)
| {
"content_hash": "052f67c418e735341092f326ca865044",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 118,
"avg_line_length": 34.916666666666664,
"alnum_prop": 0.7255369928400954,
"repo_name": "tdickers/mitmproxy",
"id": "613d3f8b066c8c9dd84c22288b1a77df336d10e0",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/proxapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "405"
},
{
"name": "CSS",
"bytes": "194361"
},
{
"name": "HTML",
"bytes": "2878"
},
{
"name": "JavaScript",
"bytes": "127316"
},
{
"name": "Python",
"bytes": "1248282"
},
{
"name": "Shell",
"bytes": "4087"
}
],
"symlink_target": ""
} |
__author__ = 'f1ashhimself@gmail.com'
import re
from .. import TooSaltyUISoupException
class _Utils(object):
@classmethod
def convert_wildcard_to_regex(cls, wildcard):
"""
Converts wildcard to regex.
Arguments:
- wildcard: string, wildcard.
Returns:
- String with regex pattern.
"""
regex = re.escape(wildcard)
regex = regex.replace(r'\?', r'[\s\S]{1}')
regex = regex.replace(r'\*', r'[\s\S]*')
return '^%s$' % regex
@classmethod
def replace_inappropriate_symbols(cls, text):
"""
Replaces inappropriate symbols e.g. \xa0 (non-breaking space) to
normal space.
Arguments:
- text: string, text in which symbols should be replaced.
Should be in unicode.
Returns:
- string with processed text.
"""
replace_pairs = [(u'\xa0', ' '),
(u'\u2014', '-')]
for from_, to_ in replace_pairs:
text = text.replace(from_, to_)
return text
@classmethod
def verify_xy_coordinates(self, x, y):
"""
Verifies that x and y is instance of int otherwise raises exception.
Arguments:
- x: x variable.
- y: y variable.
Returns:
- None
"""
if not isinstance(x, int) or not isinstance(y, int):
raise TooSaltyUISoupException(
'x and y arguments should hold int coordinates.')
@classmethod
def verify_mouse_button_name(self, button_name, supported_names):
"""
Verifies that button name is supported otherwise raises exception.
Arguments:
- button_name: string, button name.
- supported_names: list, supported button names.
Returns:
- None
"""
if not button_name in supported_names:
raise TooSaltyUISoupException(
'Button name should be one of supported %s.' %
repr(supported_names))
| {
"content_hash": "a6f130acfcea862f12d13a6c20063623",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 25.14457831325301,
"alnum_prop": 0.5404887398179204,
"repo_name": "terkira/UISoup",
"id": "a874caf8eafea467a12b030a15d7721eec6644e3",
"size": "2727",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "uisoup/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114670"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import VeriCoinTestFramework
from test_framework.util import *
import os
import shutil
class BIP65Test(VeriCoinTestFramework):
    """Tests the version=4 (BIP65/CLTV) block supermajority rollover."""

    def setup_network(self):
        # Node 0: default version; node 1 mines version=3 (old) blocks;
        # node 2 mines version=4 (new) blocks.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=3"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=4"]))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        cnt = self.nodes[0].getblockcount()

        # Mine some old-version blocks
        self.nodes[1].generate(100)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 100):
            raise AssertionError("Failed to mine 100 version=3 blocks")

        # Mine 750 new-version blocks
        for i in xrange(15):
            self.nodes[2].generate(50)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 850):
            raise AssertionError("Failed to mine 750 version=4 blocks")
        # TODO: check that new CHECKLOCKTIMEVERIFY rules are not enforced

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 851):
            # BUG FIX: this previously raised `AssertionFailure`, an
            # undefined name that would itself crash with NameError.
            raise AssertionError("Failed to mine a version=4 block")
        # TODO: check that new CHECKLOCKTIMEVERIFY rules are enforced

        # Mine 198 new-version blocks
        for i in xrange(2):
            self.nodes[2].generate(99)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1049):
            raise AssertionError("Failed to mine 198 version=4 blocks")

        # Mine 1 old-version block
        self.nodes[1].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1050):
            raise AssertionError("Failed to mine a version=3 block after 949 version=4 blocks")

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            # BUG FIX: the message wrongly said "version=3"; this block is
            # mined by node 2, which produces version=4 blocks.
            raise AssertionError("Failed to mine a version=4 block")

        # Mine 1 old-version block: must now be rejected.
        try:
            self.nodes[1].generate(1)
            raise AssertionError("Succeeded to mine a version=3 block after 950 version=4 blocks")
        except JSONRPCException:
            pass
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Accepted a version=3 block after 950 version=4 blocks")

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1052):
            raise AssertionError("Failed to mine a version=4 block")

if __name__ == '__main__':
    BIP65Test().main()
| {
"content_hash": "1af5b4bdbb9026b34a9733d3ecc3b65e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 98,
"avg_line_length": 36.625,
"alnum_prop": 0.60580204778157,
"repo_name": "vericoin/vericoin-core",
"id": "f65609505fc052b2012e7c435452e37b70ee84e0",
"size": "3201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/bip65-cltv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "415522"
},
{
"name": "C++",
"bytes": "3775139"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "134402"
},
{
"name": "Makefile",
"bytes": "89021"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7243"
},
{
"name": "Protocol Buffer",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "453747"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "19809"
},
{
"name": "Shell",
"bytes": "38074"
}
],
"symlink_target": ""
} |
"""Job submission script"""
from __future__ import absolute_import
import logging
from . import opts
from . import local
from . import mpi
from . import sge
from . import yarn
from . import mesos
def config_logger(args):
    """Configure the logger according to the arguments

    Parameters
    ----------
    args: argparser.Arguments
       The arguments passed in by the user.

    Raises
    ------
    RuntimeError
       If args.log_level is not 'INFO' or 'DEBUG'.
    """
    fmt = '%(asctime)s %(levelname)s %(message)s'
    if args.log_level == 'INFO':
        level = logging.INFO
    elif args.log_level == 'DEBUG':
        level = logging.DEBUG
    else:
        raise RuntimeError("Unknown logging level %s" % args.log_level)

    if args.log_file is None:
        # basicConfig already installs a console (stderr) handler; adding
        # another one here -- as the original code did -- made every
        # message appear twice on the console.
        logging.basicConfig(format=fmt, level=level)
    else:
        logging.basicConfig(format=fmt, level=level, filename=args.log_file)
        # With a filename, basicConfig logs only to the file, so mirror the
        # output to the console with an explicit handler.
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter(fmt))
        console.setLevel(level)
        logging.getLogger('').addHandler(console)
def main():
    """Main submission function."""
    args = opts.get_opts()
    config_logger(args)
    # Dispatch table instead of an if/elif chain; each backend module
    # exposes the same submit(args) entry point.
    backends = {
        'local': local,
        'sge': sge,
        'yarn': yarn,
        'mpi': mpi,
        'mesos': mesos,
    }
    backend = backends.get(args.cluster)
    if backend is None:
        raise RuntimeError('Unknown submission cluster type %s' % args.cluster)
    backend.submit(args)
| {
"content_hash": "1b349eb3b1284e681f7a0df401afd397",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 27.9811320754717,
"alnum_prop": 0.6331759946055293,
"repo_name": "yhpeng-git/mxnet",
"id": "6e32403820e75089f01c0a06d0d0b814146756cb",
"size": "1483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dmlc-core/tracker/dmlc_tracker/submit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11053"
},
{
"name": "C",
"bytes": "88476"
},
{
"name": "C++",
"bytes": "4755504"
},
{
"name": "CMake",
"bytes": "147856"
},
{
"name": "Cuda",
"bytes": "3403191"
},
{
"name": "Java",
"bytes": "86766"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "141324"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "575202"
},
{
"name": "Perl 6",
"bytes": "21768"
},
{
"name": "Protocol Buffer",
"bytes": "78574"
},
{
"name": "Python",
"bytes": "2825418"
},
{
"name": "R",
"bytes": "255240"
},
{
"name": "Scala",
"bytes": "828520"
},
{
"name": "Shell",
"bytes": "120692"
}
],
"symlink_target": ""
} |
"""URL endpoint to allow Buildbot slaves to post data to the dashboard."""
import copy
import json
import logging
import math
import re
from google.appengine.api import datastore_errors
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from dashboard import math_utils
from dashboard import post_data_handler
from dashboard.common import datastore_hooks
from dashboard.models import graph_data
_TASK_QUEUE_NAME = 'new-points-queue'
# Number of rows to process per task queue task. This limits the task size
# and execution time (Limits: 100KB object size and 10 minutes execution time).
_TASK_QUEUE_SIZE = 32
# Max length for a Row property name.
_MAX_COLUMN_NAME_LENGTH = 25
# Maximum length of a value for a string property.
_STRING_COLUMN_MAX_LENGTH = 400
# Maximum number of properties for a Row.
_MAX_NUM_COLUMNS = 30
# Maximum length for a test path. This limit is required because the test path
# used as the string ID for TestContainer (the parent in the datastore for Row
# entities), and datastore imposes a maximum string ID length.
_MAX_TEST_PATH_LENGTH = 500
class BadRequestError(Exception):
  """An error indicating that a 400 response status should be returned."""
class AddPointHandler(post_data_handler.PostDataHandler):
  """URL endpoint to post data to the dashboard."""

  def post(self):
    """Validates data parameter and add task to queue to process points.

    The row data comes from a "data" parameter, which is a JSON encoding of a
    list of dictionaries, each of which represents one performance result
    (one point in a graph) and associated data.

      [
        {
          "master": "ChromiumPerf",
          "bot": "xp-release-dual-core",
          "test": "dromaeo/dom/modify",
          "revision": 123456789,
          "value": 24.66,
          "error": 2.33,
          "units": "ms",
          "supplemental_columns": {
            "d_median": 24234.12,
            "d_mean": 23.553,
            "r_webkit": 423340,
            ...
          },
          ...
        },
        ...
      ]

    In general, the required fields are "master", "bot", "test" (which together
    form the test path which identifies the series that this point belongs to),
    and "revision" and "value", which are the X and Y values for the point.

    This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),
    the first producer of which is Telemetry. Telemetry provides lightweight
    serialization of values it produces, as JSON. If a dashboard JSON object is
    passed, it will be a single dict rather than a list, with the test,
    value, error, and units fields replaced by a chart_data field containing a
    Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is
    processed by converting it into rows (which can be viewed as Dashboard JSON
    v0).

    {
      "master": "ChromiumPerf",
      <other row fields>,
      "chart_data": {
        "foo": {
          "bar": {
            "type": "scalar",
            "name": "foo.bar",
            "units": "ms",
            "value": 4.2,
          },
          "summary": {
            "type": "list_of_scalar_values",
            "name": "foo",
            "units": "ms",
            "values": [4.2, 5.7, 6.8],
            "std": 1.30512,
          },
        },
      }

    Request parameters:
      data: JSON encoding of a list of dictionaries.

    Outputs:
      Empty 200 response if successful,
      200 response with warning message if optional data is invalid,
      403 response with error message if sender IP is not white-listed,
      400 response with error message if required data is invalid.
      500 with error message otherwise.
    """
    datastore_hooks.SetPrivilegedRequest()
    if not self._CheckIpAgainstWhitelist():
      # TODO(qyearsley): Add test coverage. See catapult:#1346.
      return
    data_str = self.request.get('data')
    if not data_str:
      # TODO(qyearsley): Add test coverage. See catapult:#1346.
      self.ReportError('Missing "data" parameter.', status=400)
      return

    self.AddData(data_str)

  def AddData(self, data_str):
    """Parses |data_str| as JSON, validates it and enqueues it for adding."""
    try:
      data = json.loads(data_str)
    except ValueError:
      self.ReportError('Invalid JSON string.', status=400)
      return

    logging.info('Received data: %s', data)

    try:
      # isinstance (rather than the original `type(data) is dict`) is the
      # idiomatic type check and also accepts dict subclasses.
      if isinstance(data, dict):
        if data.get('chart_data'):
          data = _DashboardJsonToRawRows(data)
          if not data:
            return  # No data to add, bail out.
        else:
          self.ReportError(
              'Data should be a list of rows or a Dashboard JSON v1.0 dict.',
              status=400)
          return

      if data:
        # We only need to validate the row ID for one point, since all points
        # being handled by this upload should have the same row ID.
        test_map = _ConstructTestPathMap(data)
        _ValidateRowId(data[0], test_map)
        for row_dict in data:
          ValidateRowDict(row_dict)
      _AddTasks(data)
    except BadRequestError as error:
      # If any of the data was invalid, abort immediately and return an error.
      self.ReportError(error.message, status=400)
def _DashboardJsonToRawRows(dash_json_dict):
  """Formats a Dashboard JSON dict as a list of row dicts.

  For the dashboard to begin accepting the Telemetry Dashboard JSON format
  as per go/telemetry-json, this function chunks a Dashboard JSON literal
  into rows and passes the resulting list to _AddTasks.

  Args:
    dash_json_dict: A dashboard JSON v1.0 dict.

  Returns:
    A list of dicts, each of which represents a point.

  Raises:
    AssertionError: The given argument wasn't a dict.
    BadRequestError: The content of the input wasn't valid.
  """
  assert type(dash_json_dict) is dict
  # A Dashboard JSON dict should at least have all charts coming from the
  # same master, bot and rev. It can contain multiple charts, however.
  if not dash_json_dict.get('master'):
    raise BadRequestError('No master name given.')
  if not dash_json_dict.get('bot'):
    raise BadRequestError('No bot name given.')
  if not dash_json_dict.get('point_id'):
    raise BadRequestError('No point_id number given.')
  if not dash_json_dict.get('chart_data'):
    raise BadRequestError('No chart data given.')
  test_suite_name = _TestSuiteName(dash_json_dict)

  chart_data = dash_json_dict.get('chart_data', {})
  charts = chart_data.get('charts', {})
  if not charts:
    return []  # No charts implies no data to add.

  # Links to about:tracing traces are listed under 'trace'; if they
  # exist copy them to a separate dictionary and delete from the chartjson
  # so that we don't try to process them as data points.
  # NOTE: this mutates the 'charts' dict inside the caller's input.
  tracing_links = None
  if 'trace' in charts:
    tracing_links = charts['trace'].copy()
    del charts['trace']
  row_template = _MakeRowTemplate(dash_json_dict)

  benchmark_description = chart_data.get('benchmark_description', '')
  trace_rerun_options = dict(chart_data.get('trace_rerun_options', []))
  is_ref = bool(dash_json_dict.get('is_ref'))
  rows = []

  # One output row per (chart, trace) pair.
  for chart in charts:
    for trace in charts[chart]:
      # Need to do a deep copy here so we don't copy a_tracing_uri data.
      row = copy.deepcopy(row_template)
      specific_vals = _FlattenTrace(
          test_suite_name, chart, trace, charts[chart][trace], is_ref,
          tracing_links, benchmark_description)
      # Telemetry may validly produce rows that represent a value of NaN. To
      # avoid getting into messy situations with alerts, we do not add such
      # rows to be processed.
      if not (math.isnan(specific_vals['value']) or
              math.isnan(specific_vals['error'])):
        if specific_vals['tracing_uri']:
          row['supplemental_columns']['a_tracing_uri'] = specific_vals[
              'tracing_uri']
        if trace_rerun_options:
          row['supplemental_columns']['a_trace_rerun_options'] = (
              trace_rerun_options)
        row.update(specific_vals)
        rows.append(row)

  return rows
def _TestSuiteName(dash_json_dict):
"""Extracts a test suite name from Dashboard JSON.
The dashboard JSON may contain a field "test_suite_name". If this is not
present or it is None, the dashboard will fall back to using "benchmark_name"
in the "chart_data" dict.
"""
if dash_json_dict.get('test_suite_name'):
return dash_json_dict['test_suite_name']
try:
return dash_json_dict['chart_data']['benchmark_name']
except KeyError as e:
raise BadRequestError('Could not find test suite name. ' + e.message)
def _AddTasks(data):
  """Puts tasks on queue for adding data.

  Args:
    data: A list of dictionaries, each of which represents one point.
  """
  task_list = [
      taskqueue.Task(url='/add_point_queue',
                     params={'data': json.dumps(data_sublist)})
      for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE)
  ]

  queue = taskqueue.Queue(_TASK_QUEUE_NAME)
  for task_sublist in _Chunk(task_list, taskqueue.MAX_TASKS_PER_ADD):
    # get_result() blocks until the async add completes; this is possibly
    # faster than a plain synchronous queue.add across multiple batches.
    queue.add_async(task_sublist).get_result()
def _Chunk(items, chunk_size):
"""Breaks a long list into sub-lists of a particular size."""
chunks = []
for i in range(0, len(items), chunk_size):
chunks.append(items[i:i + chunk_size])
return chunks
def _MakeRowTemplate(dash_json_dict):
"""Produces a template for rows created from a Dashboard JSON v1.0 dict.
_DashboardJsonToRawRows adds metadata fields to every row that it creates.
These include things like master, bot, point ID, versions, and other
supplementary data. This method produces a dict containing this metadata
to which row-specific information (like value and error) can be added.
Some metadata needs to be transformed to conform to the v0 format, and this
method is also responsible for that transformation.
Some validation is deferred until after the input is converted to a list
of row dicts, since revision format correctness is checked on a per-point
basis.
Args:
dash_json_dict: A dashboard JSON v1.0 dict.
Returns:
A dict containing data to include in each row dict that is created from
|dash_json_dict|.
"""
row_template = dash_json_dict.copy()
del row_template['chart_data']
del row_template['point_id']
row_template['revision'] = dash_json_dict['point_id']
annotations = row_template['supplemental']
versions = row_template['versions']
del row_template['supplemental']
del row_template['versions']
row_template['supplemental_columns'] = {}
supplemental = row_template['supplemental_columns']
for annotation in annotations:
supplemental['a_' + annotation] = annotations[annotation]
for version in versions:
supplemental['r_' + version] = versions[version]
return row_template
def _FlattenTrace(test_suite_name, chart_name, trace_name, trace,
                  is_ref=False, tracing_links=None, benchmark_description=''):
  """Takes a trace dict from dashboard JSON and readies it for display.

  Traces can be either scalars or lists; if scalar we take the value directly;
  if list we average the values and compute their standard deviation. We also
  extract fields that are normally part of v0 row dicts that are uploaded
  using add_point but are actually part of traces in the v1.0 format.

  Args:
    test_suite_name: The name of the test suite (benchmark).
    chart_name: The name of the chart to which this trace belongs.
    trace_name: The name of the passed trace.
    trace: A trace dict extracted from a dashboard JSON chart.
    is_ref: A boolean which indicates whether this trace comes from a
        reference build.
    tracing_links: A dictionary mapping trace names to about:tracing trace
        urls in cloud storage.
    benchmark_description: A string documenting the benchmark suite to which
        this trace belongs.

  Returns:
    A dict containing units, value, and error for this trace.

  Raises:
    BadRequestError: The data wasn't valid.
  """
  # A chart name of the form "<tir_label>@@<chart>" is rewritten as
  # "<chart>/<tir_label>".
  if '@@' in chart_name:
    tir_label, chart_name = chart_name.split('@@')
    chart_name = chart_name + '/' + tir_label

  value, error = _ExtractValueAndError(trace)

  # If there is a link to an about:tracing trace in cloud storage for this
  # test trace_name, cache it.
  tracing_uri = None
  link = (tracing_links or {}).get(trace_name)
  if link and 'cloud_url' in link:
    tracing_uri = link['cloud_url'].replace('\\/', '/')

  trace_name = _EscapeName(trace_name)

  # The "summary" trace stands for the chart itself rather than a subtest.
  if trace_name == 'summary':
    subtest_name = chart_name
  else:
    subtest_name = chart_name + '/' + trace_name

  name = test_suite_name + '/' + subtest_name
  # Reference-build results get a distinct test path suffix.
  if is_ref:
    if trace_name == 'summary':
      name += '/ref'
    else:
      name += '_ref'

  row_dict = {
      'test': name,
      'value': value,
      'error': error,
      'units': trace['units'],
      'tracing_uri': tracing_uri,
      'benchmark_description': benchmark_description,
  }

  if 'improvement_direction' in trace:
    improvement_direction = trace['improvement_direction']
    if improvement_direction is None:
      raise BadRequestError('improvement_direction must not be None')
    row_dict['higher_is_better'] = _ImprovementDirectionToHigherIsBetter(
        improvement_direction)

  return row_dict
def _ExtractValueAndError(trace):
"""Returns the value and measure of error from a chartjson trace dict.
Args:
trace: A dict that has one "result" from a performance test, e.g. one
"value" in a Telemetry test, with the keys "trace_type", "value", etc.
Returns:
A pair (value, error) where |value| is a float and |error| is some measure
of variance used to show error bars; |error| could be None.
Raises:
BadRequestError: Data format was invalid.
"""
trace_type = trace.get('type')
if trace_type == 'scalar':
value = trace.get('value')
if value is None and trace.get('none_value_reason'):
return float('nan'), 0
try:
return float(value), 0
except:
raise BadRequestError('Expected scalar value, got: %r' % value)
if trace_type == 'list_of_scalar_values':
values = trace.get('values')
if not isinstance(values, list) and values is not None:
# Something else (such as a single scalar, or string) was given.
raise BadRequestError('Expected list of scalar values, got: %r' % values)
if not values or None in values:
# None was included or values is None; this is not an error if there
# is a reason.
if trace.get('none_value_reason'):
return float('nan'), float('nan')
raise BadRequestError('Expected list of scalar values, got: %r' % values)
if not all(_IsNumber(v) for v in values):
raise BadRequestError('Non-number found in values list: %r' % values)
value = math_utils.Mean(values)
std = trace.get('std')
if std is not None:
error = std
else:
error = math_utils.StandardDeviation(values)
return value, error
if trace_type == 'histogram':
return _GeomMeanAndStdDevFromHistogram(trace)
raise BadRequestError('Invalid value type in chart object: %r' % trace_type)
def _IsNumber(v):
return isinstance(v, float) or isinstance(v, int) or isinstance(v, long)
def _EscapeName(name):
"""Escapes a trace name so it can be stored in a row.
Args:
name: A string representing a name.
Returns:
An escaped version of the name.
"""
return re.sub(r'[\:|=/#&,]', '_', name)
def _GeomMeanAndStdDevFromHistogram(histogram):
"""Generates the geom. mean and std. dev. for a histogram.
A histogram is a collection of numerical buckets with associated
counts; a bucket can either represent a number of instances of a single
value ('low'), or from within a range of values (in which case 'high' will
specify the upper bound). We compute the statistics by treating the
histogram analogously to a list of individual values, where the counts tell
us how many of each value there are.
Args:
histogram: A histogram dict with a list 'buckets' of buckets.
Returns:
The geometric mean and standard deviation of the given histogram.
"""
# Note: This code comes originally from
# build/scripts/common/chromium_utils.py and was used initially for
# processing histogram results on the buildbot side previously.
if 'buckets' not in histogram:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return 0.0, 0.0
count = 0
sum_of_logs = 0
for bucket in histogram['buckets']:
if 'high' in bucket:
bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
else:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
bucket['mean'] = bucket['low']
if bucket['mean'] > 0:
sum_of_logs += math.log(bucket['mean']) * bucket['count']
count += bucket['count']
if count == 0:
return 0.0, 0.0
sum_of_squares = 0
geom_mean = math.exp(sum_of_logs / count)
for bucket in histogram['buckets']:
if bucket['mean'] > 0:
sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
return geom_mean, math.sqrt(sum_of_squares / count)
def _ImprovementDirectionToHigherIsBetter(improvement_direction_str):
"""Converts an improvement direction string to a higher_is_better boolean.
Args:
improvement_direction_str: a string, either 'up' or 'down'.
Returns:
A boolean expressing the appropriate higher_is_better value.
Raises:
BadRequestError: if improvement_direction_str is invalid.
"""
# If improvement_direction is provided, we want to use it. Otherwise, by not
# providing it we'll fall back to unit-info.json
# TODO(eakuefner): Fail instead of falling back after fixing crbug.com/459450.
if improvement_direction_str == 'up':
return True
elif improvement_direction_str == 'down':
return False
else:
raise BadRequestError('Invalid improvement direction string: ' +
improvement_direction_str)
def _ConstructTestPathMap(row_dicts):
  """Makes a mapping from test paths to last added revision."""
  keys = []
  for row in row_dicts:
    # Rows missing any path component can't be mapped to a test path.
    if 'master' not in row or 'bot' not in row or 'test' not in row:
      continue
    test_path = '%s/%s/%s' % (
        row['master'], row['bot'], row['test'].strip('/'))
    # Paths longer than the datastore key limit are skipped.
    if len(test_path) > _MAX_TEST_PATH_LENGTH:
      continue
    keys.append(ndb.Key('LastAddedRevision', test_path))
  try:
    entities = ndb.get_multi(keys)
  except datastore_errors.BadRequestError:
    # TODO(qyearsley): Add test coverage. See catapult:#1346.
    logging.warn('Datastore BadRequestError when getting %s', repr(keys))
    return {}
  result = {}
  for entity in entities:
    # get_multi returns None for keys with no corresponding entity.
    if entity is not None:
      result[entity.key.string_id()] = entity.revision
  return result
def ValidateRowDict(row):
  """Checks all fields in the input dictionary.

  Args:
    row: A dictionary which represents one point.

  Raises:
    BadRequestError: The input was not valid.
  """
  for field in ('master', 'bot', 'test'):
    if field not in row:
      raise BadRequestError('No "%s" field in row dict.' % field)
  _ValidateMasterBotTest(row['master'], row['bot'], row['test'])
  GetAndValidateRowProperties(row)
def _ValidateMasterBotTest(master, bot, test):
  """Validates the master, bot, and test properties of a row dict.

  Raises BadRequestError if any of the three path parts is malformed.
  """
  # Trailing and leading slashes in the test name are ignored.
  # The test name must consist of at least a test suite plus sub-test.
  test = test.strip('/')
  if '/' not in test:
    raise BadRequestError('Test name must have more than one part.')

  # The test path depth is bounded by the datastore ancestor limit.
  if len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS:
    raise BadRequestError('Invalid test name: %s' % test)

  # The master and bot names have just one part.
  if '/' in master or '/' in bot:
    raise BadRequestError('Illegal slash in master or bot name.')

  _ValidateTestPath('%s/%s/%s' % (master, bot, test))
def _ValidateTestPath(test_path):
  """Checks whether all the parts of the test path are valid."""
  # A test with a test path length over the max key length shouldn't be
  # created, since the test path is used in TestContainer keys.
  if len(test_path) > _MAX_TEST_PATH_LENGTH:
    raise BadRequestError('Test path too long: %s' % test_path)

  # Stars are reserved for test path patterns, so they can't be used in names.
  if '*' in test_path:
    raise BadRequestError('Illegal asterisk in test name.')

  for part in test_path.split('/'):
    _ValidateTestPathPartName(part)
def _ValidateTestPathPartName(name):
  """Checks whether a Master, Bot or TestMetadata name is OK."""
  # NDB Datastore doesn't allow key names that both start and end with "__".
  if name.startswith('__') and name.endswith('__'):
    raise BadRequestError(
        'Invalid name: "%s". Names cannot start and end with "__".' % name)
def _ValidateRowId(row_dict, test_map):
  """Checks whether the ID for a Row is OK.

  Args:
    row_dict: A dictionary with new point properties, including "revision".
    test_map: A dictionary mapping test paths to the last previously added
        revision for each test.

  Raises:
    BadRequestError: The revision is not acceptable for some reason.
  """
  row_id = GetAndValidateRowId(row_dict)

  # Get the last added revision number for this test.
  master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
  test_path = '%s/%s/%s' % (master, bot, test)
  last_row_id = test_map.get(test_path)
  if not last_row_id:
    # Could be first point in test.
    logging.warning('Test %s has no last added revision entry.', test_path)
    return

  # Some masters are special-cased to allow a one-time jump in row id range
  # (see _IsAcceptableRowId's allow_jump handling).
  allow_jump = (
      master.endswith('Internal') or
      (master.endswith('QA') and bot.startswith('release-tests-')))
  if not _IsAcceptableRowId(row_id, last_row_id, allow_jump=allow_jump):
    raise BadRequestError(
        'Invalid ID (revision) %d; compared to previous ID %s, it was larger '
        'or smaller by too much.' % (row_id, last_row_id))
def _IsAcceptableRowId(row_id, last_row_id, allow_jump=False):
"""Checks whether the given row id (aka revision) is not too large or small.
For each data series (i.e. TestMetadata entity), we assume that row IDs are
monotonically increasing. On a given chart, points are sorted by these
row IDs. This way, points can arrive out of order but still be shown
correctly in the chart.
However, sometimes a bot might start to use a different *type* of row ID;
for example it might change from revision numbers or build numbers to
timestamps, or from timestamps to build numbers. This causes a lot of
problems, including points being put out of order.
If a sender of data actually wants to switch to a different type of
row ID, it would be much cleaner for them to start sending it under a new
chart name.
Args:
row_id: The proposed Row entity id (usually sent as "revision")
last_row_id: The previous Row id, or None if there were none previous.
Returns:
True if acceptable, False otherwise.
"""
if last_row_id is None:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return True
if row_id <= 0:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return False
# Too big of a decrease.
if row_id < 0.5 * last_row_id:
return False
# TODO(perezju): We temporarily allow for a big jump on special cased bots,
# while we migrate from using commit position to timestamp as row id.
# The jump is only allowed into a timestamp falling within Aug-Dec 2016.
# This special casing should be removed after finishing the migration.
if allow_jump and 1470009600 < row_id < 1483228800:
return True
# Too big of an increase.
if row_id > 2 * last_row_id:
return False
return True
def GetAndValidateRowId(row_dict):
  """Returns the integer ID for a new Row.

  This method is also responsible for validating the input fields related
  to making the new row ID.

  Args:
    row_dict: A dictionary obtained from the input JSON.

  Returns:
    An integer row ID.

  Raises:
    BadRequestError: The input wasn't formatted properly.
  """
  try:
    revision = row_dict['revision']
  except KeyError:
    raise BadRequestError('Required field "revision" missing.')
  try:
    return int(revision)
  except (ValueError, TypeError):
    raise BadRequestError('Bad value for "revision", should be numerical.')
def GetAndValidateRowProperties(row):
  """From the object received, make a dictionary of properties for a Row.

  This includes the default "value" and "error" columns as well as all
  supplemental columns, but it doesn't include "revision", and it doesn't
  include input fields that are properties of the parent TestMetadata,
  such as "units".

  This method is responsible for validating all properties that are to be
  properties of the new Row.

  Args:
    row: A dictionary obtained from the input JSON.

  Returns:
    A dictionary of the properties and property values to set when creating
    a Row. This will include "value" and "error" as well as all supplemental
    columns.

  Raises:
    BadRequestError: The properties weren't formatted correctly.
  """
  # Value and error must be floating point numbers.
  if 'value' not in row:
    raise BadRequestError('No "value" given.')
  try:
    properties = {'value': float(row['value'])}
  except (ValueError, TypeError):
    raise BadRequestError('Bad value for "value", should be numerical.')
  if 'error' in row:
    try:
      properties['error'] = float(row['error'])
    except (ValueError, TypeError):
      # A malformed error is not fatal; the point is still usable without it.
      logging.warn('Bad value for "error".')
  properties.update(_GetSupplementalColumns(row))
  return properties
def _GetSupplementalColumns(row):
  """Gets a dict of supplemental columns.

  If any columns are invalid, a warning is logged and they just aren't
  included, but no exception is raised.

  Individual rows may specify up to _MAX_NUM_COLUMNS extra data, revision,
  and annotation columns. These columns must follow formatting rules for
  their type. Invalid columns are dropped with an error log, but the valid
  data will still be graphed.

  Args:
    row: A dict, possibly with the key "supplemental_columns", the value of
        which should be a dict.

  Returns:
    A dict of valid supplemental columns.
  """
  columns = {}
  for (name, value) in row.get('supplemental_columns', {}).iteritems():
    # Don't allow too many columns.
    if len(columns) == _MAX_NUM_COLUMNS:
      logging.warn('Too many columns, some being dropped.')
      break
    value = _CheckSupplementalColumn(name, value)
    # Fix: _CheckSupplementalColumn returns None for invalid columns, but
    # valid columns may be falsy (e.g. a d_ data column of 0.0, or an empty
    # a_ annotation). A truthiness test here dropped those valid values;
    # compare against None explicitly instead.
    if value is not None:
      columns[name] = value
  return columns
def _CheckSupplementalColumn(name, value):
  """Returns a possibly modified value for a supplemental column, or None.

  Column names must carry a two-character type prefix: "d_" (data, numeric),
  "r_" (revision, number/version/git hash) or "a_" (annotation, short
  string). Invalid names or values yield None after logging a warning.
  """
  # Check length of column name.
  name = str(name)
  if len(name) > _MAX_COLUMN_NAME_LENGTH:
    logging.warn('Supplemental column name too long.')
    return None

  # The column name has a prefix which indicates type of value.
  prefix = name[:2]
  if prefix not in ('d_', 'r_', 'a_'):
    logging.warn('Bad column name "%s", invalid prefix.', name)
    return None

  if prefix == 'd_':
    # The d_ prefix means "data column", intended to hold numbers.
    try:
      value = float(value)
    except (ValueError, TypeError):
      logging.warn('Bad value for column "%s", should be numerical.', name)
      return None
  elif prefix == 'r_':
    # The r_ prefix means "revision", and the value should look like a
    # number, a version number, or a git commit hash.
    revision_patterns = [
        r'^\d+$',
        r'^\d+\.\d+\.\d+\.\d+$',
        r'^[A-Fa-f0-9]{40}$',
    ]
    value_str = str(value)
    if (not value or len(value_str) > _STRING_COLUMN_MAX_LENGTH or
        not any(re.match(p, value_str) for p in revision_patterns)):
      logging.warn('Bad value for revision column "%s".', name)
      return None
    value = value_str
  else:
    # Annotation column, should be a short string.
    if len(str(value)) > _STRING_COLUMN_MAX_LENGTH:
      logging.warn('Value for "%s" too long, max length is %d.',
                   name, _STRING_COLUMN_MAX_LENGTH)
      return None
  return value
| {
"content_hash": "48644d101b65edb90c7f2a85951191f1",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 80,
"avg_line_length": 34.33695652173913,
"alnum_prop": 0.6759171327072562,
"repo_name": "benschmaus/catapult",
"id": "117611f8860efe6afaa189a5e7de2839954f65e3",
"size": "28594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/dashboard/add_point.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43486"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "58279"
},
{
"name": "HTML",
"bytes": "11801772"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6141932"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
import contextlib
import sys
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
# Dotted path of the plugin's agent notifier API; used as a mock.patch
# target throughout these tests.
NOTIFIER = ('neutron.plugins.openvswitch.'
            'ovs_neutron_plugin.AgentNotifierApi')
# Kernel version string used to simulate a kernel whose OVS datapath lacks
# VXLAN support.
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class CreateAgentConfigMap(base.BaseTestCase):
    """Tests for ovs_neutron_agent.create_agent_config_map."""

    def test_create_agent_config_map_succeeds(self):
        """The default configuration yields a usable config map."""
        self.assertTrue(ovs_neutron_agent.create_agent_config_map(cfg.CONF))

    def test_create_agent_config_map_fails_for_invalid_tunnel_config(self):
        """Tunneling without a local ip is rejected for every tunnel type."""
        # An ip address is required for tunneling but there is no default;
        # verify this for both gre and vxlan tunnels.
        for tunnel_type in (p_const.TYPE_GRE, p_const.TYPE_VXLAN):
            cfg.CONF.set_override('tunnel_types', [tunnel_type],
                                  group='AGENT')
            with testtools.ExpectedException(ValueError):
                ovs_neutron_agent.create_agent_config_map(cfg.CONF)

    def test_create_agent_config_map_enable_tunneling(self):
        """Setting only enable_tunneling defaults the tunnel type to GRE."""
        cfg.CONF.set_override('tunnel_types', None, group='AGENT')
        cfg.CONF.set_override('enable_tunneling', True, group='OVS')
        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        config_map = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
        self.assertEqual(config_map['tunnel_types'], [p_const.TYPE_GRE])

    def test_create_agent_config_map_fails_no_local_ip(self):
        """Tunneling enabled with no local ip raises ValueError."""
        cfg.CONF.set_override('enable_tunneling', True, group='OVS')
        with testtools.ExpectedException(ValueError):
            ovs_neutron_agent.create_agent_config_map(cfg.CONF)

    def test_create_agent_config_map_fails_for_invalid_tunnel_type(self):
        """An unknown tunnel type raises ValueError."""
        cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
        with testtools.ExpectedException(ValueError):
            ovs_neutron_agent.create_agent_config_map(cfg.CONF)

    def test_create_agent_config_map_multiple_tunnel_types(self):
        """All configured tunnel types appear in the config map."""
        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE,
                              p_const.TYPE_VXLAN], group='AGENT')
        config_map = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
        self.assertEqual(config_map['tunnel_types'],
                         [p_const.TYPE_GRE, p_const.TYPE_VXLAN])
class TestOvsNeutronAgent(base.BaseTestCase):
    def setUp(self):
        """Builds an OVSNeutronAgent with external interactions mocked.

        RPC, bridge setup, MAC lookups and the periodic looping call are all
        patched out so the agent can be constructed without touching OVS or
        the message bus.
        """
        super(TestOvsNeutronAgent, self).setUp()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        # Use the no-op firewall so no iptables work happens in tests.
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        # Avoid rpc initialization for unit tests
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF)

        class MockFixedIntervalLoopingCall(object):
            # Replacement looping call that runs the wrapped function exactly
            # once, synchronously, so periodic agent tasks don't spin.
            def __init__(self, f):
                self.f = f

            def start(self, interval=0):
                self.f()

        with contextlib.nested(
            mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
                       'OVSNeutronAgent.setup_integration_br',
                       return_value=mock.Mock()),
            mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
                       'OVSNeutronAgent.setup_ancillary_bridges',
                       return_value=[]),
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                       'get_local_port_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.linux.utils.get_interface_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.openstack.common.loopingcall.'
                       'FixedIntervalLoopingCall',
                       new=MockFixedIntervalLoopingCall)):
            self.agent = ovs_neutron_agent.OVSNeutronAgent(**kwargs)
            self.agent.tun_br = mock.Mock()
        self.agent.sg_agent = mock.Mock()
    def _mock_port_bound(self, ofport=None, new_local_vlan=None,
                         old_local_vlan=None):
        """Drives port_bound() with the OVS DB mocked and checks rewiring.

        :param ofport: the port's OVS ofport (-1 means an invalid port).
        :param new_local_vlan: local VLAN the port should end up tagged with.
        :param old_local_vlan: if not None, pre-populate local_vlan_map so
            the network appears already bound to this VLAN.
        """
        port = mock.Mock()
        port.ofport = ofport
        net_uuid = 'my-net-uuid'
        if old_local_vlan is not None:
            self.agent.local_vlan_map[net_uuid] = (
                ovs_neutron_agent.LocalVLANMapping(
                    old_local_vlan, None, None, None))
        with contextlib.nested(
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                       'set_db_attribute', return_value=True),
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                       'db_get_val', return_value=str(old_local_vlan)),
            mock.patch.object(self.agent.int_br, 'delete_flows')
        ) as (set_ovs_db_func, get_ovs_db_func, delete_flows_func):
            self.agent.port_bound(port, net_uuid, 'local', None, None)
            get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
            if new_local_vlan != old_local_vlan:
                # The tag changed: the DB must be updated, and stale flows
                # removed only when the ofport is valid.
                set_ovs_db_func.assert_called_once_with(
                    "Port", mock.ANY, "tag", str(new_local_vlan))
                if ofport != -1:
                    delete_flows_func.assert_called_once_with(
                        in_port=port.ofport)
                else:
                    self.assertFalse(delete_flows_func.called)
            else:
                # Already bound to the right VLAN: no rewiring at all.
                self.assertFalse(set_ovs_db_func.called)
                self.assertFalse(delete_flows_func.called)
    def test_port_bound_deletes_flows_for_valid_ofport(self):
        """Binding a port with a real ofport clears its stale flows."""
        self._mock_port_bound(ofport=1, new_local_vlan=1)

    def test_port_bound_ignores_flows_for_invalid_ofport(self):
        """Binding a port with ofport -1 must not touch flows."""
        self._mock_port_bound(ofport=-1, new_local_vlan=1)

    def test_port_bound_does_not_rewire_if_already_bound(self):
        """Re-binding to the same local VLAN leaves the OVS DB untouched."""
        self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1)
    def _test_port_dead(self, cur_tag=None):
        """Drives port_dead() with the port's current OVS tag mocked.

        :param cur_tag: the tag db_get_val reports for the port; pass
            DEAD_VLAN_TAG to simulate an already-dead port.
        """
        port = mock.Mock()
        port.ofport = 1
        with contextlib.nested(
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                       'set_db_attribute', return_value=True),
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                       'db_get_val', return_value=cur_tag),
            mock.patch.object(self.agent.int_br, 'add_flow')
        ) as (set_ovs_db_func, get_ovs_db_func, add_flow_func):
            self.agent.port_dead(port)
            get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
            if cur_tag == ovs_neutron_agent.DEAD_VLAN_TAG:
                # Already dead: no retagging and no drop flow added.
                self.assertFalse(set_ovs_db_func.called)
                self.assertFalse(add_flow_func.called)
            else:
                # Port is retagged with the dead VLAN and its traffic dropped.
                set_ovs_db_func.assert_called_once_with(
                    "Port", mock.ANY, "tag",
                    str(ovs_neutron_agent.DEAD_VLAN_TAG))
                add_flow_func.assert_called_once_with(
                    priority=2, in_port=port.ofport, actions="drop")
    def test_port_dead(self):
        """A live port gets retagged with the dead VLAN and dropped."""
        self._test_port_dead()

    def test_port_dead_with_port_already_dead(self):
        """A port already tagged with the dead VLAN is left alone."""
        self._test_port_dead(ovs_neutron_agent.DEAD_VLAN_TAG)
    def mock_scan_ports(self, vif_port_set=None, registered_ports=None,
                        updated_ports=None, port_tags_dict=None):
        """Calls scan_ports() with the bridge lookups stubbed out.

        :param vif_port_set: ports get_vif_port_set should report.
        :param registered_ports: ports the agent already knows about.
        :param updated_ports: ports flagged as updated by RPC.
        :param port_tags_dict: tag mapping get_port_tag_dict should report.
        :returns: the port_info dict produced by scan_ports.
        """
        if port_tags_dict is None:  # Because empty dicts evaluate as False.
            port_tags_dict = {}
        with contextlib.nested(
            mock.patch.object(self.agent.int_br, 'get_vif_port_set',
                              return_value=vif_port_set),
            mock.patch.object(self.agent.int_br, 'get_port_tag_dict',
                              return_value=port_tags_dict)
        ):
            return self.agent.scan_ports(registered_ports, updated_ports)
def test_scan_ports_returns_current_only_for_unchanged_ports(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 3])
expected = {'current': vif_port_set}
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def test_scan_ports_returns_port_changes(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def _test_scan_ports_with_updated_ports(self, updated_ports):
vif_port_set = set([1, 3, 4])
registered_ports = set([1, 2, 4])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([4]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
    def test_scan_ports_finds_known_updated_ports(self):
        """An updated port that is wired and registered shows as updated."""
        self._test_scan_ports_with_updated_ports(set([4]))

    def test_scan_ports_ignores_unknown_updated_ports(self):
        """An updated port never seen on the bridge is silently dropped."""
        # the port '5' was not seen on current ports. Hence it has either
        # never been wired or already removed and should be ignored
        self._test_scan_ports_with_updated_ports(set([4, 5]))
def test_scan_ports_ignores_updated_port_if_removed(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
updated_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([1]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
vif_port_set = set([1, 2, 3])
registered_ports = set([1, 2, 3])
updated_ports = set([2])
expected = dict(current=vif_port_set, updated=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
    def test_update_ports_returns_changed_vlan(self):
        """A port whose OVS tag disagrees with its LVM shows as updated."""
        br = ovs_lib.OVSBridge('br-int', 'sudo')
        mac = "ca:fe:de:ad:be:ef"
        port = ovs_lib.VifPort(1, 1, 1, mac, br)
        lvm = ovs_neutron_agent.LocalVLANMapping(
            1, '1', None, 1, {port.vif_id: port})
        local_vlan_map = {'1': lvm}
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        # Tag reported by the bridge differs from the LVM's VLAN (1).
        port_tags_dict = {1: []}
        expected = dict(
            added=set([3]), current=vif_port_set,
            removed=set([2]), updated=set([1])
        )
        with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map):
            actual = self.mock_scan_ports(
                vif_port_set, registered_ports, port_tags_dict=port_tags_dict)
        self.assertEqual(expected, actual)
    def test_treat_devices_added_returns_true_for_missing_device(self):
        """An RPC failure fetching details requests a resync (True)."""
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              side_effect=Exception()),
            mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
                              return_value=mock.Mock())):
            self.assertTrue(self.agent.treat_devices_added_or_updated([{}]))
    def _mock_treat_devices_added_updated(self, details, port, func_name):
        """Mock treat devices added or updated.

        :param details: the details to return for the device
        :param port: the port that get_vif_port_by_id should return
        :param func_name: the function that should be called
        :returns: whether the named function was called
        """
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              return_value=details),
            mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
                              return_value=port),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
            mock.patch.object(self.agent, func_name)
        ) as (get_dev_fn, get_vif_func, upd_dev_up, upd_dev_down, func):
            # A False return value means no resync is required.
            self.assertFalse(self.agent.treat_devices_added_or_updated([{}]))
            return func.called
    def test_treat_devices_added_updated_ignores_invalid_ofport(self):
        """A port with ofport -1 is not marked dead."""
        port = mock.Mock()
        port.ofport = -1
        self.assertFalse(self._mock_treat_devices_added_updated(
            mock.MagicMock(), port, 'port_dead'))

    def test_treat_devices_added_updated_marks_unknown_port_as_dead(self):
        """A port with a valid ofport but unrecognized details is killed."""
        port = mock.Mock()
        port.ofport = 1
        self.assertTrue(self._mock_treat_devices_added_updated(
            mock.MagicMock(), port, 'port_dead'))
def test_treat_devices_added_does_not_process_missing_port(self):
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'get_device_details'),
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
return_value=None)
) as (get_dev_fn, get_vif_func):
self.assertFalse(get_dev_fn.called)
    def test_treat_devices_added__updated_updates_known_port(self):
        """Details containing the expected keys lead to treat_vif_port()."""
        details = mock.MagicMock()
        # Pretend every key the agent checks for is present in the details.
        details.__contains__.side_effect = lambda x: True
        self.assertTrue(self._mock_treat_devices_added_updated(
            details, mock.Mock(), 'treat_vif_port'))
    def test_treat_devices_added_updated_put_port_down(self):
        """A port with admin_state_up=False is wired but reported down."""
        fake_details_dict = {'admin_state_up': False,
                             'port_id': 'xxx',
                             'device': 'xxx',
                             'network_id': 'yyy',
                             'physical_network': 'foo',
                             'segmentation_id': 'bar',
                             'network_type': 'baz'}
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              return_value=fake_details_dict),
            mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
                              return_value=mock.MagicMock()),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
            mock.patch.object(self.agent, 'treat_vif_port')
        ) as (get_dev_fn, get_vif_func, upd_dev_up,
              upd_dev_down, treat_vif_port):
            # No resync needed; the port is still wired, then put down.
            self.assertFalse(self.agent.treat_devices_added_or_updated([{}]))
            self.assertTrue(treat_vif_port.called)
            self.assertTrue(upd_dev_down.called)
    def test_treat_devices_removed_returns_true_for_missing_device(self):
        """An RPC failure while removing a device requests a resync."""
        with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
                               side_effect=Exception()):
            self.assertTrue(self.agent.treat_devices_removed([{}]))

    def _mock_treat_devices_removed(self, port_exists):
        """Runs treat_devices_removed and checks the port gets unbound.

        :param port_exists: the 'exists' flag the plugin reports back.
        """
        details = dict(exists=port_exists)
        with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
                               return_value=details):
            with mock.patch.object(self.agent,
                                   'port_unbound') as port_unbound:
                self.assertFalse(self.agent.treat_devices_removed([{}]))
                self.assertTrue(port_unbound.called)

    def test_treat_devices_removed_unbinds_port(self):
        """A port the plugin still knows about is unbound on removal."""
        self._mock_treat_devices_removed(True)

    def test_treat_devices_removed_ignores_missing_port(self):
        """A port already gone from the plugin is still unbound locally."""
        self._mock_treat_devices_removed(False)
    def _test_process_network_ports(self, port_info):
        """Checks process_network_ports dispatches to the right handlers.

        :param port_info: a scan_ports-style dict with 'current', 'added',
            'removed' and optionally 'updated' sets.
        """
        with contextlib.nested(
            mock.patch.object(self.agent.sg_agent, "setup_port_filters"),
            mock.patch.object(self.agent, "treat_devices_added_or_updated",
                              return_value=False),
            mock.patch.object(self.agent, "treat_devices_removed",
                              return_value=False)
        ) as (setup_port_filters, device_added_updated, device_removed):
            # False means neither handler requested a resync.
            self.assertFalse(self.agent.process_network_ports(port_info))
            setup_port_filters.assert_called_once_with(
                port_info['added'], port_info.get('updated', set()))
            # Updated ports are rewired together with the added ones.
            device_added_updated.assert_called_once_with(
                port_info['added'] | port_info.get('updated', set()))
            device_removed.assert_called_once_with(port_info['removed'])
def test_process_network_ports(self):
self._test_process_network_ports(
{'current': set(['tap0']),
'removed': set(['eth0']),
'added': set(['eth1'])})
def test_process_network_port_with_updated_ports(self):
self._test_process_network_ports(
{'current': set(['tap0', 'tap1']),
'updated': set(['tap1', 'eth1']),
'removed': set(['eth0']),
'added': set(['eth1'])})
def test_report_state(self):
with mock.patch.object(self.agent.state_rpc,
"report_state") as report_st:
self.agent.int_br_device_count = 5
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state)
self.assertNotIn("start_flag", self.agent.agent_state)
self.assertEqual(
self.agent.agent_state["configurations"]["devices"],
self.agent.int_br_device_count
)
def test_network_delete(self):
with contextlib.nested(
mock.patch.object(self.agent, "reclaim_local_vlan"),
mock.patch.object(self.agent.tun_br, "cleanup_tunnel_port")
) as (recl_fn, clean_tun_fn):
self.agent.network_delete("unused_context",
network_id="123")
self.assertFalse(recl_fn.called)
self.agent.local_vlan_map["123"] = "LVM object"
self.agent.network_delete("unused_context",
network_id="123")
self.assertFalse(clean_tun_fn.called)
recl_fn.assert_called_with("123")
def test_port_update(self):
port = {"id": "123",
"network_id": "124",
"admin_state_up": False}
self.agent.port_update("unused_context",
port=port,
network_type="vlan",
segmentation_id="1",
physical_network="physnet")
self.assertEqual(set(['123']), self.agent.updated_ports)
def test_setup_physical_bridges(self):
with contextlib.nested(
mock.patch.object(ip_lib, "device_exists"),
mock.patch.object(sys, "exit"),
mock.patch.object(utils, "execute"),
mock.patch.object(ovs_lib.OVSBridge, "remove_all_flows"),
mock.patch.object(ovs_lib.OVSBridge, "add_flow"),
mock.patch.object(ovs_lib.OVSBridge, "add_port"),
mock.patch.object(ovs_lib.OVSBridge, "delete_port"),
mock.patch.object(self.agent.int_br, "add_port"),
mock.patch.object(self.agent.int_br, "delete_port"),
mock.patch.object(ip_lib.IPWrapper, "add_veth"),
mock.patch.object(ip_lib.IpLinkCommand, "delete"),
mock.patch.object(ip_lib.IpLinkCommand, "set_up"),
mock.patch.object(ip_lib.IpLinkCommand, "set_mtu")
) as (devex_fn, sysexit_fn, utilsexec_fn, remflows_fn, ovs_addfl_fn,
ovs_addport_fn, ovs_delport_fn, br_addport_fn,
br_delport_fn, addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn):
devex_fn.return_value = True
parent = mock.MagicMock()
parent.attach_mock(utilsexec_fn, 'utils_execute')
parent.attach_mock(linkdel_fn, 'link_delete')
parent.attach_mock(addveth_fn, 'add_veth')
addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"),
ip_lib.IPDevice("phy-br-eth1"))
ovs_addport_fn.return_value = "int_ofport"
br_addport_fn.return_value = "phys_veth"
self.agent.setup_physical_bridges({"physnet1": "br-eth"})
expected_calls = [mock.call.link_delete(),
mock.call.utils_execute(['/sbin/udevadm',
'settle',
'--timeout=10']),
mock.call.add_veth('int-br-eth',
'phy-br-eth')]
parent.assert_has_calls(expected_calls, any_order=False)
self.assertEqual(self.agent.int_ofports["physnet1"],
"phys_veth")
self.assertEqual(self.agent.phys_ofports["physnet1"],
"int_ofport")
def test_port_unbound(self):
with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn:
self.agent.enable_tunneling = True
lvm = mock.Mock()
lvm.network_type = "gre"
lvm.vif_ports = {"vif1": mock.Mock()}
self.agent.local_vlan_map["netuid12345"] = lvm
self.agent.port_unbound("vif1", "netuid12345")
self.assertTrue(reclvl_fn.called)
reclvl_fn.called = False
lvm.vif_ports = {}
self.agent.port_unbound("vif1", "netuid12345")
self.assertEqual(reclvl_fn.call_count, 2)
lvm.vif_ports = {"vif1": mock.Mock()}
self.agent.port_unbound("vif3", "netuid12345")
self.assertEqual(reclvl_fn.call_count, 2)
def _check_ovs_vxlan_version(self, installed_usr_version,
installed_klm_version,
installed_kernel_version,
expecting_ok):
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_klm_version'
) as klm_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_usr_version'
) as usr_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_kernel_version'
) as kernel_cmd:
try:
klm_cmd.return_value = installed_klm_version
usr_cmd.return_value = installed_usr_version
kernel_cmd.return_value = installed_kernel_version
self.agent.tunnel_types = 'vxlan'
self.agent._check_ovs_version()
version_ok = True
except SystemExit as e:
self.assertEqual(e.code, 1)
version_ok = False
self.assertEqual(version_ok, expecting_ok)
def test_check_minimum_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
def test_check_future_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) + 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=True)
def test_check_fail_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) - 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=False)
def test_check_fail_no_version(self):
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(None, None,
min_kernel_ver, expecting_ok=False)
def test_check_fail_klm_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = OVS_LINUX_KERN_VERS_WITHOUT_VXLAN
install_ver = str(float(min_vxlan_ver) - 0.01)
self._check_ovs_vxlan_version(min_vxlan_ver, install_ver,
min_kernel_ver, expecting_ok=False)
def _prepare_l2_pop_ofports(self):
    """Seed the agent with two GRE networks and their tunnel ofports.

    net1 floods to ofport '1' only; net2 floods to ofports '1' and '2'.
    The GRE tunnel ofport map knows two remote IPs.
    """
    def _fake_lvm(vlan, seg_id, ofports):
        lvm = mock.Mock()
        lvm.network_type = 'gre'
        lvm.vlan = vlan
        lvm.segmentation_id = seg_id
        lvm.tun_ofports = ofports
        return lvm

    self.agent.local_vlan_map = {
        'net1': _fake_lvm('vlan1', 'seg1', set(['1'])),
        'net2': _fake_lvm('vlan2', 'seg2', set(['1', '2'])),
    }
    self.agent.tun_br_ofports = {'gre':
                                 {'1.1.1.1': '1', '2.2.2.2': '2'}}
def test_fdb_ignore_network(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net3': {}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'delete_flows'),
mock.patch.object(self.agent, 'setup_tunnel_port'),
mock.patch.object(self.agent, 'cleanup_tunnel_port')
) as (add_flow_fn, del_flow_fn, add_tun_fn, clean_tun_fn):
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_flow_fn.called)
self.assertFalse(add_tun_fn.called)
self.agent.fdb_remove(None, fdb_entry)
self.assertFalse(del_flow_fn.called)
self.assertFalse(clean_tun_fn.called)
def test_fdb_ignore_self(self):
self._prepare_l2_pop_ofports()
self.agent.local_ip = 'agent_ip'
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports':
{'agent_ip':
[['mac', 'ip'],
n_const.FLOODING_ENTRY]}}}
with mock.patch.object(self.agent.tun_br,
"defer_apply_on") as defer_fn:
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(defer_fn.called)
self.agent.fdb_remove(None, fdb_entry)
self.assertFalse(defer_fn.called)
def test_fdb_add_flows(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net1':
{'network_type': 'gre',
'segment_id': 'tun1',
'ports':
{'2.2.2.2':
[['mac', 'ip'],
n_const.FLOODING_ENTRY]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'mod_flow'),
mock.patch.object(self.agent.tun_br, 'setup_tunnel_port'),
) as (add_flow_fn, mod_flow_fn, add_tun_fn):
add_tun_fn.return_value = '2'
self.agent.fdb_add(None, fdb_entry)
add_flow_fn.assert_called_with(table=constants.UCAST_TO_TUN,
priority=2,
dl_vlan='vlan1',
dl_dst='mac',
actions='strip_vlan,'
'set_tunnel:seg1,output:2')
mod_flow_fn.assert_called_with(table=constants.FLOOD_TO_TUN,
dl_vlan='vlan1',
actions='strip_vlan,'
'set_tunnel:seg1,output:1,2')
def test_fdb_del_flows(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports':
{'2.2.2.2':
[['mac', 'ip'],
n_const.FLOODING_ENTRY]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'mod_flow'),
mock.patch.object(self.agent.tun_br, 'delete_flows'),
) as (mod_flow_fn, del_flow_fn):
self.agent.fdb_remove(None, fdb_entry)
del_flow_fn.assert_called_with(table=constants.UCAST_TO_TUN,
dl_vlan='vlan2',
dl_dst='mac')
mod_flow_fn.assert_called_with(table=constants.FLOOD_TO_TUN,
dl_vlan='vlan2',
actions='strip_vlan,'
'set_tunnel:seg2,output:1')
def test_fdb_add_port(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net1':
{'network_type': 'gre',
'segment_id': 'tun1',
'ports': {'1.1.1.1': [['mac', 'ip']]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_flow'),
mock.patch.object(self.agent.tun_br, 'mod_flow'),
mock.patch.object(self.agent, 'setup_tunnel_port')
) as (add_flow_fn, mod_flow_fn, add_tun_fn):
self.agent.fdb_add(None, fdb_entry)
self.assertFalse(add_tun_fn.called)
fdb_entry['net1']['ports']['10.10.10.10'] = [['mac', 'ip']]
self.agent.fdb_add(None, fdb_entry)
add_tun_fn.assert_called_with('gre-0a0a0a0a', '10.10.10.10', 'gre')
def test_fdb_del_port(self):
self._prepare_l2_pop_ofports()
fdb_entry = {'net2':
{'network_type': 'gre',
'segment_id': 'tun2',
'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}}
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'delete_flows'),
mock.patch.object(self.agent.tun_br, 'delete_port')
) as (del_flow_fn, del_port_fn):
self.agent.fdb_remove(None, fdb_entry)
del_port_fn.assert_called_once_with('gre-02020202')
def test_recl_lv_port_to_preserve(self):
self._prepare_l2_pop_ofports()
self.agent.l2_pop = True
self.agent.enable_tunneling = True
with mock.patch.object(
self.agent.tun_br, 'cleanup_tunnel_port'
) as clean_tun_fn:
self.agent.reclaim_local_vlan('net1')
self.assertFalse(clean_tun_fn.called)
def test_recl_lv_port_to_remove(self):
self._prepare_l2_pop_ofports()
self.agent.l2_pop = True
self.agent.enable_tunneling = True
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'delete_port'),
mock.patch.object(self.agent.tun_br, 'delete_flows')
) as (del_port_fn, del_flow_fn):
self.agent.reclaim_local_vlan('net2')
del_port_fn.assert_called_once_with('gre-02020202')
def test_daemon_loop_uses_polling_manager(self):
    """daemon_loop must obtain a polling manager and enter rpc_loop."""
    with mock.patch(
        'neutron.agent.linux.polling.get_polling_manager') as mock_get_pm:
        with mock.patch.object(self.agent, 'rpc_loop') as mock_loop:
            self.agent.daemon_loop()
    mock_get_pm.assert_called_with(True, 'sudo',
                                   constants.DEFAULT_OVSDBMON_RESPAWN)
    # Bug fix: the original called mock_loop.called_once(), which is not a
    # Mock assertion method -- it merely creates a child mock and always
    # "passes".  Assert the call count explicitly instead.
    self.assertEqual(1, mock_loop.call_count)
def test_setup_tunnel_port_error_negative(self):
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
return_value='-1'),
mock.patch.object(ovs_neutron_agent.LOG, 'error')
) as (add_tunnel_port_fn, log_error_fn):
ofport = self.agent.setup_tunnel_port(
'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port)
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test_setup_tunnel_port_error_not_int(self):
with contextlib.nested(
mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
return_value=None),
mock.patch.object(ovs_neutron_agent.LOG, 'exception'),
mock.patch.object(ovs_neutron_agent.LOG, 'error')
) as (add_tunnel_port_fn, log_exc_fn, log_error_fn):
ofport = self.agent.setup_tunnel_port(
'gre-1', 'remote_ip', p_const.TYPE_GRE)
add_tunnel_port_fn.assert_called_once_with(
'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
self.agent.vxlan_udp_port)
log_exc_fn.assert_called_once_with(
_("ofport should have a value that can be "
"interpreted as an integer"))
log_error_fn.assert_called_once_with(
_("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
self.assertEqual(ofport, 0)
def test_tunnel_sync_with_ovs_plugin(self):
fake_tunnel_details = {'tunnels': [{'id': '42',
'ip_address': '100.101.102.103'}]}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
return_value=fake_tunnel_details),
mock.patch.object(self.agent, 'setup_tunnel_port')
) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn):
self.agent.tunnel_types = ['gre']
self.agent.tunnel_sync()
expected_calls = [mock.call('gre-42', '100.101.102.103', 'gre')]
setup_tunnel_port_fn.assert_has_calls(expected_calls)
def test_tunnel_sync_with_ml2_plugin(self):
fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
return_value=fake_tunnel_details),
mock.patch.object(self.agent, 'setup_tunnel_port')
) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn):
self.agent.tunnel_types = ['vxlan']
self.agent.tunnel_sync()
expected_calls = [mock.call('vxlan-64651f0f',
'100.101.31.15', 'vxlan')]
setup_tunnel_port_fn.assert_has_calls(expected_calls)
def test_tunnel_sync_invalid_ip_address(self):
fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'},
{'ip_address': '100.100.100.100'}]}
with contextlib.nested(
mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
return_value=fake_tunnel_details),
mock.patch.object(self.agent, 'setup_tunnel_port')
) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn):
self.agent.tunnel_types = ['vxlan']
self.agent.tunnel_sync()
setup_tunnel_port_fn.assert_called_once_with('vxlan-64646464',
'100.100.100.100',
'vxlan')
def test_tunnel_update(self):
kwargs = {'tunnel_ip': '10.10.10.10',
'tunnel_type': 'gre'}
self.agent.setup_tunnel_port = mock.Mock()
self.agent.enable_tunneling = True
self.agent.tunnel_types = ['gre']
self.agent.l2_pop = False
self.agent.tunnel_update(context=None, **kwargs)
expected_calls = [mock.call('gre-0a0a0a0a', '10.10.10.10', 'gre')]
self.agent.setup_tunnel_port.assert_has_calls(expected_calls)
class AncillaryBridgesTest(base.BaseTestCase):
    """Tests for discovery of ancillary (non-integration) OVS bridges."""

    def setUp(self):
        super(AncillaryBridgesTest, self).setUp()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        # Avoid rpc initialization for unit tests
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        self.kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF)

    def _test_ancillary_bridges(self, bridges, ancillary):
        """Build the agent with a faked bridge list and check discovery.

        :param bridges: bridge names ovs_lib.get_bridges will report
        :param ancillary: bridge names expected to be treated as ancillary
        """
        # Consumed in order, one entry per get_bridge_external_bridge_id call.
        device_ids = ancillary[:]

        def pullup_side_effect(self, *args):
            # NOTE(review): used as a mock side_effect, so the first call
            # argument lands in the 'self' parameter -- it works, but the
            # signature is misleading; confirm before reusing elsewhere.
            result = device_ids.pop(0)
            return result

        with contextlib.nested(
            mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
                       'OVSNeutronAgent.setup_integration_br',
                       return_value=mock.Mock()),
            mock.patch('neutron.agent.linux.utils.get_interface_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
                       'get_local_port_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.linux.ovs_lib.get_bridges',
                       return_value=bridges),
            mock.patch(
                'neutron.agent.linux.ovs_lib.get_bridge_external_bridge_id',
                side_effect=pullup_side_effect)):
            self.agent = ovs_neutron_agent.OVSNeutronAgent(**self.kwargs)
            self.assertEqual(len(ancillary), len(self.agent.ancillary_brs))
            if ancillary:
                bridges = [br.br_name for br in self.agent.ancillary_brs]
                for br in ancillary:
                    self.assertIn(br, bridges)

    def test_ancillary_bridges_single(self):
        bridges = ['br-int', 'br-ex']
        self._test_ancillary_bridges(bridges, ['br-ex'])

    def test_ancillary_bridges_none(self):
        bridges = ['br-int']
        self._test_ancillary_bridges(bridges, [])

    def test_ancillary_bridges_multiple(self):
        bridges = ['br-int', 'br-ex1', 'br-ex2']
        self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2'])
| {
"content_hash": "741615918a155582d29cdd9f14bd4c82",
"timestamp": "",
"source": "github",
"line_count": 832,
"max_line_length": 79,
"avg_line_length": 47.55288461538461,
"alnum_prop": 0.5553028005257304,
"repo_name": "vijayendrabvs/hap",
"id": "13464ca547b17ed4ae8b5bd6bf601cfb0605ed43",
"size": "40227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8801288"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
My App 2: Hello with Bokeh plot, Jinja2 template, and Bootstrap
"""
from bokeh.plotting import figure
from bokeh.embed import components
from flask import (
Flask, request, render_template, abort, Response, redirect, url_for
)
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
@app.route('/<xdata>', methods=['GET', 'POST'])
def hello(xdata=None):
    """Render the hello page, optionally plotting the squares of the input.

    GET /<xdata>: if *xdata* parses as comma-separated floats, a Bokeh line
    plot of y = x^2 is embedded in the rendered template; otherwise the
    page renders without a plot.
    POST: reads 'xdata' from the form, normalizes whitespace-separated
    input to comma-separated unless the checkbox says it already is, and
    redirects back to the GET view.
    """
    if request.method == 'GET':
        kwargs = {'title': 'hello'}
        if xdata is not None:
            try:
                values = [float(x) for x in xdata.split(',')]
            except ValueError:
                # Non-numeric input: render the page without a plot.
                pass
            else:
                plot = figure(title='squares from input')
                plot.line(values, [x * x for x in values], legend='y=x^2')
                plot_script, plot_div = components(plot)
                kwargs.update(plot_script=plot_script, plot_div=plot_div)
        return render_template('hello.html', **kwargs)
    elif request.method == 'POST':
        xdata = request.form.get('xdata')
        comma_separated = request.form.get('commaSeparatedCheck')
        if not comma_separated:
            # Whitespace-separated input: normalize to comma-separated.
            xdata = ','.join(xdata.split())
        return redirect(url_for('hello', xdata=xdata))
    # Flask only routes GET/POST here, but guard against anything else.
    # Bug fix: the original had an unreachable abort(Response('Hello'))
    # after this line; abort() raises, so that code could never run.
    abort(404)
# Run the Flask development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.run(debug=True)
| {
"content_hash": "c5f1ba9b3730ae153ed1ebd753cbc1f7",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 32.6,
"alnum_prop": 0.5774539877300614,
"repo_name": "thehackerwithin/berkeley",
"id": "674837423c867ca7af106d5a18b1c4907023d696",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_examples/flask/myapp-2/hello.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9196"
},
{
"name": "C++",
"bytes": "9944"
},
{
"name": "Dockerfile",
"bytes": "1068"
},
{
"name": "Fortran",
"bytes": "434"
},
{
"name": "Gnuplot",
"bytes": "240"
},
{
"name": "HTML",
"bytes": "1901059"
},
{
"name": "Jupyter Notebook",
"bytes": "23122238"
},
{
"name": "Makefile",
"bytes": "1416"
},
{
"name": "PostScript",
"bytes": "287518"
},
{
"name": "Python",
"bytes": "62059"
},
{
"name": "R",
"bytes": "5431"
},
{
"name": "Shell",
"bytes": "1493"
},
{
"name": "TeX",
"bytes": "51016"
}
],
"symlink_target": ""
} |
from ..distributions import MultivariateNormal
from ..likelihoods import _GaussianLikelihoodBase
from .marginal_log_likelihood import MarginalLogLikelihood
class ExactMarginalLogLikelihood(MarginalLogLikelihood):
    """
    The exact marginal log likelihood (MLL) for an exact Gaussian process with a
    Gaussian likelihood.

    .. note::
        Only works with a :obj:`~gpytorch.likelihoods.GaussianLikelihood` and a
        :obj:`~gpytorch.models.ExactGP`.  It also cannot be used in conjunction
        with stochastic optimization.

    :param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood for the model
    :param ~gpytorch.models.ExactGP model: The exact GP model

    Example:
        >>> # model is a gpytorch.models.ExactGP
        >>> # likelihood is a gpytorch.likelihoods.Likelihood
        >>> mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
        >>>
        >>> output = model(train_x)
        >>> loss = -mll(output, train_y)
        >>> loss.backward()
    """

    def __init__(self, likelihood, model):
        # Exact inference is only defined for a Gaussian observation model.
        if not isinstance(likelihood, _GaussianLikelihoodBase):
            raise RuntimeError("Likelihood must be Gaussian for exact inference")
        super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)

    def forward(self, function_dist, target, *params):
        r"""
        Compute the MLL given :math:`p(\mathbf f)` and :math:`\mathbf y`.

        :param ~gpytorch.distributions.MultivariateNormal function_dist:
            :math:`p(\mathbf f)`, the output of the latent function
            (the :obj:`gpytorch.models.ExactGP`)
        :param torch.Tensor target: :math:`\mathbf y`, the target values
        :rtype: torch.Tensor
        :return: Exact MLL; shape matches the batch shape of the model/input.
        """
        if not isinstance(function_dist, MultivariateNormal):
            raise RuntimeError("ExactMarginalLogLikelihood can only operate on Gaussian random variables")

        # Log probability of the targets under the marginal distribution.
        marginal = self.likelihood(function_dist, *params)
        res = marginal.log_prob(target)

        # Additional loss terms (SGPR / learned inducing points,
        # heteroskedastic likelihood models) and parameter priors
        # contribute additively to the objective.
        for loss_term in self.model.added_loss_terms():
            res = res.add(loss_term.loss(*params))
        for _, prior, closure, _ in self.named_priors():
            res.add_(prior.log_prob(closure()).sum())

        # Average over the number of data points.
        return res.div_(target.size(-1))

    def pyro_factor(self, output, target, *params):
        import pyro

        value = self(output, target, *params)
        pyro.factor("gp_mll", value)
        return value
| {
"content_hash": "ea50baf5e2918c54aee4c23b6401d3aa",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 108,
"avg_line_length": 42.220588235294116,
"alnum_prop": 0.667014977359805,
"repo_name": "jrg365/gpytorch",
"id": "f8444a55cd9f226fc505869bbe4d4f4bb533451d",
"size": "2895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpytorch/mlls/exact_marginal_log_likelihood.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6005"
},
{
"name": "C++",
"bytes": "242"
},
{
"name": "Python",
"bytes": "338860"
}
],
"symlink_target": ""
} |
from corehq.apps.sms.api import incoming as incoming_sms
from corehq.apps.twilio.models import TwilioBackend
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
EMPTY_RESPONSE = """<?xml version="1.0" encoding="UTF-8" ?>
<Response></Response>"""
@csrf_exempt
def sms_in(request):
    """Handle inbound SMS callbacks from Twilio.

    Twilio POSTs form-encoded message data; the message is forwarded to the
    SMS framework and an empty TwiML response is returned so Twilio sends
    no auto-reply.  Any non-POST request is rejected with 400.
    """
    if request.method != "POST":
        return HttpResponseBadRequest("POST Expected")
    # Twilio also posts AccountSid and To, which this handler does not need;
    # the original bound them to unused locals, removed here.
    message_sid = request.POST.get("MessageSid")
    from_ = request.POST.get("From")
    body = request.POST.get("Body")
    incoming_sms(
        from_,
        body,
        TwilioBackend.get_api_id(),
        backend_message_id=message_sid
    )
    return HttpResponse(EMPTY_RESPONSE)
| {
"content_hash": "b1a9b829754a7835041b82cd8431ea36",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 60,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.648491879350348,
"repo_name": "puttarajubr/commcare-hq",
"id": "eeb2cb5adc462afec7644340ce5b030ca447a5c8",
"size": "862",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "corehq/apps/twilio/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
"""
iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform.
"""
import os
import six
from xml.etree import ElementTree as ETree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hnas_backend
from cinder.volume import utils
from cinder.volume import volume_types
# Driver version string, passed to the backend on get_version calls.
HDS_HNAS_ISCSI_VERSION = '4.0.0'

LOG = logging.getLogger(__name__)

# Oslo config option pointing at this driver's XML configuration file.
iSCSI_OPTS = [
    cfg.StrOpt('hds_hnas_iscsi_config_file',
               default='/opt/hds/hnas/cinder_iscsi_conf.xml',
               help='Configuration file for HDS iSCSI cinder plugin')]

CONF = cfg.CONF
CONF.register_opts(iSCSI_OPTS)

# Fallback values applied when the XML configuration omits these keys.
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc',
                       'chap_enabled': 'True',
                       'ssh_port': '22'}

# Each HNAS EVS supports at most 32 iSCSI targets.
MAX_HNAS_ISCSI_TARGETS = 32
def factory_bend(drv_configs):
    """Build the HNAS backend helper used for all array communication."""
    return hnas_backend.HnasBackend(drv_configs)
def _loc_info(loc):
    """Split a provider_location string into its components.

    Short form (fewer than 5 comma-separated fields): only the id/LU pair,
    taken from the first field.  Long form: the id/LU pair is in the third
    field and the full tuple is kept under 'tgt'.
    """
    LOG.info(_LI("Parse_loc: %s"), loc)
    fields = loc.split(',')
    if len(fields) < 5:
        return {'id_lu': fields[0].split('.')}
    return {'id_lu': fields[2].split('.'), 'tgt': fields}
def _xml_read(root, element, check=None):
    """Read an xml element.

    :param root: parsed XML root element
    :param element: tag name (or path) to read
    :param check: if truthy, the element is mandatory -- a missing or empty
        value raises ParameterNotFound
    :returns: the stripped element text, or None when absent/empty
    """
    try:
        val = root.findtext(element)
        # Never log the actual password value.
        LOG.info(_LI("%(element)s: %(val)s"),
                 {'element': element,
                  'val': val if element != 'password' else '***'})
        if val:
            return val.strip()
        if check:
            raise exception.ParameterNotFound(param=element)
        return None
    except ETree.ParseError:
        # NOTE(review): findtext() on an already-parsed tree is not expected
        # to raise ParseError, so this handler looks unreachable -- confirm.
        if check:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("XML exception reading parameter: %s"), element)
        else:
            LOG.info(_LI("XML exception reading parameter: %s"), element)
            return None
def _read_config(xml_config_file):
    """Read and validate the hds driver specific xml config file.

    :param xml_config_file: path to the XML configuration file
    :returns: dict with the parsed configuration
    :raises NotFound: when the file cannot be read
    :raises ConfigNotFound: when the file cannot be parsed as XML
    :raises ParameterNotFound: when a mandatory parameter is missing or no
        service section is configured
    """
    if not os.access(xml_config_file, os.R_OK):
        msg = (_("Can't open config file: %s") % xml_config_file)
        raise exception.NotFound(message=msg)

    try:
        root = ETree.parse(xml_config_file).getroot()
    except Exception:
        msg = (_("Error parsing config file: %s") % xml_config_file)
        raise exception.ConfigNotFound(message=msg)

    # mandatory parameters
    config = {}
    arg_prereqs = ['mgmt_ip0', 'username']
    for req in arg_prereqs:
        config[req] = _xml_read(root, req, 'check')

    # optional parameters
    opt_parameters = ['hnas_cmd', 'ssh_enabled', 'chap_enabled',
                      'cluster_admin_ip0']
    for req in opt_parameters:
        config[req] = _xml_read(root, req)

    if config['chap_enabled'] is None:
        config['chap_enabled'] = HNAS_DEFAULT_CONFIG['chap_enabled']

    if config['ssh_enabled'] == 'True':
        config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', 'check')
        config['ssh_port'] = _xml_read(root, 'ssh_port')
        config['password'] = _xml_read(root, 'password')
        if config['ssh_port'] is None:
            config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
    else:
        # password is mandatory when not using SSH
        config['password'] = _xml_read(root, 'password', 'check')

    if config['hnas_cmd'] is None:
        config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd']

    config['hdp'] = {}
    config['services'] = {}

    # min one needed
    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
        if _xml_read(root, svc) is None:
            continue
        service = {'label': svc}

        # none optional
        for arg in ['volume_type', 'hdp', 'iscsi_ip']:
            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
        config['services'][service['volume_type']] = service
        config['hdp'][service['hdp']] = service['hdp']

    # at least one service required!
    # Bug fix: the original tested "config['services'].keys() is None",
    # which can never be true (keys() never returns None), so the check
    # was dead code and an empty services section slipped through.
    if not config['services']:
        raise exception.ParameterNotFound(param="No service found")

    return config
class HDSISCSIDriver(driver.ISCSIDriver):
"""HDS HNAS volume driver.
Version 1.0.0: Initial driver version
Version 2.2.0: Added support to SSH authentication
Version 3.2.0: Added pool aware scheduling
Fixed concurrency errors
Version 3.3.0: Fixed iSCSI target limitation error
Version 4.0.0: Added manage/unmanage features
"""
def __init__(self, *args, **kwargs):
    """Initialize, read different config parameters."""
    super(HDSISCSIDriver, self).__init__(*args, **kwargs)
    self.driver_stats = {}
    self.context = {}
    self.configuration.append_config_values(iSCSI_OPTS)
    # Parse the driver-specific XML configuration file into a dict.
    self.config = _read_config(
        self.configuration.hds_hnas_iscsi_config_file)
    self.type = 'HNAS'
    self.platform = self.type.lower()
    LOG.info(_LI("Backend type: %s"), self.type)
    # Backend helper that issues all commands against the array.
    self.bend = factory_bend(self.config)
def _array_info_get(self):
    """Query version information from the backend.

    Returns a 3-tuple built from the whitespace-separated fields of the
    backend reply (fields 1 and 6; presumably array id and version --
    TODO confirm against hnas_backend.get_version output).
    """
    cfg = self.config
    reply = self.bend.get_version(cfg['hnas_cmd'],
                                  HDS_HNAS_ISCSI_VERSION,
                                  cfg['mgmt_ip0'],
                                  cfg['username'],
                                  cfg['password'])
    fields = reply.split()
    return (fields[1], 'hnas_' + fields[1], fields[6])
def _get_iscsi_info(self):
    """Collect iSCSI portal information keyed by portal IP address.

    Only lines describing 'Up' links are recorded.  Each entry maps the
    portal IP to its controller ('ctl'), port and iSCSI port number.
    """
    out = self.bend.get_iscsi_info(self.config['hnas_cmd'],
                                   self.config['mgmt_ip0'],
                                   self.config['username'],
                                   self.config['password'])
    portals = {}
    for line in out.split('\n'):
        # only record up links
        if 'CTL' not in line or 'Up' not in line:
            continue
        inf = line.split()
        ctl, port, ip, ipp = inf[1], inf[3], inf[5], inf[7]
        portals[ip] = {'ctl': ctl, 'port': port, 'iscsi_port': ipp}
        LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s",
                  {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
    return portals
def _get_service(self, volume):
    """Return the HDP configured for a volume's pool label.

    The pool label is extracted from the volume's host field and looked
    up in the configured services.

    :param volume: dictionary volume reference
    :returns: HDP related to the service
    :raises ParameterNotFound: when no service matches the pool label
    """
    label = utils.extract_host(volume['host'], level='pool')
    LOG.info(_LI("Using service label: %s"), label)

    if label not in self.config['services'].keys():
        LOG.info(_LI("Available services: %s."),
                 self.config['services'].keys())
        LOG.error(_LE("No configuration found for service: %s."), label)
        raise exception.ParameterNotFound(param=label)
    return self.config['services'][label]['hdp']
def _get_service_target(self, volume):
"""Get the available service parameters
Get the available service parameters for a given volume using
its type.
:param volume: dictionary volume reference
"""
hdp = self._get_service(volume)
info = _loc_info(volume['provider_location'])
(arid, lun_name) = info['id_lu']
evsid = self.bend.get_evs(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
hdp)
svc_label = utils.extract_host(volume['host'], level='pool')
svc = self.config['services'][svc_label]
LOG.info(_LI("_get_service_target hdp: %s."), hdp)
LOG.info(_LI("config[services]: %s."), self.config['services'])
mapped, lunid, tgt = self.bend.check_lu(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
lun_name, hdp)
LOG.info(_LI("Target is %(map)s! Targetlist = %(tgtl)s."),
{'map': "mapped" if mapped else "not mapped", 'tgtl': tgt})
# The volume is already mapped to a LUN, so no need to create any
# targets
if mapped:
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
svc['port'], hdp, tgt['alias'], tgt['secret'])
return service
# Each EVS can have up to 32 targets. Each target can have up to 32
# LUNs attached and have the name format 'evs<id>-tgt<0-N>'. We run
# from the first 'evs1-tgt0' until we find a target that is not already
# created in the BE or is created but have slots to place new targets.
found_tgt = False
for i in range(0, MAX_HNAS_ISCSI_TARGETS):
tgt_alias = 'evs' + evsid + '-tgt' + six.text_type(i)
# TODO(erlon): we need to go to the BE 32 times here
tgt_exist, tgt = self.bend.check_target(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
hdp, tgt_alias)
if tgt_exist and len(tgt['luns']) < 32 or not tgt_exist:
# Target exists and has free space or, target does not exist
# yet. Proceed and use the target or create a target using this
# name.
found_tgt = True
break
# If we've got here and found_tgt is not True, we run out of targets,
# raise and go away.
if not found_tgt:
LOG.error(_LE("No more targets avaliable."))
raise exception.NoMoreTargets(param=tgt_alias)
LOG.info(_LI("Using target label: %s."), tgt_alias)
# Check if we have a secret stored for this target so we don't have to
# go to BE on every query
if 'targets' not in self.config.keys():
self.config['targets'] = {}
if tgt_alias not in self.config['targets'].keys():
self.config['targets'][tgt_alias] = {}
tgt_info = self.config['targets'][tgt_alias]
# HNAS - one time lookup
# see if the client supports CHAP authentication and if
# iscsi_secret has already been set, retrieve the secret if
# available, otherwise generate and store
if self.config['chap_enabled'] == 'True':
# It may not exist, create and set secret.
if 'iscsi_secret' not in tgt_info.keys():
LOG.info(_LI("Retrieving secret for service: %s."),
tgt_alias)
out = self.bend.get_targetsecret(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
tgt_alias, hdp)
tgt_info['iscsi_secret'] = out
if tgt_info['iscsi_secret'] == "":
randon_secret = utils.generate_password()[0:15]
tgt_info['iscsi_secret'] = randon_secret
self.bend.set_targetsecret(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
tgt_alias, hdp,
tgt_info['iscsi_secret'])
LOG.info(_LI("Set tgt CHAP secret for service: %s."),
tgt_alias)
else:
# We set blank password when the client does not
# support CHAP. Later on, if the client tries to create a new
# target that does not exists in the backend, we check for this
# value and use a temporary dummy password.
if 'iscsi_secret' not in tgt_info.keys():
# Warns in the first time
LOG.info(_LI("CHAP authentication disabled."))
tgt_info['iscsi_secret'] = ""
if 'tgt_iqn' not in tgt_info:
LOG.info(_LI("Retrieving target for service: %s."), tgt_alias)
out = self.bend.get_targetiqn(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
tgt_alias, hdp,
tgt_info['iscsi_secret'])
tgt_info['tgt_iqn'] = out
self.config['targets'][tgt_alias] = tgt_info
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
svc['port'], hdp, tgt_alias, tgt_info['iscsi_secret'])
return service
    def _get_stats(self):
        """Get HDP stats from HNAS.

        Builds the driver capability/stats dict reported to the scheduler:
        static backend identification plus one entry per configured pool,
        with capacities converted from the backend's MB figures to GB.

        :returns: dict of backend stats including a 'pools' list
        """
        hnas_stat = {}
        be_name = self.configuration.safe_get('volume_backend_name')
        hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
        hnas_stat["vendor_name"] = 'HDS'
        hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION
        hnas_stat["storage_protocol"] = 'iSCSI'
        hnas_stat['reserved_percentage'] = 0
        for pool in self.pools:
            out = self.bend.get_hdp_info(self.config['hnas_cmd'],
                                         self.config['mgmt_ip0'],
                                         self.config['username'],
                                         self.config['password'],
                                         pool['hdp'])
            LOG.debug('Query for pool %(pool)s: %(out)s.',
                      {'pool': pool['pool_name'], 'out': out})
            # fields 1..4 of the backend output: hdp id, total, (ignored), used
            (hdp, size, _ign, used) = out.split()[1:5]  # in MB
            pool['total_capacity_gb'] = int(size) / units.Ki
            pool['free_capacity_gb'] = (int(size) - int(used)) / units.Ki
            pool['allocated_capacity_gb'] = int(used) / units.Ki
            # NOTE(review): 'False' is a *string*, which is truthy if ever
            # evaluated as a boolean — confirm the scheduler expects a string
            # here rather than the boolean False.
            pool['QoS_support'] = 'False'
            pool['reserved_percentage'] = 0
        hnas_stat['pools'] = self.pools
        # NOTE(review): "stats: stats:" looks like an accidental duplication
        # in the log message.
        LOG.info(_LI("stats: stats: %s."), hnas_stat)
        return hnas_stat
    def _get_hdp_list(self):
        """Get HDPs from HNAS.

        Parses the backend's textual HDP listing: lines containing 'HDP'
        are split into whitespace-separated fields, and the HDP identifier
        is extracted depending on whether the line describes an fsid
        (>= 1024) or a pool (2-digit id).

        :returns: list of HDP id strings
        """
        out = self.bend.get_hdp_info(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'])
        hdp_list = []
        for line in out.split('\n'):
            if 'HDP' in line:
                inf = line.split()
                if int(inf[1]) >= units.Ki:
                    # HDP fsids start at units.Ki (1024)
                    # assumes the fsid-style line carries the id in field 11
                    # — TODO confirm against backend output format
                    hdp_list.append(inf[11])
                else:
                    # HDP pools are 2-digits max
                    hdp_list.extend(inf[1:2])
        # returns a list of HDP IDs
        LOG.info(_LI("HDP list: %s"), hdp_list)
        return hdp_list
def _check_hdp_list(self):
"""Verify HDPs in HNAS array.
Verify that all HDPs specified in the configuration files actually
exists on the storage.
"""
hdpl = self._get_hdp_list()
lst = self.config['hdp'].keys()
for hdp in lst:
if hdp not in hdpl:
LOG.error(_LE("HDP not found: %s"), hdp)
err = "HDP not found: " + hdp
raise exception.ParameterNotFound(param=err)
# status, verify corresponding status is Normal
def _id_to_vol(self, volume_id):
"""Given the volume id, retrieve the volume object from database.
:param volume_id: volume id string
"""
vol = self.db.volume_get(self.context, volume_id)
return vol
def _update_vol_location(self, volume_id, loc):
"""Update the provider location.
:param volume_id: volume id string
:param loc: string provider location value
"""
update = {'provider_location': loc}
self.db.volume_update(self.context, volume_id, update)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
pass
    def do_setup(self, context):
        """Setup and verify HDS HNAS storage connection.

        Queries array info, validates the configured HDPs, builds the
        pool list from the configured services, and resolves the iSCSI
        portal (port/ctl/iscsi_port) for each service's iscsi_ip.

        :param context: request context, kept for later DB calls
        :raises: exception.ParameterNotFound if a service's iscsi_ip is
                 not an iSCSI portal on the device
        """
        self.context = context
        (self.arid, self.hnas_name, self.lumax) = self._array_info_get()
        self._check_hdp_list()
        service_list = self.config['services'].keys()
        for svc in service_list:
            svc = self.config['services'][svc]
            pool = {}
            pool['pool_name'] = svc['volume_type']
            pool['service_label'] = svc['volume_type']
            pool['hdp'] = svc['hdp']
            self.pools.append(pool)
        LOG.info(_LI("Configured pools: %s"), self.pools)
        iscsi_info = self._get_iscsi_info()
        LOG.info(_LI("do_setup: %s"), iscsi_info)
        # cache portal details onto each service's config entry
        for svc in self.config['services'].keys():
            svc_ip = self.config['services'][svc]['iscsi_ip']
            if svc_ip in iscsi_info.keys():
                LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip)
                self.config['services'][svc]['port'] = \
                    iscsi_info[svc_ip]['port']
                self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl']
                self.config['services'][svc]['iscsi_port'] = \
                    iscsi_info[svc_ip]['iscsi_port']
            else:  # configured iscsi address not found on device
                LOG.error(_LE("iSCSI portal not found "
                              "for service: %s"), svc_ip)
                raise exception.ParameterNotFound(param=svc_ip)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
"""Create an export. Moved to initialize_connection.
:param context:
:param volume: volume reference
"""
name = volume['name']
LOG.debug("create_export %s", name)
pass
def remove_export(self, context, volume):
"""Disconnect a volume from an attached instance.
:param context: context
:param volume: dictionary volume reference
"""
provider = volume['provider_location']
name = volume['name']
LOG.debug("remove_export provider %(provider)s on %(name)s",
{'provider': provider, 'name': name})
pass
    def create_volume(self, volume):
        """Create a LU on HNAS.

        :param volume: dictionary volume reference
        :returns: dict with 'provider_location' set to "<array id>.<lun>"
        """
        hdp = self._get_service(volume)
        out = self.bend.create_lu(self.config['hnas_cmd'],
                                  self.config['mgmt_ip0'],
                                  self.config['username'],
                                  self.config['password'],
                                  hdp,
                                  '%s' % (int(volume['size']) * units.Ki),
                                  volume['name'])
        LOG.info(_LI("create_volume: create_lu returns %s"), out)
        # backend output: field 1 is the LUN, field 5 its size in MB
        lun = self.arid + '.' + out.split()[1]
        sz = int(out.split()[5])
        # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd
        LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created."),
                 {'lun': lun, 'sz': sz})
        return {'provider_location': lun}
    def create_cloned_volume(self, dst, src):
        """Create a clone of a volume.

        :param dst: dictionary destination volume reference
        :param src: dictionary source volume reference
        :returns: dict with 'provider_location' of the clone
        :raises: VolumeBackendAPIException if the sizes differ
        """
        if src['size'] != dst['size']:
            msg = 'clone volume size mismatch'
            raise exception.VolumeBackendAPIException(data=msg)
        hdp = self._get_service(dst)
        size = int(src['size']) * units.Ki
        source_vol = self._id_to_vol(src['id'])
        (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
        out = self.bend.create_dup(self.config['hnas_cmd'],
                                   self.config['mgmt_ip0'],
                                   self.config['username'],
                                   self.config['password'],
                                   slun, hdp, '%s' % size,
                                   dst['name'])
        # backend output: field 1 is the new LUN, field 5 its size in MB
        lun = self.arid + '.' + out.split()[1]
        size = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.",
                  {'lun': lun, 'size': size})
        return {'provider_location': lun}
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: dictionary volume reference
        :param new_size: int size in GB to extend
        """
        hdp = self._get_service(volume)
        (arid, lun) = _loc_info(volume['provider_location'])['id_lu']
        self.bend.extend_vol(self.config['hnas_cmd'],
                             self.config['mgmt_ip0'],
                             self.config['username'],
                             self.config['password'],
                             hdp, lun,
                             '%s' % (new_size * units.Ki),
                             volume['name'])
        LOG.info(_LI("LUN %(lun)s extended to %(size)s GB."),
                 {'lun': lun, 'size': new_size})
    def delete_volume(self, volume):
        """Delete an LU on HNAS.

        If the provider location records an active target connection
        ('tgt'), the iSCSI connection is torn down before the LU itself
        is deleted.

        :param volume: dictionary volume reference
        """
        prov_loc = volume['provider_location']
        if prov_loc is None:
            LOG.error(_LE("delete_vol: provider location empty."))
            return
        info = _loc_info(prov_loc)
        (arid, lun) = info['id_lu']
        if 'tgt' in info.keys():  # still connected?
            LOG.info(_LI("delete lun loc %s"), info['tgt'])
            # loc = id.lun
            (_portal, iqn, loc, ctl, port, hlun) = info['tgt']
            self.bend.del_iscsi_conn(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'],
                                     ctl, iqn, hlun)
        name = self.hnas_name
        LOG.debug("delete lun %(lun)s on %(name)s", {'lun': lun, 'name': name})
        hdp = self._get_service(volume)
        self.bend.delete_lu(self.config['hnas_cmd'],
                            self.config['mgmt_ip0'],
                            self.config['username'],
                            self.config['password'],
                            hdp, lun)
    @cinder_utils.synchronized('volume_mapping')
    def initialize_connection(self, volume, connector):
        """Map the created volume to connector['initiator'].

        Attaches the LU to the service target, records the resulting
        connection in the volume's provider_location, and returns the
        iSCSI connection properties for the initiator.

        :param volume: dictionary volume reference
        :param connector: dictionary connector reference
        :returns: dict with 'driver_volume_type' and iSCSI 'data'
        :raises: exception.ISCSITargetAttachFailed if the backend attach
                 command fails (e.g. target limit reached)
        """
        LOG.info(_LI("initialize volume %(vol)s connector %(conn)s"),
                 {'vol': volume, 'conn': connector})
        service_info = self._get_service_target(volume)
        (ip, ipp, ctl, port, _hdp, tgtalias, secret) = service_info
        info = _loc_info(volume['provider_location'])
        if 'tgt' in info.keys():  # spurious repeat connection
            LOG.debug("initiate_conn: tgt already set %s", info['tgt'])
        (arid, lun_name) = info['id_lu']
        loc = arid + '.' + lun_name
        try:
            out = self.bend.add_iscsi_conn(self.config['hnas_cmd'],
                                           self.config['mgmt_ip0'],
                                           self.config['username'],
                                           self.config['password'],
                                           lun_name, _hdp, port, tgtalias,
                                           connector['initiator'])
        except processutils.ProcessExecutionError:
            msg = _("Error attaching volume %s. "
                    "Target limit might be reached!") % volume['id']
            raise exception.ISCSITargetAttachFailed(message=msg)
        hnas_portal = ip + ':' + ipp
        # backend output: field 1 is the host LUN, field 13 the full IQN
        # — TODO confirm against add_iscsi_conn output format
        hlun = out.split()[1]
        fulliqn = out.split()[13]
        # provider location format: portal,tgtalias,arid.lun,ctl,port,hlun
        tgt = hnas_portal + ',' + tgtalias + ',' + loc + ',' + ctl + ','
        tgt += port + ',' + hlun
        LOG.info(_LI("initiate: connection %s"), tgt)
        properties = {}
        properties['provider_location'] = tgt
        self._update_vol_location(volume['id'], tgt)
        properties['target_discovered'] = False
        properties['target_portal'] = hnas_portal
        properties['target_iqn'] = fulliqn
        properties['target_lun'] = hlun
        properties['volume_id'] = volume['id']
        properties['auth_username'] = connector['initiator']
        if self.config['chap_enabled'] == 'True':
            properties['auth_method'] = 'CHAP'
            properties['auth_password'] = secret
        conn_info = {'driver_volume_type': 'iscsi', 'data': properties}
        LOG.debug("initialize_connection: conn_info: %s.", conn_info)
        return conn_info
    @cinder_utils.synchronized('volume_mapping')
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate a connection to a volume.

        Removes the iSCSI connection recorded in provider_location and
        rewrites the location back to the bare "<array id>.<lun>" form.

        :param volume: dictionary volume reference
        :param connector: dictionary connector reference
        :returns: dict with the stripped 'provider_location', or None when
                  no connection was recorded
        """
        info = _loc_info(volume['provider_location'])
        if 'tgt' not in info.keys():  # spurious disconnection
            LOG.warning(_LW("terminate_conn: provider location empty."))
            return
        (arid, lun) = info['id_lu']
        (_portal, tgtalias, loc, ctl, port, hlun) = info['tgt']
        LOG.info(_LI("terminate: connection %s"), volume['provider_location'])
        self.bend.del_iscsi_conn(self.config['hnas_cmd'],
                                 self.config['mgmt_ip0'],
                                 self.config['username'],
                                 self.config['password'],
                                 ctl, tgtalias, hlun)
        self._update_vol_location(volume['id'], loc)
        return {'provider_location': loc}
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot.

        :param volume: dictionary volume reference
        :param snapshot: dictionary snapshot reference
        :returns: dict with 'provider_location' of the new volume
        """
        size = int(snapshot['volume_size']) * units.Ki
        (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu']
        hdp = self._get_service(volume)
        out = self.bend.create_dup(self.config['hnas_cmd'],
                                   self.config['mgmt_ip0'],
                                   self.config['username'],
                                   self.config['password'],
                                   slun, hdp, '%s' % (size),
                                   volume['name'])
        # backend output: field 1 is the new LUN, field 5 its size in MB
        lun = self.arid + '.' + out.split()[1]
        sz = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.",
                  {'lun': lun, 'sz': sz})
        return {'provider_location': lun}
    def create_snapshot(self, snapshot):
        """Create a snapshot.

        Implemented as a LU duplicate of the source volume on the same
        service HDP.

        :param snapshot: dictionary snapshot reference
        :returns: dict with 'provider_location' of the snapshot LU
        """
        source_vol = self._id_to_vol(snapshot['volume_id'])
        hdp = self._get_service(source_vol)
        size = int(snapshot['volume_size']) * units.Ki
        (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
        out = self.bend.create_dup(self.config['hnas_cmd'],
                                   self.config['mgmt_ip0'],
                                   self.config['username'],
                                   self.config['password'],
                                   slun, hdp,
                                   '%s' % (size),
                                   snapshot['name'])
        # backend output: field 1 is the new LUN, field 5 its size in MB
        lun = self.arid + '.' + out.split()[1]
        size = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(size)s MB is created.",
                  {'lun': lun, 'size': size})
        return {'provider_location': lun}
    def delete_snapshot(self, snapshot):
        """Delete a snapshot.

        Verifies the snapshot's array id matches this driver's array
        before deleting the backing LU.

        :param snapshot: dictionary snapshot reference
        :raises: VolumeBackendAPIException on array id mismatch
        """
        loc = snapshot['provider_location']
        # to take care of spurious input
        if loc is None:
            # which could cause exception.
            return
        (arid, lun) = loc.split('.')
        source_vol = self._id_to_vol(snapshot['volume_id'])
        hdp = self._get_service(source_vol)
        myid = self.arid
        if arid != myid:
            LOG.error(_LE("Array mismatch %(myid)s vs %(arid)s"),
                      {'myid': myid, 'arid': arid})
            msg = 'Array id mismatch in delete snapshot'
            raise exception.VolumeBackendAPIException(data=msg)
        self.bend.delete_lu(self.config['hnas_cmd'],
                            self.config['mgmt_ip0'],
                            self.config['username'],
                            self.config['password'],
                            hdp, lun)
        LOG.debug("LUN %s is deleted.", lun)
        return
def get_volume_stats(self, refresh=False):
"""Get volume stats. If 'refresh', run update the stats first."""
if refresh:
self.driver_stats = self._get_stats()
return self.driver_stats
def get_pool(self, volume):
if not volume['volume_type']:
return 'default'
else:
metadata = {}
type_id = volume['volume_type_id']
if type_id is not None:
metadata = volume_types.get_volume_type_extra_specs(type_id)
if not metadata.get('service_label'):
return 'default'
else:
if metadata['service_label'] not in \
self.config['services'].keys():
return 'default'
else:
pass
return metadata['service_label']
    def _check_pool_and_fs(self, volume, fs_label):
        """Validation of the pool and filesystem.

        Checks if the file system for the volume-type chosen matches the
        one passed in the volume reference. Also, checks if the pool
        for the volume type matches the pool for the host passed.

        :param volume: Reference to the volume.
        :param fs_label: Label of the file system.
        :raises: ManageExistingVolumeTypeMismatch on either mismatch
        """
        pool_from_vol_type = self.get_pool(volume)
        pool_from_host = utils.extract_host(volume['host'], level='pool')
        if self.config['services'][pool_from_vol_type]['hdp'] != fs_label:
            # NOTE(review): msg is a (string, dict) tuple — the dict is never
            # interpolated into the message; this looks like a broken
            # formatting attempt (missing % placeholders).
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen does not match the file system "
                     "passed in the volume reference."),
                   {'File System passed': fs_label,
                    'File System for volume type':
                        self.config['services'][pool_from_vol_type]['hdp']})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
        if pool_from_host != pool_from_vol_type:
            # NOTE(review): same tuple-as-message pattern as above.
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen does not match the pool of "
                     "the host."),
                   {'Pool of the volume type': pool_from_vol_type,
                    'Pool of the host': pool_from_host})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
def _get_info_from_vol_ref(self, vol_ref):
"""Gets information from the volume reference.
Returns the information (File system and volume name) taken from
the volume reference.
:param vol_ref: existing volume to take under management
"""
vol_info = vol_ref.split('/')
fs_label = vol_info[0]
vol_name = vol_info[1]
return fs_label, vol_name
    def manage_existing_get_size(self, volume, existing_vol_ref):
        """Gets the size to manage_existing.

        Returns the size of volume to be managed by manage_existing.

        :param volume: cinder volume to manage
        :param existing_vol_ref: existing volume to take under management
        :returns: size of the LU (int)
        :raises: ManageExistingInvalidReference if the reference is
                 malformed or the LU is not found on the backend
        """
        # Check that the reference is valid.
        if 'source-name' not in existing_vol_ref:
            reason = _('Reference must contain source-name element.')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_vol_ref, reason=reason)
        ref_name = existing_vol_ref['source-name']
        fs_label, vol_name = self._get_info_from_vol_ref(ref_name)
        LOG.debug("File System: %(fs_label)s "
                  "Volume name: %(vol_name)s.",
                  {'fs_label': fs_label, 'vol_name': vol_name})
        lu_info = self.bend.get_existing_lu_info(self.config['hnas_cmd'],
                                                 self.config['mgmt_ip0'],
                                                 self.config['username'],
                                                 self.config['password'],
                                                 fs_label, vol_name)
        if fs_label in lu_info:
            # parse "<label>: <value> <unit>" from the 4th line of output
            # — assumes a fixed backend output layout; TODO confirm
            aux = lu_info.split('\n')[3]
            size = aux.split(':')[1]
            size_unit = size.split(' ')[2]
            if size_unit == 'TB':
                # NOTE(review): units.k is 1000 — if the backend reports
                # binary terabytes, units.Ki (1024) may be intended here.
                return int(size.split(' ')[1]) * units.k
            else:
                return int(size.split(' ')[1])
        else:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_vol_ref,
                reason=_('Volume not found on configured storage backend.'))
    def manage_existing(self, volume, existing_vol_ref):
        """Manages an existing volume.

        The specified Cinder volume is to be taken into Cinder management.
        The driver will verify its existence and then rename it to the
        new Cinder volume name. It is expected that the existing volume
        reference is a File System and some volume_name;
        e.g., openstack/vol_to_manage

        :param volume: cinder volume to manage
        :param existing_vol_ref: driver-specific information used to identify a
                                 volume
        :returns: dict with 'provider_location' of the managed LU
        """
        ref_name = existing_vol_ref['source-name']
        fs_label, vol_name = self._get_info_from_vol_ref(ref_name)
        LOG.debug("Asked to manage ISCSI volume %(vol)s, with vol "
                  "ref %(ref)s.", {'vol': volume['id'],
                                   'ref': existing_vol_ref['source-name']})
        # validate pool/file-system consistency before renaming
        self._check_pool_and_fs(volume, fs_label)
        self.bend.rename_existing_lu(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'], fs_label,
                                     volume['name'], vol_name)
        LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."),
                 {'name': volume['name']})
        lun = self.arid + '.' + volume['name']
        return {'provider_location': lun}
    def unmanage(self, volume):
        """Unmanages a volume from cinder.

        Removes the specified volume from Cinder management.
        Does not delete the underlying backend storage object. A log entry
        will be made to notify the Admin that the volume is no longer being
        managed.

        :param volume: cinder volume to unmanage
        """
        svc = self._get_service(volume)
        # prefix makes the orphaned LU recognizable on the backend
        new_name = 'unmanage-' + volume['name']
        vol_path = svc + '/' + volume['name']
        self.bend.rename_existing_lu(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'], svc, new_name,
                                     volume['name'])
        LOG.info(_LI("Cinder ISCSI volume with current path %(path)s is "
                     "no longer being managed. The new name is %(unm)s."),
                 {'path': vol_path, 'unm': new_name})
| {
"content_hash": "ef24e58134c1c9dc918a73e5ddc72bbf",
"timestamp": "",
"source": "github",
"line_count": 967,
"max_line_length": 79,
"avg_line_length": 38.820062047569806,
"alnum_prop": 0.5123737979168331,
"repo_name": "scottdangelo/RemoveVolumeMangerLocks",
"id": "158b87c787782aacd9cf251555a3d8daf41fefe3",
"size": "38187",
"binary": false,
"copies": "5",
"ref": "refs/heads/RemoveVolumeManagerLocks",
"path": "cinder/volume/drivers/hitachi/hnas_iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13128387"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ReindexTest(BitcoinTestFramework):
    """Regression test: -reindex rebuilds the block index to the same height."""

    def setup_chain(self):
        # Start from a clean data directory with a single node.
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self):
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir))

    def run_test(self):
        # Mine a few blocks, then restart the node with -reindex and verify
        # the rebuilt index reaches the same tip height.
        self.nodes[0].generate(3)
        stop_node(self.nodes[0], 0)
        wait_bitcoinds()
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   ["-debug", "-reindex",
                                    "-checkblockindex=1"])
        assert_equal(self.nodes[0].getblockcount(), 3)
        # Fixed: was a Python-2-only `print "Success"` statement, inconsistent
        # with the print() call used in setup_chain above.
        print("Success")
if __name__ == '__main__':
    # Run the reindex regression test when invoked directly.
    ReindexTest().main()
| {
"content_hash": "f9c00c2ec3531c2a10c2fc8620f2abae",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 102,
"avg_line_length": 33.208333333333336,
"alnum_prop": 0.6474278544542033,
"repo_name": "marlengit/hardfork_prototype_1_mvf-bu",
"id": "52534ca89e7cfb2765ce84cdd7b00bffaef40252",
"size": "1112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/reindex.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "647624"
},
{
"name": "C++",
"bytes": "4698227"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3821"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "156005"
},
{
"name": "Makefile",
"bytes": "96858"
},
{
"name": "Objective-C",
"bytes": "5375"
},
{
"name": "Objective-C++",
"bytes": "7360"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "765443"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "36574"
}
],
"symlink_target": ""
} |
import psycopg2
from psycopg2.extensions import AsIs
from osgeo import ogr
from xml.etree import ElementTree as ETree
import math
import random
import simplejson as json
from cStringIO import StringIO
import struct
import binascii
from pgpointcloud_utils import PcRunTimeException, PcInvalidArgException
# mapping between OGR datatypes and pgPointCloud datatypes.
# Every OGR field type handled here is widened to an 8-byte double on the
# pgPointCloud side; 'struct' is the struct-module format code used when
# packing point values into WKB (see make_wkb_point), and 'size' is the
# byte width advertised in the pc:dimension schema element.
DATA_TYPE_MAPPING = {
    ogr.OFTInteger: {
        'interpretation': 'double',
        'size': 8,
        # NOTE(review): only the integer entry carries a 'cast' callable —
        # confirm whether the other types rely on values already being float.
        'cast': float,
        'struct': 'd'
    },
    ogr.OFTReal: {
        'interpretation': 'double',
        'size': 8,
        'struct': 'd'
    },
    # temporal types are stored as seconds (see build_pc_dimension notes)
    ogr.OFTDate: {
        'interpretation': 'double',
        'size': 8,
        'struct': 'd'
    },
    ogr.OFTTime: {
        'interpretation': 'double',
        'size': 8,
        'struct': 'd'
    },
    ogr.OFTDateTime: {
        'interpretation': 'double',
        'size': 8,
        'struct': 'd'
    }
}
def build_pc_dimension(doc, dimension, index):
    """Append one pc:dimension element to the schema document.

    :param doc: parent ElementTree element (the pc:PointCloudSchema root)
    :param dimension: dict with 'name' and 'type' (source OGR type and
                      destination size/interpretation)
    :param index: 1-based dimension position within the schema
    """
    pc_dimension = ETree.Element('pc:dimension')
    doc.append(pc_dimension)
    pc_position = ETree.Element('pc:position')
    pc_dimension.append(pc_position)
    pc_position.text = str(index)
    pc_name = ETree.Element('pc:name')
    pc_dimension.append(pc_name)
    pc_name.text = dimension['name']
    pc_size = ETree.Element('pc:size')
    pc_dimension.append(pc_size)
    pc_size.text = str(dimension['type']['dest']['size'])
    pc_interpretation = ETree.Element('pc:interpretation')
    pc_dimension.append(pc_interpretation)
    pc_interpretation.text = dimension['type']['dest']['interpretation']
    # temporal OGR types get a description documenting the epoch convention
    if dimension['type']['source'] in [
        ogr.OFTDate,
        ogr.OFTTime,
        ogr.OFTDateTime
    ]:
        pc_description = ETree.Element('pc:description')
        pc_dimension.append(pc_description)
        if dimension['type']['source'] == ogr.OFTDate:
            pc_description.text = 'date as number of seconds UTC from UNIX epoch to 00:00:00 of the date'
        elif dimension['type']['source'] == ogr.OFTTime:
            pc_description.text = 'time as number of seconds UTC from 00:00:00'
        elif dimension['type']['source'] == ogr.OFTDateTime:
            pc_description.text = 'datetime as number of seconds UTC from UNIX epoch'
    '''
    pc_metadata = ETree.Element('pc:metadata')
    pc_dimension.append(pc_metadata)
    # additional tags indicating that dimension is special
    # date, time, datetime
    ogr_ = ETree.Element('ogr')
    pc_metadata.append(ogr_)
    data_type = ETree.Element('datatype')
    ogr_.append(data_type)
    data_type.text = ogr.GetFieldTypeName(dimension['type']['source'])
    '''
def build_pc_schema(fields):
    """Render a pgPointCloud schema XML document for the given dimensions.

    :param fields: dict with a 'dimension' list describing each dimension
    :returns: XML string (declaration + pc:PointCloudSchema document)
    """
    declaration = '<?xml version="1.0" encoding="UTF-8"?>'
    root = ETree.Element('pc:PointCloudSchema')
    root.set('xmlns:pc', "http://pointcloud.org/schemas/PC/1.1")
    root.set('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
    metadata = ETree.Element('pc:metadata')
    root.append(metadata)
    # request pgPointCloud's dimensional compression for stored patches
    compression = ETree.Element('Metadata')
    metadata.append(compression)
    compression.set('name', 'compression')
    compression.text = 'dimensional'
    # dimension positions are 1-based
    for position, dimension in enumerate(fields['dimension'], start=1):
        build_pc_dimension(root, dimension, position)
    return declaration + ETree.tostring(root)
def add_pc_schema(dbconn, pc_schema, srid=0):
    """Register ``pc_schema`` in pointcloud_formats and return its PCID.

    If an identical schema document is already registered its PCID is
    reused; otherwise a free PCID in 1..65535 is claimed (the query picks
    the maximum unused value) and the schema is inserted and committed.

    :param dbconn: open psycopg2 connection
    :param pc_schema: schema XML document string
    :param srid: spatial reference id stored with the schema
    :returns: PCID integer, or None if a database error occurred
    """
    # Fixed: the cursor was created inside the try block while being closed
    # in the finally clause — a failure in dbconn.cursor() raised NameError
    # there, masking the real error. Create it before entering the try.
    cursor = dbconn.cursor()
    try:
        # check if this schema already exists
        cursor.execute("""
            SELECT
                pcid
            FROM pointcloud_formats
            WHERE schema = %s
        """, [pc_schema])
        # it does exist, reuse its PCID
        if cursor.rowcount > 0:
            return cursor.fetchone()[0]
        # pick a free PCID (max unused value in 1..65535)
        cursor.execute("""
            SELECT
                max(avail)
            FROM generate_series(1, 65535) avail
            LEFT JOIN pointcloud_formats used
                ON avail = used.pcid
            WHERE used.pcid IS NULL
        """)
        if cursor.rowcount > 0:
            pcid = cursor.fetchone()[0]
        else:
            raise PcRunTimeException(
                message='Query error getting the next available PCID'
            )
        cursor.execute(
            'INSERT INTO pointcloud_formats (pcid, srid, schema) VALUES (%s, %s, %s)', (
                pcid,
                srid,
                pc_schema
            )
        )
        dbconn.commit()
    except psycopg2.Error:
        dbconn.rollback()
        return None
    finally:
        cursor.close()
    return pcid
def create_pcpatch_table(dbconn, table_name, table_action):
    """Create, append-check or recreate the destination PcPatch table.

    :param dbconn: open psycopg2 connection
    :param table_name: destination table name (already quoted as needed)
    :param table_action: 'a' = append (verify the table exists and return),
                         'd' = drop before create, anything else = create
    :raises: PcInvalidArgException when appending to a missing table,
             PcRunTimeException on any other database error

    NOTE(review): no commit is issued here — presumably the caller commits
    the surrounding transaction; confirm. Also, cursor creation inside the
    try block means a failure there hits the finally-close with an unbound
    name.
    """
    try:
        cursor = dbconn.cursor()
        # append to existing table, check that table exists
        if table_action == 'a':
            try:
                cursor.execute("""
                    SELECT 1 FROM %s
                """, [AsIs(table_name)])
            except psycopg2.Error:
                raise PcInvalidArgException(
                    message='Table not found: %s' % table_name
                )
            return
        # drop table
        if table_action == 'd':
            cursor.execute("""
                DROP TABLE IF EXISTS %s
            """, [AsIs(table_name)])
        cursor.execute("""
            CREATE TABLE %s (
                id BIGSERIAL PRIMARY KEY,
                pa PCPATCH,
                layer_name TEXT,
                file_name TEXT,
                group_by JSON,
                metadata JSON
            )
        """, [AsIs(table_name)])
    except psycopg2.Error:
        dbconn.rollback()
        raise PcRunTimeException(
            message='Query error creating PcPatch table'
        )
    finally:
        cursor.close()
def make_wkb_point(pcid, frmt, vals):
    """Pack one PcPoint as a hex-encoded little-endian WKB blob.

    Layout: endianness byte (1 = little-endian), uint32 PCID, then the
    dimension values packed according to the struct codes in ``frmt``.

    :param pcid: pointcloud_formats PCID identifying the point schema
    :param frmt: struct format codes for the dimension values (e.g. 'dd')
    :param vals: list of dimension values matching ``frmt``
    :returns: hex digest of the packed point
    """
    packer = struct.Struct('< B I' + frmt)
    payload = [1, pcid] + vals
    return binascii.hexlify(packer.pack(*payload))
def insert_pcpoints(dbconn, table_name, wkb_set, group):
    """Insert a batch of WKB points into the staging table via executemany.

    :param dbconn: open psycopg2 connection
    :param table_name: staging table name (interpolated via AsIs)
    :param wkb_set: iterable of hex WKB point strings (see make_wkb_point)
    :param group: group-by payload, JSON-serialized into each row
    :returns: True on success
    :raises: PcRunTimeException on database error (after rollback)
    """
    group_str = json.dumps(group)
    values = [
        [wkb, group_str]
        for wkb in wkb_set
    ]
    try:
        cursor = dbconn.cursor()
        # %% escapes produce real placeholders after AsIs interpolation
        statement = """
            INSERT INTO %s (pt, group_by)
            VALUES (%%s::pcpoint, %%s)
        """ % (
            AsIs(table_name)
        )
        cursor.executemany(
            statement,
            values
        )
    except psycopg2.Error:
        dbconn.rollback()
        raise PcRunTimeException(
            message='Query error inserting PcPoints'
        )
    finally:
        cursor.close()
    return True
def copy_pcpoints(dbconn, table_name, wkb_set, group):
    """Bulk-load WKB points into the staging table with COPY FROM.

    Faster alternative to insert_pcpoints: rows are streamed as
    tab-separated text through an in-memory buffer.

    :param dbconn: open psycopg2 connection
    :param table_name: staging table name
    :param wkb_set: iterable of hex WKB point strings
    :param group: group-by payload, JSON-serialized into each row
    :returns: True on success
    :raises: PcRunTimeException on database error (after rollback)
    """
    group_str = json.dumps(group)
    f = StringIO(
        '\n'.join([
            '\t'.join([wkb, group_str])
            for wkb in wkb_set
        ])
    )
    try:
        cursor = dbconn.cursor()
        cursor.copy_from(f, table_name, columns=('pt', 'group_by'))
    except psycopg2.Error:
        dbconn.rollback()
        raise PcRunTimeException(
            message='Query error copying PcPoints'
        )
    finally:
        cursor.close()
    return True
def get_extent_corners(cursor, table_name, in_utm=True):
    """Return the bounding-box corners of all points in ``table_name``.

    :param cursor: open database cursor
    :param table_name: staging table of PcPoints
    :param in_utm: when True, the extent is first reprojected into the UTM
                   zone of its centroid (via the SQL ``utmzone`` helper —
                   assumed to be installed in the database; confirm)
    :returns: (xmin, ymax, xmax, ymin), i.e. upper-left then lower-right
    """
    if not in_utm:
        cursor.execute("""
            WITH extent AS (
                SELECT
                    ST_Envelope(ST_Collect(pt::geometry)) AS shp
                FROM %s
            )
            SELECT
                ST_XMin(shp),
                ST_YMax(shp),
                ST_XMax(shp),
                ST_YMin(shp)
            FROM extent
        """ % (
            AsIs(table_name)
        ))
    else:
        cursor.execute("""
            WITH raw_extent AS (
                SELECT
                    ST_Envelope(ST_Collect(pt::geometry)) AS shp
                FROM %s
            ), utmzone AS (
                SELECT
                    utmzone(ST_Centroid(shp)) AS srid
                FROM raw_extent
            ), extent AS (
                SELECT
                    ST_Transform(shp::geometry, srid) AS shp
                FROM raw_extent
                JOIN utmzone
                    ON true
            )
            SELECT
                ST_XMin(shp),
                ST_YMax(shp),
                ST_XMax(shp),
                ST_YMin(shp)
            FROM extent
        """ % (
            AsIs(table_name)
        ))
    return cursor.fetchone()
def _compute_patch_size(dbconn, temp_table, max_points_per_patch=400):
    """Search for a square grid cell size (meters, UTM) such that snapping
    the staged points to the grid yields patches of at most
    ``max_points_per_patch`` points.

    Starts at one tenth of the larger extent edge and bisects: the step
    halves each round, moving down while some cell exceeds the budget and
    up otherwise.

    :param dbconn: open psycopg2 connection
    :param temp_table: staging table of PcPoints (see create_temp_table)
    :param max_points_per_patch: upper bound of points per patch
    :returns: chosen patch edge length in meters
    :raises: PcRunTimeException on database error (after rollback)
    """

    def get_patch_count(cursor, temp_table, dim, max_points):
        # Number of grid cells of edge `dim` holding more than `max_points`
        # points; 0 means `dim` satisfies the budget everywhere.
        cursor.execute("""
            WITH raw_extent AS (
                SELECT
                    ST_Envelope(ST_Collect(pt::geometry)) AS shp
                FROM %s
            ), utmzone AS (
                SELECT
                    utmzone(ST_Centroid(shp)) AS srid
                FROM raw_extent
            ), points AS (
                SELECT
                    ST_Transform(pt::geometry, srid) AS geom
                FROM %s
                JOIN utmzone
                    ON true
            ), extent AS (
                SELECT
                    ST_Transform(shp::geometry, srid) AS shp
                FROM raw_extent
                JOIN utmzone
                    ON true
            )
            SELECT
                ST_Centroid(ST_Collect(geom)) AS shp,
                count(points.*) AS geom_count
            FROM points
            JOIN extent
                ON true
            GROUP BY ST_SnapToGrid(geom, ST_XMin(extent.shp), ST_YMax(extent.shp), %s, %s)
            HAVING count(points.*) > %s
        """ % (
            AsIs(temp_table),
            AsIs(temp_table),
            dim,
            dim,
            max_points
        ))
        return cursor.rowcount

    # Fixed: cursor created outside the try block so the finally-close
    # cannot raise NameError if cursor creation itself fails.
    cursor = dbconn.cursor()
    try:
        ulx, uly, lrx, lry = get_extent_corners(cursor, temp_table)
        width = lrx - ulx
        height = uly - lry
        # starting patch size in meters (due to UTM zone usage)
        patch_size = int(max(width / 10., height / 10.))
        # extent smaller than ~10m each way: any patch size is valid
        if patch_size < 1:
            return 100
        # history of attempted sizes/counts; index 0 is a sentinel
        old_patch_sizes = [0]
        old_patch_counts = [0]
        # Fixed: these scalars were read before assignment on the first
        # loop iteration (NameError); initialize them here.
        old_patch_size = 0
        old_patch_count = 0
        long_tail_count = 0
        while True:
            # patch size less than 1
            # means no reasonable patch size worked
            if patch_size < 1:
                # use largest patch_size that had
                # the least number of patches over max points per patch
                min_patch_count = min(old_patch_counts[1:])
                max_patch_size = -1
                for idx in xrange(len(old_patch_counts) - 1, 0, -1):
                    if (
                        old_patch_counts[idx] == min_patch_count and
                        old_patch_sizes[idx] > max_patch_size
                    ):
                        max_patch_size = old_patch_sizes[idx]
                patch_size = max_patch_size
                break
            patch_count = \
                get_patch_count(cursor, temp_table, patch_size, max_points_per_patch)
            # Fixed: record every attempt so the fallback above has history
            # to inspect (the lists were never appended to before, so the
            # fallback would have crashed on min([])).
            old_patch_sizes.append(patch_size)
            old_patch_counts.append(patch_count)
            if abs(patch_size - old_patch_size) <= 1:
                if patch_count == 0:
                    if long_tail_count >= 5:
                        patch_size = old_patch_size
                        break
                    elif patch_size > old_patch_size:
                        long_tail_count += 1
                elif old_patch_count == 0:
                    patch_size = old_patch_size
                    break
            elif long_tail_count > 0 and patch_count > 0 and old_patch_count == 0:
                patch_size = old_patch_size
                break
            delta = max(abs(patch_size - old_patch_size) / 2, 1)
            if patch_count > 0:
                delta *= -1
            old_patch_size = patch_size
            patch_size += delta
            old_patch_count = patch_count
        # (unused cols/rows computation removed)
    except psycopg2.Error:
        dbconn.rollback()
        raise PcRunTimeException(
            message='Query error computing grid for PcPatches'
        )
    finally:
        cursor.close()
    return patch_size
def insert_pcpatches(
    dbconn, file_table, temp_table, layer,
    metadata=None, file_name=None, max_points_per_patch=400
):
    """Aggregate staged PcPoints into PcPatches and store them.

    Computes a grid cell size, then groups points by their group_by key
    and grid cell (in the extent centroid's UTM zone) and inserts one
    PC_Patch row per group into ``file_table``.

    :param dbconn: open psycopg2 connection
    :param file_table: destination PcPatch table (see create_pcpatch_table)
    :param temp_table: staging table of PcPoints
    :param layer: OGR layer the points came from (its name is recorded)
    :param metadata: optional metadata stored as JSON with each patch
    :param file_name: optional source file name recorded with each patch
    :param max_points_per_patch: patch-size budget for the grid search
    :returns: True on success
    :raises: PcRunTimeException on database error (after rollback)
    """
    layer_name = layer.GetName()
    if metadata:
        # try to be nice with json metadata
        try:
            metadata = json.loads(metadata)
        except json.JSONDecodeError:
            pass
    try:
        patch_size = _compute_patch_size(dbconn, temp_table, max_points_per_patch)
        cursor = dbconn.cursor()
        cursor.execute("""
            WITH raw_extent AS (
                SELECT
                    ST_Envelope(ST_Collect(pt::geometry)) AS shp
                FROM %s
            ), utmzone AS (
                SELECT
                    utmzone(ST_Centroid(shp)) AS srid
                FROM raw_extent
            ), points AS (
                SELECT
                    ST_Transform(pt::geometry, srid) AS geom,
                    pt,
                    group_by
                FROM %s
                JOIN utmzone
                    ON true
            ), extent AS (
                SELECT
                    ST_Transform(shp::geometry, srid) AS shp
                FROM raw_extent
                JOIN utmzone
                    ON true
            )
            INSERT INTO %s (layer_name, file_name, group_by, metadata, pa)
            SELECT
                layer_name,
                %s,
                group_by::json,
                %s::json,
                pa
            FROM (
                SELECT
                    %s AS layer_name,
                    group_by,
                    PC_Patch(pt) AS pa
                FROM points
                JOIN extent
                    ON true
                GROUP BY group_by, ST_SnapToGrid(geom, ST_XMin(extent.shp), ST_YMax(extent.shp), %s, %s)
            ) sub
        """, [
            AsIs(temp_table),
            AsIs(temp_table),
            AsIs(file_table),
            file_name,
            json.dumps(metadata),
            layer_name,
            patch_size,
            patch_size
        ])
    except psycopg2.Error:
        dbconn.rollback()
        raise PcRunTimeException(
            message='Query error inserting PcPatches'
        )
    finally:
        cursor.close()
    return True
def create_temp_table(dbconn):
    """Create a session-temporary PcPoint staging table.

    The table holds one PcPoint per row plus its serialized group-by key
    and is dropped automatically at transaction commit (ON COMMIT DROP).

    :param dbconn: open psycopg2 connection
    :returns: quoted name of the temporary table
    :raises: PcRunTimeException on database error (after rollback)
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    suffix = ''.join(random.choice(alphabet) for _ in range(16))
    table_name = '"temp_' + suffix + '"'
    try:
        cursor = dbconn.cursor()
        cursor.execute("""
            CREATE TEMPORARY TABLE %s (
                id BIGSERIAL PRIMARY KEY,
                pt PCPOINT,
                group_by TEXT
            )
            ON COMMIT DROP;
        """, [AsIs(table_name)])
    except psycopg2.Error:
        dbconn.rollback()
        raise PcRunTimeException(
            message='Query error creating temporary PcPoint table'
        )
    finally:
        cursor.close()
    return table_name
| {
"content_hash": "a7ff3e0fd13b8a21cacf17b682efc028",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 105,
"avg_line_length": 24.171378091872793,
"alnum_prop": 0.5580732402602149,
"repo_name": "dustymugs/pgpointcloud_utils",
"id": "de0709f836ace6d7bcf05f573e7594a88ae02d4b",
"size": "13681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ogr2pgpc/pgpointcloud.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PLpgSQL",
"bytes": "2600"
},
{
"name": "Python",
"bytes": "114809"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from pydl.models.pipeline import PCA
class PCATestCase(unittest.TestCase):
    """Unit tests for the PCA pipeline step: config round-trips and fits."""

    def test_get_config(self):
        step = PCA(n_components=5)
        self.assertDictEqual(
            step.get_config(),
            {'n_components': 5, 'whiten': False, 'name': 'pca'})

    def test_from_config(self):
        step = PCA.from_config(
            config={'n_components': 5, 'whiten': False, 'name': 'pca'})
        self.assertIsInstance(step, PCA)
        self.assertEqual(step.name, 'pca')
        self.assertEqual(step.n_components, 5)
        self.assertEqual(step.whiten, False)

    def test_to_json(self):
        actual = PCA().to_json()
        self.assertEqual(
            '{"class_name": "PCA", "config": {"n_components": null, '
            '"name": "pca", "whiten": false}}',
            actual)

    def test_fit(self):
        step = PCA(n_components=5)
        x, y = create_dataset()
        self.assertEqual((7, 10), x.shape)
        self.assertEqual((7,), y.shape)
        step.fit(x, y)
        # fitting populates the sklearn-style learned attributes
        self.assertIsNotNone(step.components_)
        self.assertIsNotNone(step.n_components_)
        self.assertIsNotNone(step.mean_)

    def test_fit_transform(self):
        step = PCA(n_components=5)
        x, y = create_dataset()
        self.assertEqual((7, 10), x.shape)
        self.assertEqual((7,), y.shape)
        xt, yt = step.fit_transform(x, y)
        # features reduced to n_components, labels untouched
        self.assertEqual((7, 5), xt.shape)
        self.assertEqual((7,), yt.shape)

    def test_transform_after_from_config(self):
        step = PCA(n_components=5)
        x, y = create_dataset()
        self.assertEqual((7, 10), x.shape)
        self.assertEqual((7,), y.shape)
        x1, y1 = step.fit_transform(x, y)
        config = step.get_config()
        for key in ('n_components', 'whiten', 'name'):
            self.assertIn(key, config)
        # a step rebuilt from config must transform identically
        restored = PCA.from_config(config)
        x2, y2 = restored.transform(x, y)
        np.testing.assert_almost_equal(x1, x2, decimal=10)
        np.testing.assert_array_equal(y1, y2)
def create_dataset():
    """Build a toy dataset: 7 samples x 10 identical features, all-ones target.

    Returns:
        (x, y): x is a (7, 10) float array whose ten columns are copies of
        the same 7-value column; y is a (7,) array of ones.
    """
    column = np.array([[1.5], [2.0], [0.34], [-0.12], [0.67], [2.4], [-1.1]])
    features = np.tile(column, (1, 10))
    targets = np.ones(7)
    return features, targets
| {
"content_hash": "89475b8c76c39f62184db1b178f51a23",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 113,
"avg_line_length": 26.46067415730337,
"alnum_prop": 0.5545647558386412,
"repo_name": "rafaeltg/pydl",
"id": "a424012594ffb99b3b7d25b918082545900e9a48",
"size": "2355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pydl/models/pipeline/unittests/test_decomposition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84942"
}
],
"symlink_target": ""
} |
import testtools
from tempest.test import attr
from tempest_lib import exceptions
from murano.tests.functional.api import base
class TestServices(base.TestCase):
    """API tests for service CRUD inside a murano environment session.

    Each test builds a fresh environment plus session; create_session()
    returns (response, body), hence the ubiquitous [1] indexing.
    NOTE(review): service['?'] appears to be the MuranoPL system-metadata
    key carrying the object id — confirm against create_demo_service.
    """

    @attr(type='smoke')
    def test_get_services_list(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        resp, services_list = self.client.get_services_list(env['id'],
                                                            sess['id'])
        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(services_list, list))

    @attr(type='negative')
    def test_get_services_list_without_env_id(self):
        # Missing environment id must 404, not fall back to anything.
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.client.get_services_list,
                          None,
                          sess['id'])

    @attr(type='negative')
    def test_get_services_list_after_delete_env(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.client.delete_environment(env['id'])
        self.assertRaises(exceptions.NotFound,
                          self.client.get_services_list,
                          env['id'],
                          sess['id'])

    @attr(type='negative')
    def test_get_services_list_after_delete_session(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.client.delete_session(env['id'], sess['id'])
        self.assertRaises(exceptions.NotFound,
                          self.client.get_services_list,
                          env['id'],
                          sess['id'])

    @attr(type='smoke')
    def test_create_and_delete_demo_service(self):
        # The list length must grow by one on create and shrink back on delete.
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        services_list = self.client.get_services_list(env['id'], sess['id'])[1]
        resp, service = self.create_demo_service(env['id'], sess['id'])
        services_list_ = self.client.get_services_list(env['id'],
                                                       sess['id'])[1]
        self.assertEqual(resp.status, 200)
        self.assertEqual(len(services_list) + 1, len(services_list_))
        resp = self.client.delete_service(env['id'],
                                          sess['id'],
                                          service['?']['id'])[0]
        services_list_ = self.client.get_services_list(env['id'],
                                                       sess['id'])[1]
        self.assertEqual(resp.status, 200)
        self.assertEqual(len(services_list), len(services_list_))

    @attr(type='negative')
    def test_create_demo_service_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.create_demo_service,
                          None,
                          sess['id'])

    @attr(type='negative')
    def test_create_demo_service_without_sess_id(self):
        # An empty session id is an auth failure, not a 404.
        env = self.create_environment('test')
        self.client.create_session(env['id'])
        self.assertRaises(exceptions.Unauthorized,
                          self.create_demo_service,
                          env['id'],
                          "")

    @attr(type='negative')
    def test_delete_demo_service_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_service,
                          None,
                          sess['id'],
                          service['?']['id'])

    @attr(type='negative')
    def test_delete_demo_service_without_session_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.client.delete_service,
                          env['id'],
                          "",
                          service['?']['id'])

    @attr(type='negative')
    def test_double_delete_service(self):
        # Second delete of the same service must 404.
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.client.delete_service(env['id'], sess['id'], service['?']['id'])
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_service,
                          env['id'],
                          sess['id'],
                          service['?']['id'])

    @attr(type='smoke')
    def test_get_service(self):
        # A created service reads back identically.
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        resp, service_ = self.client.get_service(env['id'],
                                                 sess['id'],
                                                 service['?']['id'])
        self.assertEqual(resp.status, 200)
        self.assertEqual(service, service_)

    @attr(type='negative')
    def test_get_service_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.client.get_service,
                          None,
                          sess['id'],
                          service['?']['id'])

    # Skipped pending the referenced upstream bug fix.
    @testtools.skip("https://bugs.launchpad.net/murano/+bug/1295573")
    @attr(type='negative')
    def test_get_service_without_sess_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.client.get_service,
                          env['id'],
                          "",
                          service['?']['id'])
class TestServicesTenantIsolation(base.NegativeTestCase):
    """Tenant isolation: every service operation attempted through
    alt_client (a different tenant) must raise Unauthorized."""

    @attr(type='negative')
    def test_get_list_services_in_env_from_another_tenant(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.alt_client.get_services_list, env['id'],
                          sess['id'])

    @attr(type='negative')
    def test_create_service_in_env_from_another_tenant(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.create_demo_service, env['id'],
                          sess['id'], client=self.alt_client)

    @attr(type='negative')
    def test_delete_service_in_env_from_another_tenant(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.alt_client.delete_service, env['id'],
                          sess['id'], service['?']['id'])

    @attr(type='negative')
    def test_get_service_in_env_from_another_tenant(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.alt_client.get_service, env['id'],
                          sess['id'], service['?']['id'])
| {
"content_hash": "e98572db9b5dd93562d7849a8439c15f",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 34.702127659574465,
"alnum_prop": 0.5234825260576333,
"repo_name": "telefonicaid/murano",
"id": "e252114726fe8eb6158a569542f547190525e770",
"size": "8769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/tests/functional/api/v1/test_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "826132"
},
{
"name": "Shell",
"bytes": "4296"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from flask import request
from indico.modules.admin.views import WPAdmin
from indico.modules.users import User
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin
class WPUser(WPJinjaMixin, WPDecorated):
    """Base WP for user profile pages.

    Whenever you use this, you MUST include `user` in the params passed to
    `render_template`. Any RH using this should inherit from `RHUserBase`
    which already handles user/admin access. In this case, simply add
    ``user=self.user`` to your `render_template` call.
    """

    template_prefix = 'users/'

    def __init__(self, rh, active_menu_item, **kwargs):
        # Expose the selected sidebar entry to the template context.
        kwargs['active_menu_item'] = active_menu_item
        WPDecorated.__init__(self, rh, **kwargs)

    def _get_breadcrumbs(self):
        # When viewing someone else's profile the URL carries user_id;
        # otherwise it is the current user's own profile page.
        if 'user_id' in request.view_args:
            user = User.get(request.view_args['user_id'])
            # NOTE(review): assumes User.get() found a row; a stale user_id
            # would raise AttributeError here — confirm upstream validation.
            profile_breadcrumb = _('Profile of {name}').format(name=user.full_name)
        else:
            profile_breadcrumb = _('My Profile')
        return render_breadcrumbs(profile_breadcrumb)

    def _get_body(self, params):
        return self._get_page_content(params)
class WPUsersAdmin(WPAdmin):
    """Admin-area page wrapper for the user management screens."""
    template_prefix = 'users/'
    # JS bundle loaded on these admin pages.
    bundles = ('module_users.js',)
| {
"content_hash": "6b05608047f77b3a2ae332a56979b957",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 83,
"avg_line_length": 33.21951219512195,
"alnum_prop": 0.6806167400881057,
"repo_name": "mvidalgarcia/indico",
"id": "d86f2c7bcf14b659c313f4a2dbb71d7fad98811e",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/users/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "538590"
},
{
"name": "HTML",
"bytes": "1345380"
},
{
"name": "JavaScript",
"bytes": "1781971"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4381847"
},
{
"name": "Shell",
"bytes": "3568"
},
{
"name": "TeX",
"bytes": "22182"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('parsing')

# Parser categories this framework dispatches on.
PARSER_TYPES = ['movie', 'series', 'music']

# Mapping of parser type to (mapping of parser name to plugin instance)
parsers = {}
# Mapping from parser type to the name of the default/selected parser for that type
default_parsers = {}
# Per-task overrides: filled in on_task_start from config, cleared on_task_end.
selected_parsers = {}
# We need to wait until manager startup to access other plugin instances, to make sure they have all been loaded
@event('manager.startup')
def init_parsers(manager):
    """Prepare our list of parsing plugins and default parsers.

    Populates the module-level `parsers` registry for every parser type and
    picks the highest-priority plugin as that type's default. Runs at manager
    startup so all plugin instances are guaranteed to be loaded.
    """
    for parser_type in PARSER_TYPES:
        parsers[parser_type] = {}
        for p in plugin.get_plugins(group=parser_type + '_parser'):
            parsers[parser_type][p.name.replace('parser_', '')] = p.instance

        # Select default parsers based on priority.
        # dict.items() (rather than the Python-2-only iteritems()) keeps this
        # working on both Python 2 and Python 3.
        func_name = 'parse_' + parser_type
        default_parsers[parser_type] = max(
            parsers[parser_type].items(),
            key=lambda p: getattr(getattr(p[1], func_name), 'priority', 0))[0]
class PluginParsing(object):
    """Provides parsing framework"""

    @property
    def schema(self):
        # Create a schema allowing only our registered parsers to be used under the key of each parser type
        properties = {}
        for parser_type in PARSER_TYPES:
            parser_names = [p.name.replace('parser_', '') for p in plugin.get_plugins(group=parser_type + '_parser')]
            properties[parser_type] = {'type': 'string', 'enum': parser_names}
        s = {
            'type': 'object',
            'properties': properties,
            'additionalProperties': False
        }
        return s

    def on_task_start(self, task, config):
        # Set up user selected parsers from config for this task run
        if config:
            selected_parsers.update(config)

    def on_task_end(self, task, config):
        # Restore default parsers for next task run
        selected_parsers.clear()

    # An aborted task also resets the per-task parser selection.
    on_task_abort = on_task_end

    def parse_series(self, data, name=None, **kwargs):
        """
        Use the selected series parser to parse series information from `data`

        :param data: The raw string to parse information from.
        :param name: The series name to parse data for. If not supplied, parser will attempt to guess series name
            automatically from `data`.
        :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.
        """
        # NOTE(review): this uses .get(key, default) while parse_movie/parse_music
        # use `get(key) or default` — behavior differs only for falsy overrides;
        # confirm the inconsistency is intentional.
        parser = parsers['series'][selected_parsers.get('series', default_parsers.get('series'))]
        return parser.parse_series(data, name=name, **kwargs)

    def parse_movie(self, data, **kwargs):
        """
        Use the selected movie parser to parse movie information from `data`

        :param data: The raw string to parse information from
        :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.
        """
        parser = parsers['movie'][selected_parsers.get('movie') or default_parsers['movie']]
        return parser.parse_movie(data, **kwargs)

    def parse_music(self, data, **kwargs):
        """
        Use the selected audio parser to parse audio information from `data`

        :param data: The raw string to parse information from
        :returns: An object containing the parsed information. The `valid` attribute will be set depending on success.
        """
        parser = parsers['music'][selected_parsers.get('music') or default_parsers['music']]
        return parser.parse_music(data, **kwargs)
@event('plugin.register')
def register_plugin():
    # Register this framework under the plugin name 'parsing' (API version 2).
    plugin.register(PluginParsing, 'parsing', api_ver=2)
| {
"content_hash": "5ca4e7eabc8a646eb64c3d686585b707",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 118,
"avg_line_length": 39.52577319587629,
"alnum_prop": 0.6517996870109546,
"repo_name": "lildadou/Flexget",
"id": "a695ca32988d9c85de44682962986af6ef382978",
"size": "3834",
"binary": false,
"copies": "1",
"ref": "refs/heads/music_snep",
"path": "flexget/plugins/parsers/plugin_parsing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4878"
},
{
"name": "HTML",
"bytes": "26542"
},
{
"name": "JavaScript",
"bytes": "43172"
},
{
"name": "Python",
"bytes": "2526165"
}
],
"symlink_target": ""
} |
from jug import TaskGenerator
from jug.tests.jugfiles.exceptions import FailingTask
@TaskGenerator
def some_fail(x):
    """Identity task, except that inputs 2, 5 and 8 raise FailingTask."""
    doomed = (2, 5, 8)
    if x in doomed:
        raise FailingTask
    return x
@TaskGenerator
def plus1(x):
    """Task that returns its argument incremented by one."""
    incremented = x + 1
    return incremented
# Build the task graph: some_fail over 0..9, then plus1 over each result.
vals = [some_fail(i) for i in range(10)]
vals = [plus1(v) for v in vals]

# This jugfile has 20 tasks of which 10 are executable and 10 are waiting.
# Of the first 10, 3 will fail.
# With full execution this should result in 14 complete tasks, 3 failed and 3 waiting
| {
"content_hash": "667fe5e8c1954cc0acb2c522aac58492",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 85,
"avg_line_length": 24.9,
"alnum_prop": 0.714859437751004,
"repo_name": "luispedro/jug",
"id": "b64f21892faf5fa89e1fd8ffe83682d5ed348456",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jug/tests/jugfiles/failing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nix",
"bytes": "774"
},
{
"name": "Python",
"bytes": "269470"
},
{
"name": "Shell",
"bytes": "520"
},
{
"name": "TeX",
"bytes": "23794"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy import ma
from .qctests import QCCheckVar
def constant_cluster_size(x, tol=0):
    """Estimate the cluster size with (nearly) constant value

    Returns how many consecutive neighbor values are within a given
    tolerance range. Note that invalid values, like NaN, are ignored.
    """
    assert np.ndim(x) == 1, 'Not ready for more than 1 dimension'

    # Widen the tolerance slightly to absorb roundings between numeric types.
    tol = tol + 1e-5 * tol

    valid_idx = np.nonzero(~ma.getmaskarray(ma.fix_invalid(x)))[0]
    diffs = np.diff(np.atleast_1d(x)[valid_idx])

    counts = np.zeros(np.shape(x), dtype='i')
    for pos, iv in enumerate(valid_idx):
        # Walk forward and backward from each valid sample, counting how far
        # the accumulated drift stays within tolerance.
        for drift in (diffs[pos:], diffs[0:pos][::-1]):
            beyond = np.absolute(drift.cumsum()) > tol
            if beyond.any():
                counts[iv] += np.nonzero(beyond)[0].min()
            else:
                counts[iv] += beyond.size
    return counts
class ConstantClusterSize(QCCheckVar):
    """
    Need to implement a check on time. TSG specifies constant value during 6 hrs.
    """
    def set_features(self):
        # Absolute cluster size per sample, plus the size normalized by the
        # number of valid (unmasked) samples.
        cluster_size = constant_cluster_size(self.data[self.varname])
        N = ma.compressed(self.data[self.varname]).size
        cluster_fraction = cluster_size / N
        self.features = {'constant_cluster_size': cluster_size,
                         'constant_cluster_fraction': cluster_fraction,
                         }

    def test(self):
        self.flags = {}
        threshold = self.cfg['threshold']
        # assert (np.size(threshold) == 1) \
        #        and (threshold is not None) \
        #        and (np.isfinite(threshold))
        # A percentage threshold (e.g. '10%') tests the fraction feature;
        # a plain number tests the absolute cluster size.
        if isinstance(threshold, str) and (threshold[-1] == '%'):
            threshold = float(threshold[:-1]) * 1e-2
            feature_name = 'constant_cluster_fraction'
        else:
            feature_name = 'constant_cluster_size'
        flag = np.zeros(self.data[self.varname].shape, dtype='i1')
        feature = self.features[feature_name]
        flag[np.nonzero(feature > threshold)] = self.flag_bad
        flag[np.nonzero(feature <= threshold)] = self.flag_good
        # 9 marks masked/missing samples — presumably the IOC missing-value
        # flag convention; confirm against the QCCheckVar base class.
        flag[ma.getmaskarray(self.data[self.varname])] = 9
        self.flags[feature_name] = flag
| {
"content_hash": "e0e7a20711c626cca997171c945603ce",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 35.39705882352941,
"alnum_prop": 0.5961778147071043,
"repo_name": "castelao/CoTeDe",
"id": "739fde6ef8daaf219f295f8b2cc40aff85f16334",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cotede/qctests/constant_cluster_size.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "178847"
},
{
"name": "TeX",
"bytes": "2430"
}
],
"symlink_target": ""
} |
import time
import sys
import RPi.GPIO as GPIO
# Transmission parameters: per-outlet on/off bit patterns and the pulse
# timings (in seconds) used to clock each bit out of the GPIO pin.
# NOTE(review): the key "signal_spacing_decs" looks like a typo for
# "..._secs" — it is used consistently, so renaming would be a behavior
# change; confirm before touching.
SIGNAL_CONFIG = {
    "signal_patterns": {
        "outlet_a": {
            "on": "1001010000001010000010000",
            "off": "1001010000001010000001100"
        }
    },
    "signal_long_secs": 0.0008,
    "signal_short_secs": 0.0003,
    "signal_spacing_decs": 0.008
}

# BCM-numbered GPIO pin driving the transmitter.
DATA_PIN = 11
class IREmitter:
    """Bit-bangs the configured on/off pulse patterns out of DATA_PIN.

    Timing is done with blocking time.sleep on purpose (see comment below);
    do not reorder the GPIO/sleep statements.
    """

    def __init__(self, attempts, is_debug = False):
        # attempts: how many times each pattern is repeated per emit call.
        # is_debug: when True, print instead of driving the GPIO pin.
        self.__attempts = attempts
        self.__is_debug = is_debug

    def __emit(
        self,
        signal_pattern,
        signal_long_secs,
        signal_short_secs,
        signal_spacing_decs
    ):
        # Each '1' bit is a long-high/short-low pulse, each '0' the inverse;
        # patterns are separated by a low spacing gap and repeated
        # `attempts` times to improve reception.
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(DATA_PIN, GPIO.OUT)
        for i in range(self.__attempts):
            # Deliberately use synchronous sleep because this is extremely timing
            # sensitive
            for s in signal_pattern:
                if s == "1":
                    GPIO.output(DATA_PIN, 1)
                    time.sleep(signal_long_secs)
                    GPIO.output(DATA_PIN, 0)
                    time.sleep(signal_short_secs)
                elif s == "0":
                    GPIO.output(DATA_PIN, 1)
                    time.sleep(signal_short_secs)
                    GPIO.output(DATA_PIN, 0)
                    time.sleep(signal_long_secs)
            GPIO.output(DATA_PIN, 0)
            time.sleep(signal_spacing_decs)
        GPIO.cleanup()

    def __emit_with_config(self, signal_group, signal_name):
        # Look up the requested pattern in SIGNAL_CONFIG and transmit it,
        # or just log the request in debug mode.
        if self.__is_debug:
            print(
                "Emitting signal group: %s, signal name: %s" %
                (signal_group, signal_name)
            )
        else:
            self.__emit(
                SIGNAL_CONFIG["signal_patterns"][signal_group][signal_name],
                SIGNAL_CONFIG["signal_long_secs"],
                SIGNAL_CONFIG["signal_short_secs"],
                SIGNAL_CONFIG["signal_spacing_decs"]
            )

    def emit_on(self, signal_group):
        self.__emit_with_config(signal_group, "on")

    def emit_off(self, signal_group):
        self.__emit_with_config(signal_group, "off")
| {
"content_hash": "61cccaaacd1013486946065e3980c013",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 26.318840579710145,
"alnum_prop": 0.6029735682819384,
"repo_name": "initialxy/initialxy-irswitch",
"id": "51ae4f277a96f3b5143fb45781706d048edbc48d",
"size": "1816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/iremitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5251"
},
{
"name": "HTML",
"bytes": "370"
},
{
"name": "JavaScript",
"bytes": "13716"
},
{
"name": "Python",
"bytes": "3557"
}
],
"symlink_target": ""
} |
from string import maketrans
# Substitution-ciphered puzzle text, decoded below by ali_decode().
ori = 'Of zit kggd zitkt qkt ygxk ortfzoeqs wqlatzwqssl qfr zvg ortfzoeqs yggzwqssl. Fgv oy ngx vqfz zg hxz zitd of gft soft, piv dgfn lgsxzogfl qkt zitkt? Zohl: hstqlt eiqfut zit ygkd gy zit fxdwtk ngx utz. Zit Hkgukqddtkl!'
def ali_decode(letter):
    """Decode the puzzle's substitution cipher.

    Ciphertext letters 'qwertyuiop...' map positionally onto 'abcdefghij...'.
    Uses str.maketrans, which exists on Python 3 (string.maketrans, used by
    the original, was removed from the string module in Python 3).
    Non-lowercase characters pass through unchanged.
    """
    table = str.maketrans("qwertyuiopasdfghjklzxcvbnm",
                          "abcdefghijklmnopqrstuvwxyz")
    return letter.translate(table)
# Print the decoded English plaintext of the puzzle.
print(ali_decode(ori))
"content_hash": "ce7933ea7ded1aee84b6f002f8f901b6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 225,
"avg_line_length": 45.55555555555556,
"alnum_prop": 0.7804878048780488,
"repo_name": "Akagi201/learning-python",
"id": "6d61db0d1c17b50c17bf0096ceb86c0ae0811343",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puzzle/aliyun20151111.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sys
import os.path
import numpy as np
# Must be one line or PyPI will cut it off
DESC = ("Compute perceptual similarity between sRGB colors according to the "
        "CAM02-UCS formula given by Luo et al (2006)")

LONG_DESC = open("README.rst").read()

# defines __version__ (single-sourced from the package, without importing it)
exec(open("pycam02ucs/version.py").read())

setup(
    name="pycam02ucs",
    version=__version__,
    description=DESC,
    long_description=LONG_DESC,
    author="Nathaniel J. Smith",
    author_email="njs@pobox.com",
    url="https://github.com/njsmith/pycam02ucs",
    license="MIT",
    classifiers =
      [ "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        ],
    packages=find_packages(),
    install_requires=["numpy"],
    # Ship the bundled colormap examples with the package.
    package_data={'pycam02ucs': ['cm/examples/*']},
)
| {
"content_hash": "70927f340ed3ed7c66622549d562850e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.6426512968299711,
"repo_name": "njsmith/pycam02ucs",
"id": "77b7ce6639045c2c1dae10a89cca3f799aa9ed6e",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134823"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import sys
import hist
import numpy
from scipy.io import loadmat
def main(session_path, scores_path, output_path, title):
    """Make scatter plot of MATLAB vs Java neighborhood scores.

    session_path: MATLAB .mat session holding layout labels and opacities.
    scores_path: tab-separated Java scores, one row per label.
    output_path: where the scatter plot image is written.
    title: plot title (read by plot() via the module-level global).
    """
    root = loadmat(session_path, struct_as_record=False, squeeze_me=True)
    layout = root['layout']

    # Map each node label to its row index in the MATLAB layout.
    label_indexes = {}
    for index, label in enumerate(layout.label):
        # The exported labels HTML-escape apostrophes; undo that so they
        # match the plain-text labels in the Java scores file.
        # (The scraped source had this literal mangled to ''' — restored
        # to the entity it decodes.)
        label = label.replace('&#39;', "'")
        label_indexes[label] = index

    # Collect paired scores: x = MATLAB opacity, y = Java score.
    x = []
    y = []
    with open(scores_path) as scores_file:
        header = scores_file.readline()  # skip the column-header row
        for line in scores_file:
            parts = line.split('\t')
            label = parts[0]
            node_index = label_indexes[label]
            for attribute_index, value in enumerate(map(float, parts[1:])):
                y.append(round(value, 3))
                x.append(round(layout.opacity[node_index][attribute_index], 3))

    plot(output_path, x, y)

    # Report the spread of per-point differences as a quick agreement metric.
    d = numpy.array(x) - y
    print(numpy.nanstd(d))
def plot(output_path, x, y):
    """Save a scatter plot of MATLAB (x) vs Java (y) scores to output_path.

    NOTE(review): `title` below is a module-level global assigned in the
    __main__ block; importing this module and calling plot() directly would
    raise NameError — confirm before reusing outside the script.
    """
    plt.scatter(x, y, alpha=0.1, linewidth=0)
    plt.xlim(xmin=0, xmax=1)
    plt.ylim(ymin=0, ymax=1)
    plt.xlabel('MATLAB')
    plt.ylabel('Java')
    plt.title('Neighborhood Scores - %s' % title)
    plt.grid(True)
    plt.savefig(output_path)
if __name__ == '__main__':
    # CLI argument order: session .mat path, scores TSV path, plot title,
    # then output image path (note: title comes BEFORE the output path).
    session_path = sys.argv[1]
    scores_path = sys.argv[2]
    title = sys.argv[3]
    output_path = sys.argv[4]
    main(session_path, scores_path, output_path, title)
| {
"content_hash": "8ce16cb189869ed9c229babac0d8d74e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 92,
"avg_line_length": 28.527272727272727,
"alnum_prop": 0.6061185468451242,
"repo_name": "baryshnikova-lab/safe-java",
"id": "1b2891d873a0ea552f6b2f237b2a9e55de41639b",
"size": "1593",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "util/neighborhood_corr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "396120"
},
{
"name": "Python",
"bytes": "8187"
}
],
"symlink_target": ""
} |
import os
import requests
from eve import Eve
from flask import render_template
from flask import redirect
from flask import request
from flask import url_for
from geopy import geocoders
import googlemaps
from polyline import decode_line
import json
from wtforms import Form
from wtforms import TextField
from wtforms import validators
from settings import GOOGLE_API_KEY
# Resolve template/static dirs relative to this file so the app can be
# started from any working directory.
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
static_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')

# A PORT env var signals a hosted deployment (e.g. Heroku): bind to all
# interfaces there; otherwise run a local dev server on 5000.
if 'PORT' in os.environ:
    port = int(os.environ.get('PORT'))
    host = '0.0.0.0'
else:
    port = 5000
    host = '127.0.0.1'

# Eve app: serves the REST API and the HTML views below.
app = Eve(template_folder=tmpl_dir, static_folder=static_dir)
@app.route('/s', methods=['GET', 'POST'])
def slash():
    """Signup page: register a phone number with home/work addresses and
    precompute the driving route between them via the Google Maps APIs."""
    if request.method == 'GET':
        form = NumberAddressForm()
        return render_template('slash.html', form=form)
    elif request.method == 'POST':
        form = NumberAddressForm(request.form)
        if form.validate():
            # Normalize the phone number to alphanumerics only.
            phone = ''.join(e for e in form.phone.data if e.isalnum())
            headers = {'Content-Type': 'application/json'}
            # The app talks to its own Eve REST API over HTTP.
            phone_endpoint = 'http://%s:%s/%s/' % (host, port, "phones")
            response = requests.get(phone_endpoint + phone, headers=headers)
            # Non-200 means the phone is not registered yet.
            if response.status_code != 200:
                gmaps = googlemaps.Client(key=GOOGLE_API_KEY)
                directions = gmaps.directions(form.home_address.data, form.work_address.data, mode="driving")
                g = geocoders.GoogleV3(GOOGLE_API_KEY)
                home_location_geo = g.geocode(form.home_address.data)
                work_location_geo = g.geocode(form.work_address.data)
                data = form.data
                data['is_verified'] = False
                # GeoJSON points are [longitude, latitude].
                data["work_address_geo"] = {'type': 'Point', 'coordinates': [work_location_geo.longitude, work_location_geo.latitude]}
                data["home_address_geo"] = {'type': 'Point', 'coordinates': [home_location_geo.longitude, home_location_geo.latitude]}
                response = requests.post(phone_endpoint, json.dumps(data), headers=headers)
                # Store every decoded polyline point of the route; the
                # decoder yields (lat, lng), hence the index swap below.
                decoded = decode_line(directions[0]["overview_polyline"]["points"])
                stuffs = []
                for decode in decoded:
                    stuffs.append({'phone': phone, 'location': {'type': 'Point', 'coordinates': [decode[1], decode[0]]}})
                route_endpoint = 'http://%s:%s/%s/' % (host, port, "routes")
                response = requests.post(route_endpoint, json.dumps(stuffs), headers=headers)
                return render_template('success.html')
            else:
                return render_template('already_registered.html')
        return render_template('slash.html', form=form)
@app.route('/c/<objectid>', methods=['GET'])
def companies_get(objectid):
    """Render the detail page for one company fetched from the local API."""
    json_headers = {'Content-Type': 'application/json'}
    api_response = requests.get(endpoint('companies') + objectid, headers=json_headers)
    return render_template('company.html', company=api_response.json())
@app.route('/c', methods=['GET', 'POST'])
def companies_add():
    """Show the company creation form (GET) or submit it to the API (POST)."""
    if request.method == 'POST':
        form = CompanyForm(request.form)
        if form.validate():
            json_headers = {'Content-Type': 'application/json'}
            api_response = requests.post(endpoint('companies'), json.dumps(form.data), headers=json_headers)
            # On successful creation, go back to the listing page.
            if api_response.status_code == 201:
                return redirect(url_for('companies_list'))
        # Validation or API failure: re-render the form as submitted.
        return render_template('company_add.html', form=form)
    return render_template('company_add.html', form=CompanyForm())
@app.route('/cs', methods=['GET'])
def companies_list():
    """Render the list of all companies from the local API."""
    api_response = requests.get(endpoint("companies"), headers={'Content-Type': 'application/json'})
    return render_template('company_list.html', companies=api_response.json()["_items"])
def endpoint(resource):
    """Build the local Eve API URL for the given resource collection."""
    return 'http://{}:{}/{}/'.format(host, port, resource)
class NumberAddressForm(Form):
    # Signup form: phone number (max 11 chars) plus required home and
    # work addresses.
    phone = TextField(u'Phone Number', [validators.required(), validators.length(max=11)])
    home_address = TextField(u'Home Address', [validators.required()])
    work_address = TextField(u'Work Address', [validators.required()])
class CompanyForm(Form):
    # Minimal company-creation form: just a required name.
    name = TextField(u'Company Name', [validators.required()])
if __name__ == '__main__':
    # threaded=True matters: the view handlers issue HTTP requests back to
    # this same server (see endpoint()), which could deadlock a
    # single-threaded dev server.
    app.run(host=host, port=port, threaded=True)
| {
"content_hash": "b8b9b3e31edcd20319e8425bccc107d5",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 134,
"avg_line_length": 41.61682242990654,
"alnum_prop": 0.6332809342016618,
"repo_name": "ryankanno/hitraffic-alert",
"id": "19ce12deaec064b8c92e1e8e1748b12888babe15",
"size": "4478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124"
},
{
"name": "HTML",
"bytes": "5789"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "9774"
}
],
"symlink_target": ""
} |
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import cogroupbykey
def check_plants(actual):
    """Assert the CoGroupByKey snippet output matches the documented example.

    The scraped source had the emoji literals destroyed by a mojibake
    ('๐' placeholders, one split across lines); restored from the Beam
    transform-catalog example for this snippet.
    """
    expected = '''[START plants]
('Apple', {'icons': ['🍎', '🍏'], 'durations': ['perennial']})
('Carrot', {'icons': [], 'durations': ['biennial']})
('Tomato', {'icons': ['🍅'], 'durations': ['perennial', 'annual']})
('Eggplant', {'icons': ['🍆'], 'durations': []})
[END plants]'''.splitlines()[1:-1]

    # Make it deterministic by sorting all sublists in each element.
    def normalize_element(elem):
        name, details = elem
        details['icons'] = sorted(details['icons'])
        details['durations'] = sorted(details['durations'])
        return name, details

    assert_matches_stdout(actual, expected, normalize_element)
# Swap the real Pipeline for TestPipeline and stub the snippet's print()
# (patched to str) so the example runs silently under test.
@mock.patch('apache_beam.Pipeline', TestPipeline)
@mock.patch(
    'apache_beam.examples.snippets.transforms.aggregation.cogroupbykey.print',
    str)
class CoGroupByKeyTest(unittest.TestCase):
    def test_cogroupbykey(self):
        # Run the snippet and verify its output via check_plants.
        cogroupbykey.cogroupbykey(check_plants)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "adc266de0c5af3f17fb271a4e979c3d8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 29.28205128205128,
"alnum_prop": 0.6873905429071804,
"repo_name": "apache/beam",
"id": "ad4ed99a6e2b84d1d1d4d38918f32b6373401681",
"size": "1975",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/snippets/transforms/aggregation/cogroupbykey_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "70760"
},
{
"name": "Dart",
"bytes": "912687"
},
{
"name": "Dockerfile",
"bytes": "59805"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "5508697"
},
{
"name": "Groovy",
"bytes": "936956"
},
{
"name": "HCL",
"bytes": "103872"
},
{
"name": "HTML",
"bytes": "184151"
},
{
"name": "Java",
"bytes": "41223435"
},
{
"name": "JavaScript",
"bytes": "119576"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "220768"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "10728612"
},
{
"name": "Rust",
"bytes": "5168"
},
{
"name": "SCSS",
"bytes": "318364"
},
{
"name": "Sass",
"bytes": "25954"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "375834"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "1997829"
}
],
"symlink_target": ""
} |
from mhvdb2.models import Entity
from peewee import DoesNotExist
import re
def get(entity_id):
    """Return the Entity with the given primary key, or None if absent."""
    try:
        return Entity.get(Entity.id == entity_id)
    except DoesNotExist:
        return None
def validate(name, email, phone):
    """Validate entity form input; return a list of error messages.

    An empty list means the input is valid. `name` is required; `email`, if
    given, must look like something@something (minimal sanity check only).
    Note: `phone` is currently accepted unvalidated.
    """
    errors = []
    if not name:
        errors.append("Sorry, you need to provide a name.")
    # Raw string avoids the invalid '\s' escape warning in Python 3.6+.
    if email and not re.match(r"[^@\s]+@[^@\s]+", email):
        errors.append("Sorry, that isn't a valid email address.")
    return errors
def create(name, email, phone):
    """Create a non-member, non-keyholder Entity and return its id."""
    entity = Entity(
        is_member=False,
        is_keyholder=False,
        name=name,
        email=email,
        phone=phone,
    )
    entity.save()
    return entity.id
def update(entity_id, name, email, phone):
    """Overwrite the contact details of an existing entity."""
    record = Entity.get(Entity.id == entity_id)
    for attr, value in (("name", name), ("email", email), ("phone", phone)):
        setattr(record, attr, value)
    record.save()
| {
"content_hash": "ba186fd8f3af5edc06d076a5ccc7d863",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 65,
"avg_line_length": 22.525,
"alnum_prop": 0.6392896781354052,
"repo_name": "makehackvoid/mhvdb2",
"id": "6008a4f47cd1cf6c02540386b644e5dfd8283d39",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mhvdb2/resources/entities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "535"
},
{
"name": "Dockerfile",
"bytes": "1347"
},
{
"name": "HTML",
"bytes": "18390"
},
{
"name": "Makefile",
"bytes": "297"
},
{
"name": "Python",
"bytes": "42057"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``familysrc`` property of ``sunburst.insidetextfont``."""

    def __init__(
        self, plotly_name="familysrc", parent_name="sunburst.insidetextfont", **kwargs
    ):
        # Default the edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "none")
        super(FamilysrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "f5475a6fee5a6b1ae4e863736049e0e5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 33.07692307692308,
"alnum_prop": 0.6162790697674418,
"repo_name": "plotly/plotly.py",
"id": "429179e06fd54e306f10b4edc426b791b8a822ca",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/insidetextfont/_familysrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Central place for analysis environment setup
and standard data file locations."""
import os
from os.path import join
import logging
# Configure root logging once at import time so all enrico modules share it.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
#from enrico.extern.odict import OrderedDict
#Submission farm name
#Currently supported : LAPP-Annecy, MPIK-HD, CCIN2P3
FARM = os.environ.get('FARM','MPIK')
# Directory names
# Every location comes from an environment variable; missing ones default
# to '' so downstream code can test availability with a simple truth check.
ENRICO_DIR = os.environ.get('ENRICO_DIR', '')
FERMI_DIR = os.environ.get('FERMI_DIR', '')
FERMI_DATA_DIR = os.environ.get('FERMI_DATA_DIR', '')
CATALOG_DIR = os.environ.get('FERMI_CATALOG_DIR', '')
CATALOG_TEMPLATE_DIR = ''
DIFFUSE_DIR = os.environ.get('FERMI_DIFFUSE_DIR', '')
DOWNLOAD_DIR = os.environ.get('FERMI_DOWNLOAD_DIR', '')
WEEKLY_DIR = ''
# Weekly photon files live under the download area when one is configured.
if DOWNLOAD_DIR :
    WEEKLY_DIR = join(DOWNLOAD_DIR, 'weekly/photon')
PREPROCESSED_DIR = os.environ.get('FERMI_PREPROCESSED_DIR', '')
CONFIG_DIR = join(os.path.dirname(__file__), 'config')
# DIRS maps a directory label to its resolved path, preserving insertion
# order for display; fall back to a plain dict when the bundled OrderedDict
# backport cannot be imported.
try :
    from enrico.extern.odict import OrderedDict
    DIRS = OrderedDict(FERMI_DATA_DIR=FERMI_DATA_DIR,
                       FERMI_DIR=FERMI_DIR,
                       CATALOG_DIR=CATALOG_DIR,
                       DIFFUSE_DIR=DIFFUSE_DIR,
                       PREPROCESSED_DIR=PREPROCESSED_DIR,
                       DOWNLOAD_DIR=DOWNLOAD_DIR,
                       WEEKLY_DIR=WEEKLY_DIR,
                       CONFIG_DIR=CONFIG_DIR,
                       ENRICO_DIR=ENRICO_DIR)
except :
    DIRS = {}
# File names
CATALOG_VERSION = '16'
TEMPLATE_VERSION = '15'
# Extended-source templates ship in a versioned archive below the catalog
# directory; create the archive directory if it does not exist yet.
if CATALOG_DIR :
    CATALOG_TEMPLATE_DIR = join(CATALOG_DIR, 'Extended_archive_v%s/Templates'% TEMPLATE_VERSION)
    try :
        os.mkdir(join(CATALOG_DIR, 'Extended_archive_v%s'% TEMPLATE_VERSION))
    except:
        pass
CATALOG = 'gll_psc_v%s.fit' % CATALOG_VERSION
DIFFUSE_GAL = 'gll_iem_v06.fits'
DIFFUSE_ISO_SOURCE = 'iso_P8R2_SOURCE_V6_v06.txt'
DIFFUSE_ISO_CLEAN = 'iso_P8R2_CLEAN_V6_v06.txt'
SPACECRAFT = 'lat_spacecraft_merged.fits'
def check_command_line_tools():
    """Check command line tool availability.

    Prints one line per required tool with its resolved PATH location,
    or 'MISSING' when the tool cannot be found.
    """
    # shutil.which replaces spawning `which`: no subprocess, portable, and
    # it returns str (the old Popen output was bytes on Python 3, which
    # printed as b'...' in the report).
    import shutil
    print('*** COMMAND LINE TOOLS ***')
    for tool in ['python', 'ipython', 'gtlike', 'enrico_setupcheck']:
        location = shutil.which(tool)
        print('{0:.<20} {1}'.format(tool, location or 'MISSING'))
def check_python_modules():
    """Check python package availability.

    Prints one line per required package with the file of the imported
    module, or 'MISSING' when the import fails.
    """
    # importlib.import_module replaces the exec()/eval() on built strings,
    # which was both an anti-pattern and unsafe for arbitrary names.
    import importlib
    ASTRO_PACKAGES = ['pyfits', 'kapteyn']
    # @todo: Here it's enough to try one of the Fermi python modules
    # and to show where they are located.
    FERMI_PACKAGES = ['gt_apps', 'UnbinnedAnalysis', 'BinnedAnalysis']
    PACKAGES = ['enrico', 'IPython'] + ASTRO_PACKAGES + FERMI_PACKAGES
    print('*** PYTHON PACKAGES ***')
    for package in PACKAGES:
        try:
            module = importlib.import_module(package)
            # Namespace/builtin modules may lack __file__; don't crash on them.
            filename = getattr(module, '__file__', None) or 'built-in'
            print('{0:.<20} {1}'.format(package, filename))
        except ImportError:
            print('{0:.<20} {1}'.format(package, 'MISSING'))
def print_farm():
    """Print the name of the submission farm"""
    print('*** FARM ***')
    # An empty FARM string means no farm was configured.
    label = FARM if FARM else 'MISSING'
    print('{0:.<20} {1}'.format("FARM", label))
| {
"content_hash": "377c9eac835db00bd66418859523f461",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 94,
"avg_line_length": 34.765306122448976,
"alnum_prop": 0.6369239800410919,
"repo_name": "dailytimes/enrico",
"id": "3c70bf1a97de93f83814df84601725b5169bca90",
"size": "3407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enrico/environ.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "256163"
},
{
"name": "Shell",
"bytes": "5773"
}
],
"symlink_target": ""
} |
"""
profiling.remote
~~~~~~~~~~~~~~~~
Utilities for remote profiling. They help you to implement profiling
server and client.
"""
from __future__ import absolute_import
from errno import EBADF, EPIPE, ECONNRESET
import functools
import io
from logging import getLogger as get_logger
try:
import cPickle as pickle
except ImportError:
import pickle
import socket
import struct
import sys
from .. import __version__
from ..utils import frame_stack
__all__ = ['LOGGER', 'LOG', 'INTERVAL', 'PICKLE_PROTOCOL',
           'SIZE_STRUCT_FORMAT', 'pack_msg', 'recv_msg', 'fmt_connected',
           'fmt_disconnected', 'fmt_profiler_started', 'fmt_profiler_stopped',
           'ProfilingServer']
#: The standard logger.
LOGGER = get_logger('Profiling')
#: The standard log function.
LOG = LOGGER.debug
#: The default profiling interval, in seconds.
INTERVAL = 1
#: The default Pickle protocol (DEFAULT_PROTOCOL exists on Python 3 only,
#: hence the getattr fallback to HIGHEST_PROTOCOL).
PICKLE_PROTOCOL = getattr(pickle, 'DEFAULT_PROTOCOL', pickle.HIGHEST_PROTOCOL)
#: The struct format to pack message size. (uint32)
SIZE_STRUCT_FORMAT = '!I'
#: The struct format to pack method. (uint8)
METHOD_STRUCT_FORMAT = '!B'
# Wire-protocol method identifiers.
WELCOME = 0x10
PROFILER = 0x11
RESULT = 0x12
def pack_msg(method, msg, pickle_protocol=PICKLE_PROTOCOL):
    """Packs a method and message."""
    # Frame layout: 1-byte method, 4-byte payload size, pickled payload.
    payload = pickle.dumps(msg, pickle_protocol)
    header = struct.pack(METHOD_STRUCT_FORMAT, method)
    length = struct.pack(SIZE_STRUCT_FORMAT, len(payload))
    return header + length + payload
def recv(sock, size):
    """Receive exactly `size` bytes from `sock`.  Blocks the thread.

    Loops until all bytes arrive: even with ``MSG_WAITALL``, ``recv()``
    may deliver fewer bytes (e.g. when interrupted by a signal), so a
    single call is not a guarantee of completeness.

    :raises socket.error: with ``ECONNRESET`` when the peer closes early.
    """
    chunks = []
    received = 0
    while received < size:
        chunk = sock.recv(size - received, socket.MSG_WAITALL)
        if not chunk:
            # Empty read means the connection was closed by the peer.
            raise socket.error(ECONNRESET, 'Connection closed')
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)
def recv_msg(sock):
    """Receives a method and message from the socket. This function blocks the
    current thread.
    """
    # Read the fixed-size method byte, then the payload length, then the
    # pickled payload itself.
    method_size = struct.calcsize(METHOD_STRUCT_FORMAT)
    method, = struct.unpack(METHOD_STRUCT_FORMAT, recv(sock, method_size))
    length_size = struct.calcsize(SIZE_STRUCT_FORMAT)
    size, = struct.unpack(SIZE_STRUCT_FORMAT, recv(sock, length_size))
    msg = pickle.loads(recv(sock, size))
    return method, msg
def fmt_connected(addr, num_clients):
    """Format a log line for a newly connected client."""
    if addr:
        return 'Connected from {0[0]}:{0[1]} (total: {1})'.format(addr, num_clients)
    return 'A client connected (total: {1})'.format(addr, num_clients)
def fmt_disconnected(addr, num_clients):
    """Format a log line for a client that went away."""
    if addr:
        return 'Disconnected from {0[0]}:{0[1]} (total: {1})'.format(addr, num_clients)
    return 'A client disconnected (total: {1})'.format(addr, num_clients)
def fmt_profiler_started(interval):
    """Format the log line announcing that profiling has begun."""
    return 'Profiling every %s seconds...' % (interval,)
def fmt_profiler_stopped():
    """Format the log line announcing that profiling has ended."""
    return 'Profiler stopped'
def abstract(message):
    """Build a decorator marking a method abstract: any call raises
    NotImplementedError carrying `message`.
    """
    def decorate(func):
        def placeholder(*args, **kwargs):
            raise NotImplementedError(message)
        # Keep the decorated function's name/docstring for introspection.
        return functools.wraps(func)(placeholder)
    return decorate
class ProfilingServer(object):
    """The base class for profiling server implementations. Implement abstract
    methods and call :meth:`connected` when a client connected.
    """
    #: The most recently broadcast RESULT frame; replayed to clients that
    #: connect between profiling cycles.  ``None`` until the first cycle.
    _latest_result_data = None
    def __init__(self, profiler, interval=INTERVAL,
                 log=LOG, pickle_protocol=PICKLE_PROTOCOL):
        # `log` is a plain callable (such as LOGGER.debug), not a Logger.
        self.profiler = profiler
        self.interval = interval
        self.log = log
        self.pickle_protocol = pickle_protocol
        # Handles of currently connected clients.
        self.clients = set()
    @abstract('Implement serve_forever() to run a server synchronously.')
    def serve_forever(self):
        pass
    @abstract('Implement _send() to send data to the client.')
    def _send(self, client, data):
        pass
    @abstract('Implement _close() to close the client.')
    def _close(self, client):
        pass
    @abstract('Implement _addr() to get the address from the client.')
    def _addr(self, client):
        pass
    @abstract('Implement _start_profiling() to start a profiling loop.')
    def _start_profiling(self):
        pass
    @abstract('Implement _start_watching() to add a disconnection callback to '
              'the client')
    def _start_watching(self, client):
        pass
    def profiling(self):
        """A generator which profiles then broadcasts the result. Implement
        sleeping loop using this::
            def profile_periodically(self):
                for __ in self.profiling():
                    time.sleep(self.interval)
        The loop ends (and the profiler stops) once the last client has
        disconnected.
        """
        self._log_profiler_started()
        # to exclude statistics of profiler server thread.
        excluding_code = frame_stack(sys._getframe())[0].f_code
        while self.clients:
            self.profiler.start()
            # should sleep
            yield
            self.profiler.stop()
            self.profiler.exclude_code(excluding_code)
            result = self.profiler.result()
            # Pack once, send the same frame to every client.
            data = pack_msg(RESULT, result,
                            pickle_protocol=self.pickle_protocol)
            self._latest_result_data = data
            # broadcast
            closed_clients = []
            for client in self.clients:
                try:
                    self._send(client, data)
                except socket.error as err:
                    if err.errno == EPIPE:
                        # Broken pipe: remember the client, drop it below
                        # (cannot mutate self.clients while iterating it).
                        closed_clients.append(client)
                        continue
                    # NOTE(review): non-EPIPE socket errors are silently
                    # swallowed here -- confirm this is intentional.
                    pass
            del data
            # handle disconnections.
            for client in closed_clients:
                self.disconnected(client)
        self._log_profiler_stopped()
    def send_msg(self, client, method, msg, pickle_protocol=None):
        # Pack and send a single (method, msg) frame to one client.
        if pickle_protocol is None:
            pickle_protocol = self.pickle_protocol
        data = pack_msg(method, msg, pickle_protocol=pickle_protocol)
        self._send(client, data)
    def connected(self, client):
        """Call this method when a client connected."""
        self.clients.add(client)
        self._log_connected(client)
        self._start_watching(client)
        # The WELCOME handshake uses protocol 0 so any client can read it;
        # it tells the client which protocol and server version follow.
        self.send_msg(client, WELCOME, (self.pickle_protocol, __version__),
                      pickle_protocol=0)
        profiler = self.profiler
        # Unwrap nested profiler wrappers to report the innermost type.
        while True:
            try:
                profiler = profiler.profiler
            except AttributeError:
                break
        self.send_msg(client, PROFILER, type(profiler))
        # Replay the latest result so the client sees data immediately
        # instead of waiting for the next profiling cycle.
        if self._latest_result_data is not None:
            try:
                self._send(client, self._latest_result_data)
            except socket.error as err:
                if err.errno in (EBADF, EPIPE):
                    self.disconnected(client)
                    return
                raise
        # The first client starts the profiling loop.
        if len(self.clients) == 1:
            self._start_profiling()
    def disconnected(self, client):
        """Call this method when a client disconnected."""
        if client not in self.clients:
            # already disconnected.
            return
        self.clients.remove(client)
        self._log_disconnected(client)
        self._close(client)
    def _log_connected(self, client):
        # _addr() may return a non-tuple handle; only (host, port) tuples
        # are formatted as addresses.
        addr = self._addr(client)
        addr = addr if isinstance(addr, tuple) else None
        self.log(fmt_connected(addr, len(self.clients)))
    def _log_disconnected(self, client):
        addr = self._addr(client)
        addr = addr if isinstance(addr, tuple) else None
        self.log(fmt_disconnected(addr, len(self.clients)))
    def _log_profiler_started(self):
        self.log(fmt_profiler_started(self.interval))
    def _log_profiler_stopped(self):
        self.log(fmt_profiler_stopped())
| {
"content_hash": "ff1b3aebb91a6be04c48959bcd2ba6d2",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 30.206349206349206,
"alnum_prop": 0.6078560168155543,
"repo_name": "sublee/profiling",
"id": "3b32f8c947a205a105b2c976bffa307dda455480",
"size": "7636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiling/remote/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1705"
},
{
"name": "Python",
"bytes": "118175"
}
],
"symlink_target": ""
} |
"""Test cases for anyconfig.parser.parse_list.
"""
import anyconfig.parser as TT
from . import common
class TestCase(common.TestCase):
    """Exercise anyconfig.parser.parse_list against the list fixtures."""
    kind = 'list'
    pattern = '*.*'

    def test_parse_list(self):
        for data in self.each_data():
            result = TT.parse_list(data.inp, **data.opts)
            self.assertEqual(result, data.exp, data)
# vim:sw=4:ts=4:et:
| {
"content_hash": "8c91b763494600917f73b9eb17423575",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 53,
"avg_line_length": 20.6,
"alnum_prop": 0.5412621359223301,
"repo_name": "ssato/python-anyconfig",
"id": "a9313dce1fe2fb013745111cf74822f31a427c21",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "tests/parser/test_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "568"
},
{
"name": "Python",
"bytes": "348779"
},
{
"name": "Shell",
"bytes": "3456"
}
],
"symlink_target": ""
} |
from django.db.models import F
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic

from .apps import PollsConfig
from .models import Choice, Question
class IndexView(generic.ListView):
    """Landing page listing the five most recently published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_questions'

    def get_queryset(self):
        # Hide questions whose publication date is still in the future.
        published = Question.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Shows one question's voting form; unpublished questions 404."""
    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        # Restricting the queryset makes future-dated questions 404.
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Displays the vote tallies for a single question."""
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one of the question's choices.

    Re-renders the detail page with an error message when no valid choice
    was submitted; otherwise increments the chosen choice's tally and
    redirects to the results page (POST/redirect/GET pattern).
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Either 'choice' was absent from the POST data or the id does not
        # belong to this question: show the form again with an error.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice."
        })
    # F() performs the increment inside the database, avoiding the
    # read-modify-write race when two votes arrive concurrently.
    selected_choice.votes = F('votes') + 1
    selected_choice.save()
    redirect_url = reverse('polls:results', args=(question.id,))
    return HttpResponseRedirect(redirect_url)
| {
"content_hash": "8bda21678d400fc585b2c058e7083505",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 91,
"avg_line_length": 30.535714285714285,
"alnum_prop": 0.6479532163742691,
"repo_name": "butla/experiments",
"id": "2f684a8e618f700dad65ba5fc986c13381322661",
"size": "1710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/django_sample/polls/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18716"
},
{
"name": "CSS",
"bytes": "848"
},
{
"name": "Dockerfile",
"bytes": "1065"
},
{
"name": "Go",
"bytes": "245"
},
{
"name": "HTML",
"bytes": "359777"
},
{
"name": "Java",
"bytes": "2439"
},
{
"name": "JavaScript",
"bytes": "22263"
},
{
"name": "Jupyter Notebook",
"bytes": "487037"
},
{
"name": "Lua",
"bytes": "684"
},
{
"name": "Makefile",
"bytes": "4177"
},
{
"name": "PLpgSQL",
"bytes": "4461"
},
{
"name": "Python",
"bytes": "193267"
},
{
"name": "Rust",
"bytes": "1075"
},
{
"name": "SCSS",
"bytes": "102440"
},
{
"name": "Scala",
"bytes": "4797"
},
{
"name": "Shell",
"bytes": "6079"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.