text stringlengths 4 1.02M | meta dict |
|---|---|
import random
class Hand(object):
    """A hand of letter tiles, stored as a letter -> count mapping."""

    def __init__(self, n):
        '''
        Initialize a Hand.

        n: integer, the size of the hand.
        '''
        # isinstance instead of type(n) == int: same intent, idiomatic check.
        assert isinstance(n, int)
        self.HAND_SIZE = n
        self.VOWELS = 'aeiou'
        self.CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
        # Deal a new hand
        self.dealNewHand()

    def dealNewHand(self):
        '''
        Deals a new hand, and sets the hand attribute to the new hand.
        One third of the hand (rounded down) is vowels, the rest consonants.
        '''
        # Set self.hand to a new, empty dictionary
        self.hand = {}
        # Build the hand. Floor division keeps numVowels an int on both
        # Python 2 and 3 (the original "/" produces a float under Python 3,
        # which breaks range()).
        numVowels = self.HAND_SIZE // 3
        for i in range(numVowels):
            x = self.VOWELS[random.randrange(0, len(self.VOWELS))]
            self.hand[x] = self.hand.get(x, 0) + 1
        for i in range(numVowels, self.HAND_SIZE):
            x = self.CONSONANTS[random.randrange(0, len(self.CONSONANTS))]
            self.hand[x] = self.hand.get(x, 0) + 1

    def setDummyHand(self, handString):
        '''
        Allows you to set a dummy hand. Useful for testing your implementation.

        handString: A string of letters you wish to be in the hand. Length of this
        string must be equal to self.HAND_SIZE.

        This method converts sets the hand attribute to a dictionary
        containing the letters of handString.
        '''
        assert len(handString) == self.HAND_SIZE, "Length of handString ({0}) must equal length of HAND_SIZE ({1})".format(len(handString), self.HAND_SIZE)
        self.hand = {}
        for char in handString:
            self.hand[char] = self.hand.get(char, 0) + 1

    def calculateLen(self):
        '''
        Calculate the length of the hand (total number of letters).
        '''
        ans = 0
        for k in self.hand:
            ans += self.hand[k]
        return ans

    def __str__(self):
        '''
        Display a string representation of the hand: letters sorted
        alphabetically, repeated letters included.
        '''
        # sorted() works on both Python 2 and 3; the original
        # self.hand.keys().sort() fails on Python 3 because keys()
        # returns a view object that has no sort() method.
        output = ''
        for letter in sorted(self.hand.keys()):
            output += letter * self.hand[letter]
        return output

    def update(self, word):
        """
        Does not assume that self.hand has all the letters in word.

        Updates the hand: if self.hand does have all the letters to make
        the word, modifies self.hand by using up the letters in the given word.

        Returns True if the word was able to be made with the letter in
        the hand; False otherwise.

        word: string
        returns: Boolean (if the word was or was not made)
        """
        # Work on a copy so the hand is left untouched on failure.
        hand = self.hand.copy()
        for c in word:
            if hand.get(c, 0) > 0:
                hand[c] = hand[c] - 1
            else:
                return False
        self.hand = hand
        return True
# Smoke-test / demo of the Hand class.
# Parenthesized single-argument print calls produce identical output on
# Python 2 and Python 3; the original bare "print x" statements are a
# SyntaxError under Python 3.
myHand = Hand(7)
print(myHand)
print(myHand.calculateLen())
myHand.setDummyHand('aazzmsp')
print(myHand)
print(myHand.calculateLen())
print(myHand.update('za'))
print(myHand)
myHand = Hand(11)
myHand.setDummyHand('nnandocoxjx')
myHand.update('daikon')
print(myHand)
myHand = Hand(10)
myHand.setDummyHand('alaleyicsi')
myHand.update('apple')
print myHand | {
"content_hash": "5cc06d3f831ec7f7ee355e99d2886cce",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 155,
"avg_line_length": 27.794871794871796,
"alnum_prop": 0.5738007380073801,
"repo_name": "spradeepv/dive-into-python",
"id": "5cd0793069496d22c6186d0ef3771faae6855241",
"size": "3252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edx/lecture-12/l12_hand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "530165"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import QuerySet, Q
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.six import iteritems, integer_types
from django.utils.translation import ugettext_lazy as _
from jsonfield.fields import JSONField
class LogEntryManager(models.Manager):
    """
    Custom manager for the :py:class:`LogEntry` model.
    """

    def log_create(self, instance, **kwargs):
        """
        Helper method to create a new log entry. This method automatically populates some fields when no explicit value
        is given.

        :param instance: The model instance to log a change for.
        :type instance: Model
        :param kwargs: Field overrides for the :py:class:`LogEntry` object.
        :return: The new log entry or `None` if there were no changes.
        :rtype: LogEntry
        """
        changes = kwargs.get('changes', None)
        pk = self._get_pk_value(instance)

        if changes is not None:
            kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
            kwargs.setdefault('object_pk', pk)
            kwargs.setdefault('object_repr', smart_text(instance))

            if isinstance(pk, integer_types):
                kwargs.setdefault('object_id', pk)

            get_additional_data = getattr(instance, 'get_additional_data', None)
            if callable(get_additional_data):
                kwargs.setdefault('additional_data', get_additional_data())

            # Delete log entries with the same pk as a newly created model. This should only be necessary when an pk is
            # used twice.
            # FIX: compare the action with == instead of `is` -- the action
            # values are plain ints, and identity comparison only works by
            # accident of CPython's small-integer caching.
            if kwargs.get('action', None) == LogEntry.Action.CREATE:
                # Build the queryset once instead of repeating the identical
                # filter for the exists() check and the delete().
                stale_by_id = None
                if kwargs.get('object_id', None) is not None:
                    stale_by_id = self.filter(content_type=kwargs.get('content_type'),
                                              object_id=kwargs.get('object_id'))
                if stale_by_id is not None and stale_by_id.exists():
                    stale_by_id.delete()
                else:
                    self.filter(content_type=kwargs.get('content_type'), object_pk=kwargs.get('object_pk', '')).delete()

            return self.create(**kwargs)
        return None

    def get_for_object(self, instance):
        """
        Get log entries for the specified model instance.

        :param instance: The model instance to get log entries for.
        :type instance: Model
        :return: QuerySet of log entries for the given model instance.
        :rtype: QuerySet
        """
        # Return empty queryset if the given model instance is not a model instance.
        if not isinstance(instance, models.Model):
            return self.none()

        content_type = ContentType.objects.get_for_model(instance.__class__)
        pk = self._get_pk_value(instance)

        # Integer pks are stored (indexed) in object_id, everything else in object_pk.
        if isinstance(pk, integer_types):
            return self.filter(content_type=content_type, object_id=pk)
        else:
            return self.filter(content_type=content_type, object_pk=pk)

    def get_for_objects(self, queryset):
        """
        Get log entries for the objects in the specified queryset.

        :param queryset: The queryset to get the log entries for.
        :type queryset: QuerySet
        :return: The LogEntry objects for the objects in the given queryset.
        :rtype: QuerySet
        """
        # exists() instead of count() == 0: avoids a full COUNT query just to
        # test for emptiness.
        if not isinstance(queryset, QuerySet) or not queryset.exists():
            return self.none()

        content_type = ContentType.objects.get_for_model(queryset.model)
        primary_keys = queryset.values_list(queryset.model._meta.pk.name, flat=True)

        if isinstance(primary_keys[0], integer_types):
            return self.filter(content_type=content_type).filter(Q(object_id__in=primary_keys)).distinct()
        else:
            return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()

    def get_for_model(self, model):
        """
        Get log entries for all objects of a specified type.

        :param model: The model to get log entries for.
        :type model: class
        :return: QuerySet of log entries for the given model.
        :rtype: QuerySet
        """
        # Return empty queryset if the given object is not valid.
        if not issubclass(model, models.Model):
            return self.none()

        content_type = ContentType.objects.get_for_model(model)

        return self.filter(content_type=content_type)

    def _get_pk_value(self, instance):
        """
        Get the primary key field value for a model instance.

        :param instance: The model instance to get the primary key for.
        :type instance: Model
        :return: The primary key value of the given model instance.
        """
        pk_field = instance._meta.pk.name
        pk = getattr(instance, pk_field, None)

        # Check to make sure that we got an pk not a model object.
        if isinstance(pk, models.Model):
            pk = self._get_pk_value(pk)
        return pk
@python_2_unicode_compatible
class LogEntry(models.Model):
    """
    Represents an entry in the audit log. The content type is saved along with the textual and numeric (if available)
    primary key, as well as the textual representation of the object when it was saved. It holds the action performed
    and the fields that were changed in the transaction.

    If AuditlogMiddleware is used, the actor will be set automatically. Keep in mind that editing / re-saving LogEntry
    instances may set the actor to a wrong value - editing LogEntry instances is not recommended (and it should not be
    necessary).
    """

    class Action:
        """
        The actions that Auditlog distinguishes: creating, updating and deleting objects. Viewing objects is not logged.
        The values of the actions are numeric, a higher integer value means a more intrusive action. This may be useful
        in some cases when comparing actions because the ``__lt``, ``__lte``, ``__gt``, ``__gte`` lookup filters can be
        used in queries.

        The valid actions are :py:attr:`Action.CREATE`, :py:attr:`Action.UPDATE` and :py:attr:`Action.DELETE`.
        """
        CREATE = 0
        UPDATE = 1
        DELETE = 2

        choices = (
            (CREATE, _("create")),
            (UPDATE, _("update")),
            (DELETE, _("delete")),
        )

    content_type = models.ForeignKey('contenttypes.ContentType', on_delete=models.CASCADE, related_name='+', verbose_name=_("content type"))
    object_pk = models.CharField(db_index=True, max_length=255, verbose_name=_("object pk"))
    object_id = models.BigIntegerField(blank=True, db_index=True, null=True, verbose_name=_("object id"))
    object_repr = models.TextField(verbose_name=_("object representation"))
    action = models.PositiveSmallIntegerField(choices=Action.choices, verbose_name=_("action"))
    changes = models.TextField(blank=True, verbose_name=_("change message"))
    actor = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL, related_name='+', verbose_name=_("actor"))
    remote_addr = models.GenericIPAddressField(blank=True, null=True, verbose_name=_("remote address"))
    timestamp = models.DateTimeField(auto_now_add=True, verbose_name=_("timestamp"))
    additional_data = JSONField(blank=True, null=True, verbose_name=_("additional data"))

    objects = LogEntryManager()

    class Meta:
        get_latest_by = 'timestamp'
        ordering = ['-timestamp']
        verbose_name = _("log entry")
        verbose_name_plural = _("log entries")

    def __str__(self):
        if self.action == self.Action.CREATE:
            fstring = _("Created {repr:s}")
        elif self.action == self.Action.UPDATE:
            fstring = _("Updated {repr:s}")
        elif self.action == self.Action.DELETE:
            fstring = _("Deleted {repr:s}")
        else:
            fstring = _("Logged {repr:s}")

        return fstring.format(repr=self.object_repr)

    @property
    def changes_dict(self):
        """
        :return: The changes recorded in this log entry as a dictionary object.
        """
        try:
            return json.loads(self.changes)
        except ValueError:
            return {}

    @property
    def changes_str(self):
        """
        Return the changes recorded in this log entry as a readable string.

        FIX: the original declared ``colon``/``arrow``/``separator``
        parameters, but a property getter can never receive arguments, so
        they were dead and the "can be customized" docstring was wrong. If
        you need different formatting, use :py:attr:`LogEntry.changes_dict`
        and format the string yourself.

        :return: A readable string of the changes in this log entry.
        """
        colon = ': '
        arrow = smart_text(' \u2192 ')
        separator = '; '
        substrings = []

        for field, values in iteritems(self.changes_dict):
            substring = smart_text('{field_name:s}{colon:s}{old:s}{arrow:s}{new:s}').format(
                field_name=field,
                colon=colon,
                old=values[0],
                arrow=arrow,
                new=values[1],
            )
            substrings.append(substring)

        return separator.join(substrings)
class AuditlogHistoryField(GenericRelation):
    """
    A subclass of py:class:`django.contrib.contenttypes.fields.GenericRelation` that sets some default variables. This
    makes it easier to access Auditlog's log entries, for example in templates.

    By default this field will assume that your primary keys are numeric, simply because this is the most common case.
    However, if you have a non-integer primary key, you can simply pass ``pk_indexable=False`` to the constructor, and
    Auditlog will fall back to using a non-indexed text based field for this model.

    Using this field will not automatically register the model for automatic logging. This is done so you can be more
    flexible with how you use this field.

    :param pk_indexable: Whether the primary key for this model is not an :py:class:`int` or :py:class:`long`.
    :type pk_indexable: bool
    """

    def __init__(self, pk_indexable=True, **kwargs):
        # Indexed integer pks live in 'object_id'; everything else falls back
        # to the text-based 'object_pk' column.
        kwargs['to'] = LogEntry
        kwargs['object_id_field'] = 'object_id' if pk_indexable else 'object_pk'
        kwargs['content_type_field'] = 'content_type'
        super(AuditlogHistoryField, self).__init__(**kwargs)
# South compatibility for AuditlogHistoryField
try:
    from south.modelsinspector import add_introspection_rules
    # Raw string: "\." in a plain literal is an invalid escape sequence
    # (DeprecationWarning on modern Python).
    add_introspection_rules([], [r"^auditlog\.models\.AuditlogHistoryField"])
    # BUG FIX: the original `raise DeprecationWarning(...)` was NOT caught by
    # the `except ImportError` below, so importing this module crashed
    # whenever South actually was installed. A deprecation should be warned,
    # not raised.
    import warnings
    warnings.warn("South support will be dropped in django-auditlog 0.4.0 or later.",
                  DeprecationWarning)
except ImportError:
    pass
| {
"content_hash": "ebf18616a6ccf783e8eca942aa9e29be",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 162,
"avg_line_length": 41.9812030075188,
"alnum_prop": 0.6479806572938122,
"repo_name": "chris-griffin/django-auditlog",
"id": "2ba3947adfd4b2fab8d4056da412ca1cd1acb12c",
"size": "11167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/auditlog/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51889"
}
],
"symlink_target": ""
} |
"""Build PO gettext files for server apps (html and py).
"""
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management import call_command
class Command(BaseCommand):
    """Run `makemessages` for every app directory that has a `locale` folder."""

    help = "Build PO gettext files for server apps (html and py)."

    def add_arguments(self, parser):
        # Optional list of app names; when given, only those apps are processed.
        parser.add_argument('app_name', nargs='*', type=str)

    def handle(self, *args, **options):
        wanted_apps = options['app_name']
        os.chdir(os.path.join(settings.BASE_DIR))
        for entry in os.listdir():
            # Guard clauses: skip non-directories, unrequested apps, and
            # directories without a locale/ folder.
            if not os.path.isdir(entry):
                continue
            if wanted_apps and entry not in wanted_apps:
                continue
            if not os.path.exists(os.path.join(entry, 'locale')):
                continue
            os.chdir(entry)
            print("processing makemessages on %s for any locale %s" % (os.getcwd(), entry))
            call_command('makemessages', locale=['fr', 'en'], domain='django', verbosity=0)
            os.chdir('..')
| {
"content_hash": "80e0f537ba31334a6a8f25b20c59c30a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 99,
"avg_line_length": 31.696969696969695,
"alnum_prop": 0.5879541108986616,
"repo_name": "coll-gate/collgate",
"id": "5e1a0b3553e0ce27cc688719edb5f46ac27db9ba",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/main/management/commands/make_messages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20334"
},
{
"name": "HTML",
"bytes": "245334"
},
{
"name": "JavaScript",
"bytes": "5131841"
},
{
"name": "Python",
"bytes": "1291968"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
} |
import copy
import numpy as np
try:
import torch
from torch.autograd import Variable
except Exception:
pass
from src.play.model.Move import Move
from src.play.model.Game import BLACK, WHITE
from .utils import network_input
from .model_zero import ConvNet
class ConvBot():
    """Go bot that selects moves with a convolutional policy/value network.

    `logic` chooses the strategy: 'policy' picks the move with the highest
    policy-network probability; 'value' evaluates each candidate board with
    the value network and picks the best.
    """

    def __init__(self, logic: str, verbose=False):
        # `logic` must be 'policy' or 'value' (the original annotation was
        # the set literal {'policy', 'value'}, which is not a type).
        # Load Model
        self.model = ConvNet(in_channels=4, conv_depth=5)
        # map_location keeps tensors on CPU regardless of where they were saved.
        self.model.load_state_dict(torch.load(
            'src/learn/conv/saved_nets/5depth_2.5m_tanh.pth',
            map_location=lambda storage, loc: storage))
        # Inference only: freeze all weights and switch to eval mode.
        for param in self.model.parameters():
            param.requires_grad = False
        self.model.eval()
        # Bot logic
        self.logic = logic
        self.verbose = verbose

    @staticmethod
    def generate_input(board, player_value):
        """Build the network input tensor for a 9x9 board and player color.

        NOTE(review): Variable(..., volatile=True) is the legacy pre-0.4
        torch inference API -- presumably this targets an old torch version;
        confirm before upgrading torch.
        """
        X = network_input(board.reshape(1, 9, 9), np.array([player_value]))
        X = Variable(torch.from_numpy(X.astype(float)).float(), volatile=True)
        return X

    def genmove(self, color, game) -> Move:
        """Return the move chosen for `color` ('w' or 'b') in `game`.

        Defaults to passing; candidate moves come from
        game.get_playable_locations().
        """
        board = np.array(game.board)
        my_value = WHITE if color == 'w' else BLACK
        # enemy_value = BLACK if my_value == WHITE else WHITE
        inp = self.generate_input(board, my_value)
        if self.verbose:
            print(inp)
        policy, value = self.model(inp)
        policy = policy.data.numpy().flatten()
        value = value.data.numpy().flatten()
        playable_locations = game.get_playable_locations(color)
        # Default: passing. Index 81 is the pass entry of the 82-long
        # policy vector (81 board points + pass).
        policy_move = value_move = Move(is_pass=True)
        policy_move_prob = policy[81]
        value_move_prob = value
        for move in playable_locations:
            if self.verbose:
                print(move)
            if move.is_pass:
                continue
            if self.logic == 'value':
                # Play move on a test board
                test_board = copy.deepcopy(game.board)
                test_board.place_stone_and_capture_if_applicable_default_values(
                    move.to_matrix_location(), my_value)
                # Evaluate state - attention: Enemy's turn!
                # inp = self.generate_input(np.array(test_board), enemy_value)
                # _, enemy_win_prob = self.model(inp)
                # enemy_win_prob = enemy_win_prob.data.numpy().flatten()
                # my_new_value = -enemy_win_prob
                # Disregard that right now and just get my own win prob
                inp = self.generate_input(np.array(test_board), my_value)
                _, new_value = self.model(inp)
                new_value = new_value.data.numpy().flatten()
                if new_value > value_move_prob:
                    value_move = move
                    value_move_prob = new_value
            if self.logic == 'policy':
                if policy[move.to_flat_idx()] > policy_move_prob:
                    policy_move = move
                    policy_move_prob = policy[move.to_flat_idx()]
        if self.logic == 'policy':
            out_move = policy_move
        if self.logic == 'value':
            out_move = value_move
        return out_move
class ConvBot_value(ConvBot):
    """ConvBot preconfigured to pick moves by value-network evaluation."""

    def __init__(self):
        super().__init__(logic='value')
class ConvBot_policy(ConvBot):
    """ConvBot preconfigured to pick moves from the policy network's output."""

    def __init__(self):
        super().__init__(logic='policy')
| {
"content_hash": "f67b86bac3d4e9ef3fe95c428335d9ba",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 80,
"avg_line_length": 32.640776699029125,
"alnum_prop": 0.5621653777513385,
"repo_name": "nathbo/GO_DILab",
"id": "74ffaf81111b5b9a30eb775eadf1582b88ac2858",
"size": "3362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/learn/conv/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "496"
},
{
"name": "Python",
"bytes": "320762"
},
{
"name": "Shell",
"bytes": "2336"
}
],
"symlink_target": ""
} |
from email.parser import FeedParser
import os
import imp
import pkg_resources
import re
import sys
import shutil
import tempfile
import textwrap
import zipfile
from distutils.util import change_root
from pip.locations import (bin_py, running_under_virtualenv,PIP_DELETE_MARKER_FILENAME,
write_delete_marker_file)
from pip.exceptions import (InstallationError, UninstallationError,
BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.vcs import vcs
from pip.log import logger
from pip.util import (display_path, rmtree, ask, ask_path_exists, backup_dir,
is_installable_dir, is_local, dist_is_local,
dist_in_usersite, dist_in_site_packages, renames,
normalize_path, egg_link_path, make_path_relative,
call_subprocess, is_prerelease, normalize_name)
from pip.backwardcompat import (urlparse, urllib, uses_pycache,
ConfigParser, string_types, HTTPError,
get_python_version, b)
from pip.index import Link
from pip.locations import build_prefix
from pip.download import (PipSession, get_file_content, is_url, url_to_path,
path_to_url, is_archive_file,
unpack_vcs_link, is_vcs_url, is_file_url,
unpack_file_url, unpack_http_url)
import pip.wheel
from pip.wheel import move_wheel_files
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
             url=None, as_egg=False, update=True, prereleases=None,
             editable_options=None, from_bundle=False):
    """Represent one requirement to be installed.

    :param req: requirement string or pkg_resources.Requirement; None for
        "unnamed" requirements known only by URL/path.
    :param comes_from: the requirement or file path that pulled this one in.
    :param source_dir: directory holding the unpacked source, if any.
    :param editable: True for "-e" (editable/VCS) requirements.
    :param url: download/checkout URL, when known.
    :param as_egg: install with egg semantics.
    :param update: whether an editable checkout should be updated.
    :param prereleases: explicitly allow pre-releases; when falsy, inferred
        from the requirement's own version specifiers below.
    :param editable_options: parsed options for editable requirements.
    :param from_bundle: True when the requirement came from a pip bundle.
    """
    self.extras = ()
    if isinstance(req, string_types):
        # Parse plain strings like "foo>=1.0" into a Requirement object.
        req = pkg_resources.Requirement.parse(req)
        self.extras = req.extras
    self.req = req
    self.comes_from = comes_from
    self.source_dir = source_dir
    self.editable = editable
    if editable_options is None:
        editable_options = {}
    self.editable_options = editable_options
    self.url = url
    self.as_egg = as_egg
    self._egg_info_path = None
    # This holds the pkg_resources.Distribution object if this requirement
    # is already available:
    self.satisfied_by = None
    # This hold the pkg_resources.Distribution object if this requirement
    # conflicts with another installed distribution:
    self.conflicts_with = None
    self._temp_build_dir = None
    self._is_bundle = None
    # True if the editable should be updated:
    self.update = update
    # Set to True after successful installation
    self.install_succeeded = None
    # UninstallPathSet of uninstalled distribution (for possible rollback)
    self.uninstalled = None
    self.use_user_site = False
    self.target_dir = None
    self.from_bundle = from_bundle
    # True if pre-releases are acceptable: either requested explicitly, or
    # any specifier other than "!=" mentions a pre-release version.
    if prereleases:
        self.prereleases = True
    elif self.req is not None:
        self.prereleases = any([is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs])
    else:
        self.prereleases = False
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None):
    """Build an InstallRequirement from an editable ("-e") requirement line."""
    name, url, extras_override = parse_editable(editable_req, default_vcs)
    # file: URLs point at an already-present source tree.
    source_dir = url_to_path(url) if url.startswith('file:') else None

    requirement = cls(name, comes_from,
                      source_dir=source_dir,
                      editable=True,
                      url=url,
                      editable_options=extras_override,
                      prereleases=True)

    if extras_override is not None:
        requirement.extras = extras_override

    return requirement
@classmethod
def from_line(cls, name, comes_from=None, prereleases=None):
    """Creates an InstallRequirement from a name, which might be a
    requirement, directory containing 'setup.py', filename, or URL.
    """
    url = None
    name = name.strip()
    req = None
    path = os.path.normpath(os.path.abspath(name))
    link = None

    # Classify the line: URL, local installable directory, or archive file.
    if is_url(name):
        link = Link(name)
    elif os.path.isdir(path) and (os.path.sep in name or name.startswith('.')):
        if not is_installable_dir(path):
            raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % name)
        link = Link(path_to_url(name))
    elif is_archive_file(path):
        if not os.path.isfile(path):
            logger.warn('Requirement %r looks like a filename, but the file does not exist', name)
        link = Link(path_to_url(name))

    # If the line has an egg= definition, but isn't editable, pull the requirement out.
    # Otherwise, assume the name is the req for the non URL/path/archive case.
    if link and req is None:
        url = link.url_without_fragment
        req = link.egg_fragment  # when fragment is None, this will become an 'unnamed' requirement

        # Handle relative file URLs
        if link.scheme == 'file' and re.search(r'\.\./', url):
            url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
    else:
        req = name

    return cls(req, comes_from, url=url, prereleases=prereleases)
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir, unpack=True):
    """Return the directory this requirement should be built in.

    Unnamed requirements (req is None) get a fresh temporary directory;
    named ones get build_dir/<name> (lower-cased for editables).
    """
    if self._temp_build_dir is not None:
        return self._temp_build_dir
    if self.req is None:
        # No name yet: build in a temp dir and remember where we ideally
        # wanted to build, so correct_build_location() can move it later.
        self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
        self._ideal_build_dir = build_dir
        return self._temp_build_dir
    if self.editable:
        name = self.name.lower()
    else:
        name = self.name
    # FIXME: Is there a better place to create the build_dir? (hg and bzr need this)
    if not os.path.exists(build_dir):
        _make_build_dir(build_dir)
    return os.path.join(build_dir, name)
def correct_build_location(self):
    """If the build location was a temporary directory, this will move it
    to a new more permanent location"""
    if self.source_dir is not None:
        return
    # Only meaningful once the requirement has a name (run_egg_info names
    # previously unnamed requirements and then calls this method).
    assert self.req is not None
    assert self._temp_build_dir
    old_location = self._temp_build_dir
    new_build_dir = self._ideal_build_dir
    del self._ideal_build_dir
    if self.editable:
        name = self.name.lower()
    else:
        name = self.name
    new_location = os.path.join(new_build_dir, name)
    if not os.path.exists(new_build_dir):
        logger.debug('Creating directory %s' % new_build_dir)
        _make_build_dir(new_build_dir)
    if os.path.exists(new_location):
        raise InstallationError(
            'A package already exists in %s; please remove it to continue'
            % display_path(new_location))
    logger.debug('Moving package %s from %s to new location %s'
                 % (self, display_path(old_location), display_path(new_location)))
    shutil.move(old_location, new_location)
    self._temp_build_dir = new_location
    self.source_dir = new_location
    # Force the egg-info directory to be located again after the move.
    self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return self.req.project_name
@property
def url_name(self):
if self.req is None:
return None
return urllib.quote(self.req.unsafe_name)
@property
def setup_py(self):
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
return setup_py
def run_egg_info(self, force_root_egg_info=False):
    """Run ``setup.py egg_info`` in the source dir to generate metadata.

    Afterwards, previously unnamed requirements are named from the
    generated PKG-INFO and their build dir is moved into place.
    """
    assert self.source_dir
    if self.name:
        logger.notify('Running setup.py (path:%s) egg_info for package %s' % (self.setup_py, self.name))
    else:
        logger.notify('Running setup.py (path:%s) egg_info for package from %s' % (self.setup_py, self.url))
    logger.indent += 2
    try:
        # if it's distribute>=0.7, it won't contain an importable
        # setuptools, and having an egg-info dir blocks the ability of
        # setup.py to find setuptools plugins, so delete the egg-info dir if
        # no setuptools. it will get recreated by the run of egg_info
        # NOTE: this self.name check only works when installing from a specifier
        # (not archive path/urls)
        # TODO: take this out later
        if self.name == 'distribute' and not os.path.isdir(os.path.join(self.source_dir, 'setuptools')):
            rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
        # Substitute concrete values into the _run_setup_py template and run
        # it via `python -c`.
        script = self._run_setup_py
        script = script.replace('__SETUP_PY__', repr(self.setup_py))
        script = script.replace('__PKG_NAME__', repr(self.name))
        egg_info_cmd = [sys.executable, '-c', script, 'egg_info']
        # We can't put the .egg-info files at the root, because then the source code will be mistaken
        # for an installed egg, causing problems
        if self.editable or force_root_egg_info:
            egg_base_option = []
        else:
            egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
            if not os.path.exists(egg_info_dir):
                os.makedirs(egg_info_dir)
            egg_base_option = ['--egg-base', 'pip-egg-info']
        call_subprocess(
            egg_info_cmd + egg_base_option,
            cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False,
            command_level=logger.VERBOSE_DEBUG,
            command_desc='python setup.py egg_info')
    finally:
        logger.indent -= 2
    if not self.req:
        # The requirement was unnamed; name it from the generated metadata.
        self.req = pkg_resources.Requirement.parse(
            "%(Name)s==%(Version)s" % self.pkg_info())
        self.correct_build_location()
## FIXME: This is a lame hack, entirely for PasteScript which has
## a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
fp = open(filename, 'r')
data = fp.read()
fp.close()
return data
def egg_info_path(self, filename):
    """Return the absolute path of *filename* inside the egg-info directory.

    Locates (and caches in ``self._egg_info_path``) the ``*.egg-info``
    directory on first use: searched under the source dir itself for
    editables, otherwise under ``pip-egg-info``.
    """
    if self._egg_info_path is None:
        if self.editable:
            base = self.source_dir
        else:
            base = os.path.join(self.source_dir, 'pip-egg-info')
        filenames = os.listdir(base)
        if self.editable:
            # Editables: walk the whole tree, pruning VCS dirs,
            # virtualenv-looking dirs, and test dirs.
            filenames = []
            for root, dirs, files in os.walk(base):
                for dir in vcs.dirnames:
                    if dir in dirs:
                        dirs.remove(dir)
                # Iterate over a copy of ``dirs``, since mutating
                # a list while iterating over it can cause trouble.
                # (See https://github.com/pypa/pip/pull/462.)
                for dir in list(dirs):
                    # Don't search in anything that looks like a virtualenv environment
                    if (os.path.exists(os.path.join(root, dir, 'bin', 'python'))
                            or os.path.exists(os.path.join(root, dir, 'Scripts', 'Python.exe'))):
                        dirs.remove(dir)
                    # Also don't search through tests
                    if dir == 'test' or dir == 'tests':
                        dirs.remove(dir)
                filenames.extend([os.path.join(root, dir)
                                  for dir in dirs])
        filenames = [f for f in filenames if f.endswith('.egg-info')]

        if not filenames:
            raise InstallationError('No files/directories in %s (from %s)' % (base, filename))
        assert filenames, "No files/directories in %s (from %s)" % (base, filename)

        # if we have more than one match, we pick the toplevel one. This can
        # easily be the case if there is a dist folder which contains an
        # extracted tarball for testing purposes.
        if len(filenames) > 1:
            filenames.sort(key=lambda x: x.count(os.path.sep) +
                           (os.path.altsep and
                            x.count(os.path.altsep) or 0))
        self._egg_info_path = os.path.join(base, filenames[0])
    return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
data = self.egg_info_data(filename)
if not data:
return []
result = []
for line in data.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
result.append(line)
return result
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO')))
p.feed(data or '')
return p.close()
@property
def dependency_links(self):
    """Dependency link URLs declared in egg-info's dependency_links.txt."""
    return self.egg_info_lines('dependency_links.txt')
_requirements_section_re = re.compile(r'\[(.*?)\]')
def requirements(self, extras=()):
in_extra = None
for line in self.egg_info_lines('requires.txt'):
match = self._requirements_section_re.match(line.lower())
if match:
in_extra = match.group(1)
continue
if in_extra and in_extra not in extras:
logger.debug('skipping extra %s' % in_extra)
# Skip requirement for an extra we aren't requiring
continue
yield line
@property
def absolute_versions(self):
for qualifier, version in self.req.specs:
if qualifier == '==':
yield version
@property
def installed_version(self):
    """Version string taken from the parsed PKG-INFO metadata."""
    return self.pkg_info()['version']
def assert_source_matches_version(self):
    """Log whether the unpacked source's version satisfies this requirement."""
    assert self.source_dir
    version = self.installed_version
    if version in self.req:
        logger.debug('Source in %s has version %s, which satisfies requirement %s'
                     % (display_path(self.source_dir), version, self))
    else:
        logger.warn('Requested %s, but installing version %s' % (self, self.installed_version))
def update_editable(self, obtain=True):
    """Update (or export) the editable VCS checkout in self.source_dir.

    :param obtain: when True, obtain/update the checkout; when False,
        export it instead.
    """
    if not self.url:
        logger.info("Cannot update repository at %s; repository location is unknown" % self.source_dir)
        return
    assert self.editable
    assert self.source_dir
    if self.url.startswith('file:'):
        # Static paths don't get updated
        return
    # Editable URLs look like "<vcs>+<real-url>", e.g. "git+https://...".
    assert '+' in self.url, "bad url: %r" % self.url
    if not self.update:
        return
    vc_type, url = self.url.split('+', 1)
    backend = vcs.get_backend(vc_type)
    if backend:
        vcs_backend = backend(self.url)
        if obtain:
            vcs_backend.obtain(self.source_dir)
        else:
            vcs_backend.export(self.source_dir)
    else:
        assert 0, (
            'Unexpected version control type (in %s): %s'
            % (self.url, vc_type))
def uninstall(self, auto_confirm=False):
    """
    Uninstall the distribution currently satisfying this requirement.
    Prompts before removing or modifying files unless
    ``auto_confirm`` is True.
    Refuses to delete or modify files outside of ``sys.prefix`` -
    thus uninstallation within a virtual environment can only
    modify that virtual environment, even if the virtualenv is
    linked to global site-packages.

    Collects every path belonging to the installed distribution into an
    UninstallPathSet, then performs the removal and records the set on
    ``self.uninstalled`` so it can later be rolled back or committed.
    """
    if not self.check_if_exists():
        raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
    dist = self.satisfied_by or self.conflicts_with
    paths_to_remove = UninstallPathSet(dist)
    # Candidate metadata locations; which one exists tells us how the
    # package was installed (pip, Debian-patched pip, wheel, easy_install,
    # or a develop egg-link).
    pip_egg_info_path = os.path.join(dist.location,
                                     dist.egg_name()) + '.egg-info'
    dist_info_path = os.path.join(dist.location,
                                  '-'.join(dist.egg_name().split('-')[:2])
                                  ) + '.dist-info'
    # workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
    debian_egg_info_path = pip_egg_info_path.replace(
        '-py%s' % pkg_resources.PY_MAJOR, '')
    easy_install_egg = dist.egg_name() + '.egg'
    develop_egg_link = egg_link_path(dist)
    pip_egg_info_exists = os.path.exists(pip_egg_info_path)
    debian_egg_info_exists = os.path.exists(debian_egg_info_path)
    dist_info_exists = os.path.exists(dist_info_path)
    if pip_egg_info_exists or debian_egg_info_exists:
        # package installed by pip
        if pip_egg_info_exists:
            egg_info_path = pip_egg_info_path
        else:
            egg_info_path = debian_egg_info_path
        paths_to_remove.add(egg_info_path)
        if dist.has_metadata('installed-files.txt'):
            # installed-files.txt entries are relative to the egg-info dir.
            for installed_file in dist.get_metadata('installed-files.txt').splitlines():
                path = os.path.normpath(os.path.join(egg_info_path, installed_file))
                paths_to_remove.add(path)
        #FIXME: need a test for this elif block
        #occurs with --single-version-externally-managed/--record outside of pip
        elif dist.has_metadata('top_level.txt'):
            if dist.has_metadata('namespace_packages.txt'):
                namespaces = dist.get_metadata('namespace_packages.txt')
            else:
                namespaces = []
            # Remove each top-level package dir/module (and its .py/.pyc
            # forms), skipping namespace packages shared with other dists.
            for top_level_pkg in [p for p
                                  in dist.get_metadata('top_level.txt').splitlines()
                                  if p and p not in namespaces]:
                path = os.path.join(dist.location, top_level_pkg)
                paths_to_remove.add(path)
                paths_to_remove.add(path + '.py')
                paths_to_remove.add(path + '.pyc')
    elif dist.location.endswith(easy_install_egg):
        # package installed by easy_install
        paths_to_remove.add(dist.location)
        easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                        'easy-install.pth')
        paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
    elif develop_egg_link:
        # develop egg
        fh = open(develop_egg_link, 'r')
        link_pointer = os.path.normcase(fh.readline().strip())
        fh.close()
        assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
        paths_to_remove.add(develop_egg_link)
        easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                        'easy-install.pth')
        paths_to_remove.add_pth(easy_install_pth, dist.location)
    elif dist_info_exists:
        # package installed from a wheel
        for path in pip.wheel.uninstallation_paths(dist):
            paths_to_remove.add(path)
    # find distutils scripts= scripts
    if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
        for script in dist.metadata_listdir('scripts'):
            paths_to_remove.add(os.path.join(bin_py, script))
            if sys.platform == 'win32':
                paths_to_remove.add(os.path.join(bin_py, script) + '.bat')
    # find console_scripts
    if dist.has_metadata('entry_points.txt'):
        config = ConfigParser.SafeConfigParser()
        config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
        if config.has_section('console_scripts'):
            for name, value in config.items('console_scripts'):
                paths_to_remove.add(os.path.join(bin_py, name))
                if sys.platform == 'win32':
                    # setuptools generates these wrapper files on Windows.
                    paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
                    paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
                    paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
    # Actually remove (or prompt for) everything collected, then remember
    # the set so rollback_uninstall/commit_uninstall can act on it.
    paths_to_remove.remove(auto_confirm)
    self.uninstalled = paths_to_remove
def rollback_uninstall(self):
    """Undo a previously recorded uninstall, if one was performed."""
    if not self.uninstalled:
        logger.error("Can't rollback %s, nothing uninstalled."
                     % (self.project_name,))
        return
    self.uninstalled.rollback()
def commit_uninstall(self):
    """Finalize a previously recorded uninstall, if one was performed."""
    if not self.uninstalled:
        logger.error("Can't commit %s, nothing uninstalled."
                     % (self.project_name,))
        return
    self.uninstalled.commit()
def archive(self, build_dir):
    """Zip this requirement's source tree into ``build_dir``.

    The archive is named ``<name>-<version>.zip``.  If a file of that
    name already exists, the user is asked whether to ignore, wipe, or
    back it up.  ``pip-egg-info`` directories and pip's delete-marker
    file are excluded from the archive.
    """
    assert self.source_dir
    create_archive = True
    archive_name = '%s-%s.zip' % (self.name, self.installed_version)
    archive_path = os.path.join(build_dir, archive_name)
    if os.path.exists(archive_path):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(archive_path), ('i', 'w', 'b'))
        if response == 'i':
            create_archive = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(archive_path))
            os.remove(archive_path)
        elif response == 'b':
            dest_file = backup_dir(archive_path)
            logger.warn('Backing up %s to %s'
                        % (display_path(archive_path), display_path(dest_file)))
            shutil.move(archive_path, dest_file)
    if create_archive:
        zip = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED)
        dir = os.path.normcase(os.path.abspath(self.source_dir))
        for dirpath, dirnames, filenames in os.walk(dir):
            if 'pip-egg-info' in dirnames:
                dirnames.remove('pip-egg-info')
            for dirname in dirnames:
                # Write an explicit directory entry so empty dirs survive.
                dirname = os.path.join(dirpath, dirname)
                name = self._clean_zip_name(dirname, dir)
                zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
                zipdir.external_attr = 0x1ED << 16 # 0o755
                zip.writestr(zipdir, '')
            for filename in filenames:
                if filename == PIP_DELETE_MARKER_FILENAME:
                    continue
                filename = os.path.join(dirpath, filename)
                name = self._clean_zip_name(filename, dir)
                zip.write(filename, self.name + '/' + name)
        zip.close()
        # NOTE(review): no matching ``logger.indent += 2`` is visible in this
        # method -- confirm the indent bookkeeping balances with the caller.
        logger.indent -= 2
        logger.notify('Saved %s' % display_path(archive_path))
def _clean_zip_name(self, name, prefix):
    """Return *name* relative to *prefix*, with '/' as the separator
    (suitable for zip archive member names)."""
    expected_start = prefix + os.path.sep
    assert name.startswith(expected_start), (
        "name %r doesn't start with prefix %r" % (name, prefix))
    relative = name[len(prefix) + 1:]
    return relative.replace(os.path.sep, '/')
def install(self, install_options, global_options=(), root=None):
    """Install this requirement.

    Editable requirements are delegated to install_editable(); wheels to
    move_wheel_files().  Otherwise runs ``setup.py install`` in a
    subprocess with ``--record`` and rewrites the record into
    ``installed-files.txt`` inside the egg-info directory (paths made
    relative to it), so the package can later be uninstalled.

    root: optional alternative root dir passed through as ``--root``.
    Sets ``self.install_succeeded`` on success.
    """
    if self.editable:
        self.install_editable(install_options, global_options)
        return
    if self.is_wheel:
        self.move_wheel_files(self.source_dir)
        self.install_succeeded = True
        return
    # Temporary home for the --record output; always cleaned up below.
    temp_location = tempfile.mkdtemp('-record', 'pip-')
    record_filename = os.path.join(temp_location, 'install-record.txt')
    try:
        install_args = [sys.executable]
        install_args.append('-c')
        # Run setup.py through setuptools, normalizing CRLF so exec works
        # on files with Windows line endings.
        install_args.append(
            "import setuptools;__file__=%r;"\
            "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py)
        install_args += list(global_options) + ['install','--record', record_filename]
        if not self.as_egg:
            install_args += ['--single-version-externally-managed']
        if root is not None:
            install_args += ['--root', root]
        if running_under_virtualenv():
            ## FIXME: I'm not sure if this is a reasonable location; probably not
            ## but we can't put it in the default location, as that is a virtualenv symlink that isn't writable
            install_args += ['--install-headers',
                             os.path.join(sys.prefix, 'include', 'site',
                                          'python' + get_python_version())]
        logger.notify('Running setup.py install for %s' % self.name)
        logger.indent += 2
        try:
            call_subprocess(install_args + install_options,
                            cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False)
        finally:
            logger.indent -= 2
        if not os.path.exists(record_filename):
            logger.notify('Record file %s not found' % record_filename)
            return
        self.install_succeeded = True
        if self.as_egg:
            # there's no --always-unzip option we can pass to install command
            # so we unable to save the installed-files.txt
            return

        def prepend_root(path):
            # Re-apply --root to absolute paths from the record file.
            if root is None or not os.path.isabs(path):
                return path
            else:
                return change_root(root, path)

        # First pass: locate the installed .egg-info directory from the
        # record; bail out (with a warning) if it isn't there.
        f = open(record_filename)
        for line in f:
            line = line.strip()
            if line.endswith('.egg-info'):
                egg_info_dir = prepend_root(line)
                break
        else:
            logger.warn('Could not find .egg-info directory in install record for %s' % self)
            ## FIXME: put the record somewhere
            ## FIXME: should this be an error?
            return
        f.close()
        # Second pass: rewrite every recorded path relative to the
        # egg-info dir (directories get a trailing separator).
        new_lines = []
        f = open(record_filename)
        for line in f:
            filename = line.strip()
            if os.path.isdir(filename):
                filename += os.path.sep
            new_lines.append(make_path_relative(prepend_root(filename), egg_info_dir))
        f.close()
        f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w')
        f.write('\n'.join(new_lines)+'\n')
        f.close()
    finally:
        if os.path.exists(record_filename):
            os.remove(record_filename)
        os.rmdir(temp_location)
def remove_temporary_source(self):
    """Remove the source files from this requirement, if they are marked
    for deletion"""
    # Only delete source dirs that came from a bundle or that pip itself
    # created (indicated by the delete-marker file).
    if self.is_bundle or os.path.exists(self.delete_marker_filename):
        logger.info('Removing source in %s' % self.source_dir)
        if self.source_dir:
            rmtree(self.source_dir)
        self.source_dir = None
    # The temporary build dir is always pip-owned, so it is always removed.
    if self._temp_build_dir and os.path.exists(self._temp_build_dir):
        rmtree(self._temp_build_dir)
    self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
    """Install this requirement in editable mode by running
    ``setup.py develop --no-deps`` in a subprocess.

    Sets ``self.install_succeeded`` when the subprocess completes.
    """
    logger.notify('Running setup.py develop for %s' % self.name)
    logger.indent += 2
    try:
        ## FIXME: should we do --install-headers here too?
        # The -c snippet normalizes CRLF so exec works on files with
        # Windows line endings.
        call_subprocess(
            [sys.executable, '-c',
             "import setuptools; __file__=%r; exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py]
            + list(global_options) + ['develop', '--no-deps'] + list(install_options),
            cwd=self.source_dir, filter_stdout=self._filter_install,
            show_stdout=False)
    finally:
        logger.indent -= 2
    self.install_succeeded = True
def _filter_install(self, line):
    """Classify one line of setup.py output for logging.

    Routine progress lines are demoted to INFO; everything else keeps
    the default NOTIFY level.  Returns ``(level, line)``.
    """
    routine_patterns = (
        r'^running .*',
        r'^writing .*',
        '^creating .*',
        '^[Cc]opying .*',
        r'^reading .*',
        r"^removing .*\.egg-info' \(and everything under it\)$",
        r'^byte-compiling ',
        # Not sure what this warning is, but it seems harmless:
        r"^warning: manifest_maker: standard file '-c' not found$",
    )
    stripped = line.strip()
    for pattern in routine_patterns:
        if re.search(pattern, stripped):
            return (logger.INFO, line)
    return (logger.NOTIFY, line)
def check_if_exists(self):
    """Find an installed distribution that satisfies or conflicts
    with this requirement, and set self.satisfied_by or
    self.conflicts_with appropriately.

    Returns True when an installed distribution (satisfying or
    conflicting) was found, False otherwise or when self.req is None.
    """
    if self.req is None:
        return False
    try:
        # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
        # if we've already set distribute as a conflict to setuptools
        # then this check has already run before. we don't want it to
        # run again, and return False, since it would block the uninstall
        # TODO: remove this later
        if (self.req.project_name == 'setuptools'
                and self.conflicts_with
                and self.conflicts_with.project_name == 'distribute'):
            return True
        else:
            self.satisfied_by = pkg_resources.get_distribution(self.req)
    except pkg_resources.DistributionNotFound:
        return False
    except pkg_resources.VersionConflict:
        # Something with this name is installed, but at the wrong version.
        existing_dist = pkg_resources.get_distribution(self.req.project_name)
        if self.use_user_site:
            if dist_in_usersite(existing_dist):
                self.conflicts_with = existing_dist
            elif running_under_virtualenv() and dist_in_site_packages(existing_dist):
                # A site-packages copy would shadow a user-site install.
                raise InstallationError("Will not install to the user site because it will lack sys.path precedence to %s in %s"
                                        %(existing_dist.project_name, existing_dist.location))
        else:
            self.conflicts_with = existing_dist
    return True
@property
def is_wheel(self):
    """Truthy when self.url points at a wheel archive (falsy url is
    returned unchanged, matching ``a and b`` semantics)."""
    if not self.url:
        return self.url
    return '.whl' in self.url
@property
def is_bundle(self):
    """Whether the temp build dir contains a pip/pyinstall bundle
    manifest.  The result is cached in ``self._is_bundle``."""
    cached = self._is_bundle
    if cached is not None:
        return cached
    build_dir = self._temp_build_dir
    if not build_dir:
        ## FIXME: this doesn't seem right:
        return False
    manifest_names = ('pip-manifest.txt', 'pyinstall-manifest.txt')
    self._is_bundle = any(
        os.path.exists(os.path.join(build_dir, manifest))
        for manifest in manifest_names)
    return self._is_bundle
def bundle_requirements(self):
    """Yield an InstallRequirement for every package shipped inside this
    bundle: editable checkouts first (reconstructing their VCS URL from
    the backend's bundle file when present), then plain build dirs."""
    for dest_dir in self._bundle_editable_dirs:
        package = os.path.basename(dest_dir)
        ## FIXME: svnism:
        for vcs_backend in vcs.backends:
            url = rev = None
            vcs_bundle_file = os.path.join(
                dest_dir, vcs_backend.bundle_file)
            if os.path.exists(vcs_bundle_file):
                vc_type = vcs_backend.name
                fp = open(vcs_bundle_file)
                content = fp.read()
                fp.close()
                url, rev = vcs_backend().parse_vcs_bundle_file(content)
                break
        if url:
            # Rebuild the "<vcs>+<url>@<rev>" editable URL form.
            url = '%s+%s@%s' % (vc_type, url, rev)
        else:
            url = None
        yield InstallRequirement(
            package, self, editable=True, url=url,
            update=False, source_dir=dest_dir, from_bundle=True)
    for dest_dir in self._bundle_build_dirs:
        package = os.path.basename(dest_dir)
        yield InstallRequirement(package, self, source_dir=dest_dir, from_bundle=True)
def move_bundle_files(self, dest_build_dir, dest_src_dir):
    """Move an unpacked bundle's contents out of the temp build dir.

    Entries under ``<tmp>/src`` (editable checkouts) go to
    *dest_src_dir* and entries under ``<tmp>/build`` to
    *dest_build_dir*.  Records the destination paths on
    ``self._bundle_editable_dirs`` / ``self._bundle_build_dirs`` and
    clears ``self._temp_build_dir``.
    """
    base = self._temp_build_dir
    assert base
    src_dir = os.path.join(base, 'src')
    build_dir = os.path.join(base, 'build')
    bundle_build_dirs = []
    bundle_editable_dirs = []
    for source_dir, dest_dir, dir_collection in [
        (src_dir, dest_src_dir, bundle_editable_dirs),
        (build_dir, dest_build_dir, bundle_build_dirs)]:
        if os.path.exists(source_dir):
            for dirname in os.listdir(source_dir):
                dest = os.path.join(dest_dir, dirname)
                # The destination is recorded even when we can't move it,
                # so bundle_requirements() still sees the package.
                dir_collection.append(dest)
                if os.path.exists(dest):
                    logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s'
                                % (dest, dirname, self))
                    continue
                if not os.path.exists(dest_dir):
                    logger.info('Creating directory %s' % dest_dir)
                    os.makedirs(dest_dir)
                shutil.move(os.path.join(source_dir, dirname), dest)
            if not os.listdir(source_dir):
                os.rmdir(source_dir)
    self._temp_build_dir = None
    self._bundle_build_dirs = bundle_build_dirs
    self._bundle_editable_dirs = bundle_editable_dirs
def move_wheel_files(self, wheeldir):
    """Install this requirement's files from an unpacked wheel dir by
    delegating to the module-level ``move_wheel_files`` function."""
    move_wheel_files(self.name, self.req, wheeldir, user=self.use_user_site, home=self.target_dir)
@property
def delete_marker_filename(self):
    """Full path of the marker file flagging self.source_dir as
    pip-created (and therefore safe to delete)."""
    assert self.source_dir
    marker = PIP_DELETE_MARKER_FILENAME
    return os.path.join(self.source_dir, marker)
class Requirements(object):
    """A dict-like container that preserves the insertion order of its keys.

    Used to keep requirements in the order the user supplied them while
    still allowing constant-time lookup by name.
    """

    def __init__(self):
        self._keys = []   # requirement names, in insertion order
        self._dict = {}   # name -> requirement

    def keys(self):
        """Return the keys in insertion order."""
        return self._keys

    def values(self):
        """Return the values in the same order as keys()."""
        return [self._dict[key] for key in self._keys]

    def __contains__(self, item):
        # _keys and _dict always hold exactly the same names (see
        # __setitem__), so a dict lookup is equivalent to the list scan
        # but O(1) instead of O(n).
        return item in self._dict

    def __setitem__(self, key, value):
        if key not in self._dict:
            self._keys.append(key)
        self._dict[key] = value

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
        return 'Requirements({%s})' % ', '.join(values)
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, download_cache=None,
upgrade=False, ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False, use_user_site=False,
session=None):
self.build_dir = build_dir
self.src_dir = src_dir
self.download_dir = download_dir
self.download_cache = download_cache
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir #set from --target option
self.session = session or PipSession()
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
name = install_req.name
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
if not name:
#url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
## FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def has_editables(self):
if any(req.editable for req in self.requirements.values()):
return True
if any(req.editable for req in self.unnamed_requirements):
return True
return False
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.fatal('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def locate_files(self):
## FIXME: duplicates code from prepare_files; relevant code should
## probably be factored out into a separate method
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install_needed = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
#don't uninstall conflict if user install and and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install_needed = False
if req_to_install.satisfied_by:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
if req_to_install.source_dir is None:
req_to_install.source_dir = req_to_install.build_location(self.src_dir)
elif install_needed:
req_to_install.source_dir = req_to_install.build_location(self.build_dir, not self.is_download)
if req_to_install.source_dir is not None and not os.path.isdir(req_to_install.source_dir):
raise InstallationError('Could not install requirement %s '
'because source folder %s does not exist '
'(perhaps --no-download was used without first running '
'an equivalent install with --no-install?)'
% (req_to_install, req_to_install.source_dir))
def prepare_files(self, finder, force_root_egg_info=False, bundle=False):
"""Prepare process. Create temp directories, download and/or unpack files."""
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound:
not_found = sys.exc_info()[1]
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
#don't uninstall conflict if user install and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.notify('Requirement already up-to-date: %s'
% req_to_install)
else:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
logger.notify('Obtaining %s' % req_to_install)
elif install:
if req_to_install.url and req_to_install.url.lower().startswith('file:'):
logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url)))
else:
logger.notify('Downloading/unpacking %s' % req_to_install)
logger.indent += 2
try:
is_bundle = False
is_wheel = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
##@@ if filesystem packages are not marked
##editable in a req, a non deterministic error
##occurs when the script attempts to unpack the
##build directory
# NB: This call can result in the creation of a temporary build directory
location = req_to_install.build_location(self.build_dir, not self.is_download)
unpack = True
url = None
# In the case where the req comes from a bundle, we should
# assume a build dir exists and move on
if req_to_install.from_bundle:
pass
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
elif os.path.exists(os.path.join(location, 'setup.py')):
raise PreviousBuildDirError(textwrap.dedent("""
pip can't proceed with requirement '%s' due to a pre-existing build directory.
location: %s
This is likely due to a previous installation that failed.
pip is being responsible and not assuming it can delete this.
Please delete it and try again.
""" % (req_to_install, location)))
else:
## FIXME: this won't upgrade when there's an existing package unpacked in `location`
if req_to_install.url is None:
if not_found:
raise not_found
url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
else:
## FIXME: should req_to_install.url already be a link?
url = Link(req_to_install.url)
assert url
if url:
try:
self.unpack_url(url, location, self.is_download)
except HTTPError:
e = sys.exc_info()[1]
logger.fatal('Could not install requirement %s because of error %s'
% (req_to_install, e))
raise InstallationError(
'Could not install requirement %s because of HTTP error %s for URL %s'
% (req_to_install, e, url))
else:
unpack = False
if unpack:
is_bundle = req_to_install.is_bundle
is_wheel = url and url.filename.endswith('.whl')
if is_bundle:
req_to_install.move_bundle_files(self.build_dir, self.src_dir)
for subreq in req_to_install.bundle_requirements():
reqs.append(subreq)
self.add_requirement(subreq)
elif self.is_download:
req_to_install.source_dir = location
if not is_wheel:
# FIXME: see https://github.com/pypa/pip/issues/1112
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
dist = list(pkg_resources.find_distributions(location))[0]
if not req_to_install.req:
req_to_install.req = dist.as_requirement()
self.add_requirement(req_to_install)
if not self.ignore_dependencies:
for subreq in dist.requires(req_to_install.extras):
if self.has_requirement(subreq.project_name):
continue
subreq = InstallRequirement(str(subreq),
req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
if force_root_egg_info:
# We need to run this to make sure that the .egg-info/
# directory is created for packing in the bundle
req_to_install.run_egg_info(force_root_egg_info=True)
req_to_install.assert_source_matches_version()
#@@ sketchy way of identifying packages not grabbed from an index
if bundle and req_to_install.url:
self.copy_to_build_dir(req_to_install)
install = False
# req_to_install.req is only avail after unpack for URL pkgs
# repeat check_if_exists to uninstall-on-upgrade (#14)
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
#don't uninstall conflict if user install and and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if not (is_bundle or is_wheel):
## FIXME: shouldn't be globally added:
finder.add_dependency_links(req_to_install.dependency_links)
if (req_to_install.extras):
logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras))
if not self.ignore_dependencies:
for req in req_to_install.requirements(req_to_install.extras):
try:
name = pkg_resources.Requirement.parse(req).project_name
except ValueError:
e = sys.exc_info()[1]
## FIXME: proper warning
logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install))
continue
if self.has_requirement(name):
## FIXME: check for conflict
continue
subreq = InstallRequirement(req, req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
#'unnamed' requirements will get added here
self.add_requirement(req_to_install)
if self.is_download or req_to_install._temp_build_dir is not None:
self.reqs_to_cleanup.append(req_to_install)
else:
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')):
self.copy_to_build_dir(req_to_install)
finally:
logger.indent -= 2
def cleanup_files(self, bundle=False):
"""Clean up files, remove builds."""
logger.notify('Cleaning up...')
logger.indent += 2
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
remove_dir = []
if self._pip_has_created_build_dir():
remove_dir.append(self.build_dir)
# The source dir of a bundle can always be removed.
# FIXME: not if it pre-existed the bundle!
if bundle:
remove_dir.append(self.src_dir)
for dir in remove_dir:
if os.path.exists(dir):
logger.info('Removing temporary dir %s...' % dir)
rmtree(dir)
logger.indent -= 2
def _pip_has_created_build_dir(self):
return (self.build_dir == build_prefix and
os.path.exists(os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)))
def copy_to_build_dir(self, req_to_install):
target_dir = req_to_install.editable and self.src_dir or self.build_dir
logger.info("Copying %s to %s" % (req_to_install.name, target_dir))
dest = os.path.join(target_dir, req_to_install.name)
shutil.copytree(req_to_install.source_dir, dest)
call_subprocess(["python", "%s/setup.py" % dest, "clean"], cwd=dest,
command_desc='python setup.py clean')
def unpack_url(self, link, location, only_download=False):
if only_download:
loc = self.download_dir
else:
loc = location
if is_vcs_url(link):
return unpack_vcs_link(link, loc, only_download)
# a local file:// index could have links with hashes
elif not link.hash and is_file_url(link):
return unpack_file_url(link, loc)
else:
if self.download_cache:
self.download_cache = os.path.expanduser(self.download_cache)
retval = unpack_http_url(link, location, self.download_cache, self.download_dir, self.session)
if only_download:
write_delete_marker_file(location)
return retval
def install(self, install_options, global_options=(), *args, **kwargs):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
to_install = [r for r in self.requirements.values()
if not r.satisfied_by]
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# move the distribute-0.7.X wrapper to the end because it does not
# install a setuptools package. by moving it to the end, we ensure it's
# setuptools dependency is handled first, which will provide the
# setuptools package
# TODO: take this out later
distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
for req in to_install:
if req.name == 'distribute' and req.installed_version in distribute_req:
to_install.remove(req)
to_install.append(req)
if to_install:
logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
logger.indent += 2
try:
for requirement in to_install:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# when upgrading from distribute-0.6.X to the new merged
# setuptools in py2, we need to force setuptools to uninstall
# distribute. In py3, which is always using distribute, this
# conversion is already happening in distribute's pkg_resources.
# It's ok *not* to check if setuptools>=0.7 because if someone
# were actually trying to ugrade from distribute to setuptools
# 0.6.X, then all this could do is actually help, although that
# upgade path was certainly never "supported"
# TODO: remove this later
if requirement.name == 'setuptools':
try:
# only uninstall distribute<0.7. For >=0.7, setuptools
# will also be present, and that's what we need to
# uninstall
distribute_requirement = pkg_resources.Requirement.parse("distribute<0.7")
existing_distribute = pkg_resources.get_distribution("distribute")
if existing_distribute in distribute_requirement:
requirement.conflicts_with = existing_distribute
except pkg_resources.DistributionNotFound:
# distribute wasn't installed, so nothing to do
pass
if requirement.conflicts_with:
logger.notify('Found existing installation: %s'
% requirement.conflicts_with)
logger.indent += 2
try:
requirement.uninstall(auto_confirm=True)
finally:
logger.indent -= 2
try:
requirement.install(install_options, global_options, *args, **kwargs)
except:
# if install did not succeed, rollback previous uninstall
if requirement.conflicts_with and not requirement.install_succeeded:
requirement.rollback_uninstall()
raise
else:
if requirement.conflicts_with and requirement.install_succeeded:
requirement.commit_uninstall()
requirement.remove_temporary_source()
finally:
logger.indent -= 2
self.successfully_installed = to_install
def create_bundle(self, bundle_filename):
## FIXME: can't decide which is better; zip is easier to read
## random files from, but tar.bz2 is smaller and not as lame a
## format.
## FIXME: this file should really include a manifest of the
## packages, maybe some other metadata files. It would make
## it easier to detect as well.
zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED)
vcs_dirs = []
for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'):
dir = os.path.normcase(os.path.abspath(dir))
for dirpath, dirnames, filenames in os.walk(dir):
for backend in vcs.backends:
vcs_backend = backend()
vcs_url = vcs_rev = None
if vcs_backend.dirname in dirnames:
for vcs_dir in vcs_dirs:
if dirpath.startswith(vcs_dir):
# vcs bundle file already in parent directory
break
else:
vcs_url, vcs_rev = vcs_backend.get_info(
os.path.join(dir, dirpath))
vcs_dirs.append(dirpath)
vcs_bundle_file = vcs_backend.bundle_file
vcs_guide = vcs_backend.guide % {'url': vcs_url,
'rev': vcs_rev}
dirnames.remove(vcs_backend.dirname)
break
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zip.writestr(basename + '/' + name + '/', '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, basename + '/' + name)
if vcs_url:
name = os.path.join(dirpath, vcs_bundle_file)
name = self._clean_zip_name(name, dir)
zip.writestr(basename + '/' + name, vcs_guide)
zip.writestr('pip-manifest.txt', self.bundle_requirements())
zip.close()
BUNDLE_HEADER = '''\
# This is a pip bundle file, that contains many source packages
# that can be installed as a group. You can install this like:
# pip this_file.zip
# The rest of the file contains a list of all the packages included:
'''
    def bundle_requirements(self):
        """Return the text of the bundle's pip-manifest.txt.

        Requirements with no ``comes_from`` (i.e. explicitly requested) are
        listed first, followed by the ones installed to satisfy them; each
        line has the form ``name==installed_version``.
        """
        parts = [self.BUNDLE_HEADER]
        for req in [req for req in self.requirements.values()
                    if not req.comes_from]:
            parts.append('%s==%s\n' % (req.name, req.installed_version))
        parts.append('# These packages were installed to satisfy the above requirements:\n')
        for req in [req for req in self.requirements.values()
                    if req.comes_from]:
            parts.append('%s==%s\n' % (req.name, req.installed_version))
        ## FIXME: should we do something with self.unnamed_requirements?
        return ''.join(parts)
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
def _make_build_dir(build_dir):
    # Create the build directory and drop pip's delete-marker file into it so
    # the directory can later be recognized as pip-owned scratch space.
    os.makedirs(build_dir)
    write_delete_marker_file(build_dir)
_scheme_re = re.compile(r'^(http|https|file):', re.I)
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None):
    """Parse a requirements file, yielding an InstallRequirement per line.

    :param filename: path or URL of the requirements file.
    :param finder: optional PackageFinder, updated in place by index/link
        option lines.  All finder-mutating branches are now guarded with
        ``if finder:`` (previously only some were, so a ``finder=None`` call
        would crash on lines like ``--no-index``).
    :param comes_from: description of where this file was referenced from.
    :param options: optional options object (skip_requirements_regex,
        default_vcs, pre).
    :param session: optional PipSession; a default one is created if omitted.
    """
    if session is None:
        session = PipSession()

    skip_match = None
    skip_regex = options.skip_requirements_regex if options else None
    if skip_regex:
        skip_match = re.compile(skip_regex)
    reqs_file_dir = os.path.dirname(os.path.abspath(filename))
    filename, content = get_file_content(filename,
                                         comes_from=comes_from,
                                         session=session,
                                         )
    for line_number, line in enumerate(content.splitlines()):
        line_number += 1  # human-friendly, 1-based line numbers
        line = line.strip()
        # Skip blanks, comments, and anything matching --skip-requirements-regex.
        if not line or line.startswith('#'):
            continue
        if skip_match and skip_match.search(line):
            continue
        if line.startswith('-r') or line.startswith('--requirement'):
            # Recursively parse a referenced requirements file.
            if line.startswith('-r'):
                req_url = line[2:].strip()
            else:
                req_url = line[len('--requirement'):].strip().strip('=')
            if _scheme_re.search(filename):
                # Relative to a URL
                req_url = urlparse.urljoin(filename, req_url)
            elif not _scheme_re.search(req_url):
                req_url = os.path.join(os.path.dirname(filename), req_url)
            for item in parse_requirements(req_url, finder, comes_from=filename, options=options, session=session):
                yield item
        elif line.startswith('-Z') or line.startswith('--always-unzip'):
            # No longer used, but previously these were used in
            # requirement files, so we'll ignore.
            pass
        elif line.startswith('-f') or line.startswith('--find-links'):
            if line.startswith('-f'):
                line = line[2:].strip()
            else:
                line = line[len('--find-links'):].strip().lstrip('=')
            ## FIXME: it would be nice to keep track of the source of
            ## the find_links:
            # support a find-links local path relative to a requirements file
            relative_to_reqs_file = os.path.join(reqs_file_dir, line)
            if os.path.exists(relative_to_reqs_file):
                line = relative_to_reqs_file
            if finder:
                finder.find_links.append(line)
        elif line.startswith('-i') or line.startswith('--index-url'):
            if line.startswith('-i'):
                line = line[2:].strip()
            else:
                line = line[len('--index-url'):].strip().lstrip('=')
            if finder:
                finder.index_urls = [line]
        elif line.startswith('--extra-index-url'):
            line = line[len('--extra-index-url'):].strip().lstrip('=')
            if finder:
                finder.index_urls.append(line)
        elif line.startswith('--use-wheel'):
            if finder:
                finder.use_wheel = True
        elif line.startswith('--no-index'):
            if finder:
                finder.index_urls = []
        elif line.startswith("--allow-external"):
            line = line[len("--allow-external"):].strip().lstrip("=")
            if finder:
                finder.allow_external |= set([normalize_name(line).lower()])
        elif line.startswith("--allow-all-external"):
            if finder:
                finder.allow_all_external = True
        # Remove in 1.7
        elif line.startswith("--no-allow-external"):
            pass
        # Remove in 1.7
        elif line.startswith("--no-allow-insecure"):
            pass
        # Remove after 1.7
        elif line.startswith("--allow-insecure"):
            line = line[len("--allow-insecure"):].strip().lstrip("=")
            if finder:
                finder.allow_unverified |= set([normalize_name(line).lower()])
        elif line.startswith("--allow-unverified"):
            line = line[len("--allow-unverified"):].strip().lstrip("=")
            if finder:
                finder.allow_unverified |= set([normalize_name(line).lower()])
        else:
            # A real requirement line: either editable (-e) or plain.
            comes_from = '-r %s (line %s)' % (filename, line_number)
            if line.startswith('-e') or line.startswith('--editable'):
                if line.startswith('-e'):
                    line = line[2:].strip()
                else:
                    line = line[len('--editable'):].strip().lstrip('=')
                req = InstallRequirement.from_editable(
                    line, comes_from=comes_from, default_vcs=options.default_vcs if options else None)
            else:
                req = InstallRequirement.from_line(line, comes_from, prereleases=getattr(options, "pre", None))
            yield req
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
## FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
    """Parses svn+http://blahblah@rev#egg=Foobar into a requirement
    (Foobar) and a URL"""
    url = editable_req
    extras = None

    # If a file path is specified with extras, strip off the extras.
    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
    if m:
        url_no_extras = m.group(1)
        extras = m.group(2)
    else:
        url_no_extras = url

    # Local directory: must contain a setup.py; convert to a file: URL.
    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % url_no_extras)
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith('file:'):
        if extras:
            # Route the "[...]" text through pkg_resources so extras are
            # parsed exactly as in a normal requirement string.
            return None, url_no_extras, pkg_resources.Requirement.parse('__placeholder__' + extras).extras
        else:
            return None, url_no_extras, None

    # Prefix bare "<vcs>:" URLs with their scheme, e.g. "git:..." -> "git+git:...".
    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break

    if '+' not in url:
        if default_vcs:
            url = default_vcs + '+' + url
        else:
            raise InstallationError(
                '%s should either be a path to a local project or a VCS url beginning with svn+, git+, hg+, or bzr+' % editable_req)

    vc_type = url.split('+', 1)[0].lower()

    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)

    try:
        options = _build_editable_options(editable_req)
    except Exception:
        message = sys.exc_info()[1]
        raise InstallationError(
            '--editable=%s error in editable options:%s' % (editable_req, message))

    # Without a usable #egg= option, fall back to guessing the name from the URL.
    if not options or 'egg' not in options:
        req = _build_req_from_url(editable_req)
        if not req:
            raise InstallationError('--editable=%s is not the right format; it must have #egg=Package' % editable_req)
    else:
        req = options['egg']

    package = _strip_postfix(req)
    return package, url, options
class UninstallPathSet(object):
    """A set of file paths to be removed in the uninstallation of a
    requirement."""

    def __init__(self, dist):
        # Files/directories scheduled for removal.
        self.paths = set()
        # Paths we refuse to touch because they are outside the environment.
        self._refuse = set()
        # Maps .pth file path -> UninstallPthEntries with entries to strip.
        self.pth = {}
        self.dist = dist
        # Temp dir where removed files are stashed until commit()/rollback().
        self.save_dir = None
        self._moved_paths = []

    def _permitted(self, path):
        """
        Return True if the given path is one we are permitted to
        remove/modify, False otherwise.
        """
        return is_local(path)

    def _can_uninstall(self):
        # Refuse to uninstall distributions living outside this environment.
        if not dist_is_local(self.dist):
            logger.notify("Not uninstalling %s at %s, outside environment %s"
                          % (self.dist.project_name, normalize_path(self.dist.location), sys.prefix))
            return False
        return True

    def add(self, path):
        """Schedule *path* for removal, if it exists and is local."""
        path = normalize_path(path)
        if not os.path.exists(path):
            return
        if self._permitted(path):
            self.paths.add(path)
        else:
            self._refuse.add(path)

        # __pycache__ files can show up after 'installed-files.txt' is created, due to imports
        if os.path.splitext(path)[1] == '.py' and uses_pycache:
            self.add(imp.cache_from_source(path))

    def add_pth(self, pth_file, entry):
        """Schedule *entry* for removal from the .pth file *pth_file*."""
        pth_file = normalize_path(pth_file)
        if self._permitted(pth_file):
            if pth_file not in self.pth:
                self.pth[pth_file] = UninstallPthEntries(pth_file)
            self.pth[pth_file].add(entry)
        else:
            self._refuse.add(pth_file)

    def compact(self, paths):
        """Compact a path set to contain the minimal number of paths
        necessary to contain all paths in the set. If /a/path/ and
        /a/path/to/a/file.txt are both in the set, leave only the
        shorter path."""
        short_paths = set()
        # Sorting by length guarantees a parent is considered before its children.
        for path in sorted(paths, key=len):
            if not any((path.startswith(shortpath) and
                        path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
                       for shortpath in short_paths):
                short_paths.add(path)
        return short_paths

    def _stash(self, path):
        # Mirror *path* (minus any drive letter) under the stash directory.
        return os.path.join(
            self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))

    def remove(self, auto_confirm=False):
        """Remove paths in ``self.paths`` with confirmation (unless
        ``auto_confirm`` is True)."""
        if not self._can_uninstall():
            return
        if not self.paths:
            logger.notify("Can't uninstall '%s'. No files were found to uninstall." % self.dist.project_name)
            return
        logger.notify('Uninstalling %s:' % self.dist.project_name)
        logger.indent += 2
        paths = sorted(self.compact(self.paths))
        try:
            if auto_confirm:
                response = 'y'
            else:
                for path in paths:
                    logger.notify(path)
                response = ask('Proceed (y/n)? ', ('y', 'n'))
            if self._refuse:
                logger.notify('Not removing or modifying (outside of prefix):')
                for path in self.compact(self._refuse):
                    logger.notify(path)
            if response == 'y':
                # Files are moved (not deleted) into save_dir so the whole
                # operation can be rolled back until commit() is called.
                self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
                                                 prefix='pip-')
                for path in paths:
                    new_path = self._stash(path)
                    logger.info('Removing file or directory %s' % path)
                    self._moved_paths.append(path)
                    renames(path, new_path)
                for pth in self.pth.values():
                    pth.remove()
                logger.notify('Successfully uninstalled %s' % self.dist.project_name)
        finally:
            logger.indent -= 2

    def rollback(self):
        """Rollback the changes previously made by remove()."""
        if self.save_dir is None:
            logger.error("Can't roll back %s; was not uninstalled" % self.dist.project_name)
            return False
        logger.notify('Rolling back uninstall of %s' % self.dist.project_name)
        for path in self._moved_paths:
            tmp_path = self._stash(path)
            logger.info('Replacing %s' % path)
            renames(tmp_path, path)
        # BUG FIX: iterate the UninstallPthEntries objects, not the dict's
        # keys -- iterating `self.pth` directly yields file-name strings,
        # which have no rollback() method (remove() above already uses
        # .values() correctly).
        for pth in self.pth.values():
            pth.rollback()

    def commit(self):
        """Remove temporary save dir: rollback will no longer be possible."""
        if self.save_dir is not None:
            rmtree(self.save_dir)
            self.save_dir = None
            self._moved_paths = []
class UninstallPthEntries(object):
    """Tracks entries to strip from a single .pth file, with rollback support."""

    def __init__(self, pth_file):
        if not os.path.isfile(pth_file):
            raise UninstallationError("Cannot remove entries from nonexistent file %s" % pth_file)
        self.file = pth_file
        # Normalized entries queued for removal.
        self.entries = set()
        # Original file contents, captured by remove() so rollback() can restore.
        self._saved_lines = None

    def add(self, entry):
        entry = os.path.normcase(entry)
        # On Windows, os.path.normcase converts the entry to use
        # backslashes. This is correct for entries that describe absolute
        # paths outside of site-packages, but all the others use forward
        # slashes.
        if sys.platform == 'win32' and not os.path.splitdrive(entry)[0]:
            entry = entry.replace('\\', '/')
        self.entries.add(entry)

    def remove(self):
        """Strip this set's entries from the file, saving the old contents."""
        logger.info('Removing pth entries from %s:' % self.file)
        # Binary mode: windows uses '\r\n' with py3k, but uses '\n' with py2.x.
        # `with` (rather than manual open/close) guarantees the handle is
        # released even if a later step raises.
        with open(self.file, 'rb') as fh:
            lines = fh.readlines()
        self._saved_lines = lines
        # Preserve whichever line ending the file already uses.
        if any(b('\r\n') in line for line in lines):
            endline = '\r\n'
        else:
            endline = '\n'
        for entry in self.entries:
            try:
                logger.info('Removing entry: %s' % entry)
                lines.remove(b(entry + endline))
            except ValueError:
                # Entry not present in the file; nothing to strip.
                pass
        with open(self.file, 'wb') as fh:
            fh.writelines(lines)

    def rollback(self):
        """Restore the contents saved by remove(); returns success flag."""
        if self._saved_lines is None:
            logger.error('Cannot roll back changes to %s, none were made' % self.file)
            return False
        logger.info('Rolling %s back to previous state' % self.file)
        with open(self.file, 'wb') as fh:
            fh.writelines(self._saved_lines)
        return True
class FakeFile(object):
    """Expose a sequence of lines through a minimal file-like interface.

    ConfigParser only requires readline()/iteration, so wrapping the lines
    this way avoids creating a real temporary file.
    """

    def __init__(self, lines):
        self._gen = iter(lines)

    def readline(self):
        """Return the next line, or the empty string once exhausted."""
        for line in self._gen:
            return line
        return ''

    def __iter__(self):
        return self._gen
| {
"content_hash": "68d3c3340d5acea46996ecf4ea593d07",
"timestamp": "",
"source": "github",
"line_count": 1809,
"max_line_length": 156,
"avg_line_length": 43.90491984521835,
"alnum_prop": 0.5360596293311846,
"repo_name": "qwcode/pip",
"id": "b1c7c43d37438094378365cfa4a111cb5029f1f9",
"size": "79424",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pip/req.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2717"
},
{
"name": "Python",
"bytes": "1145976"
},
{
"name": "Shell",
"bytes": "4534"
}
],
"symlink_target": ""
} |
"""Test Chipsec client actions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import sys
from absl import app
from chipsec.helper import oshelper
import mock
from grr_response_client import vfs
# If grr_response_client.components.chipsec_support.actions.grr_chipsec is
# imported here, it will import
# grr_response_client.components.chipsec_support.actions and fail (because of
# the circular import dependency).
#
# This is a terrible hack employed in grr_chipsec implementation since GRR
# components days. The idea is that we *must* import
# grr_response_client.components.chipsec_support.actions before
# grr_response_client.components.chipsec_support.actions.grr_chipsec to
# explicitly resolve the circular dependency.
from grr_response_client.components.chipsec_support import actions # pylint: disable=unused-import
from grr_response_core.lib.rdfvalues import chipsec_types as rdf_chipsec_types
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class MockUnknownChipsetError(RuntimeError):
  """Test stand-in for chipsec's UnknownChipsetError (installed in setUp)."""
  pass
class MockSPI(mock.MagicMock):
  """Fake SPI flash: one region 0x0-0xffff whose bytes all read back 0xff."""

  def get_SPI_region(self, unused_region):  # pylint: disable=invalid-name
    # (start, limit, base) of the single fake region.
    return (0, 0xffff, 0)

  def read_spi(self, unused_offset, size):
    # Every read returns 0xff bytes, regardless of offset.
    return [0xff] * size
class UnsupportedChipset(mock.MagicMock):
  """Chipset mock whose init() fails like an unrecognized platform."""

  def init(self, unused_platform, unused_load_driver):
    msg = "Unsupported Platform: VID = 0x0000, DID = 0x0000"
    raise MockUnknownChipsetError(msg)
class FailingOsHelperChipset(mock.MagicMock):
  """Chipset mock whose init() fails with a chipsec OsHelperError."""

  def init(self, unused_platform, unused_load_driver):
    msg = "Unable to open /sys/bus/pci/devices/0000:00:00.0/config"
    raise oshelper.OsHelperError(msg, -1)
class GRRChipsecTest(client_test_lib.EmptyActionTest):
  """Generic test class for GRR-Chipsec actions."""

  def setUp(self):
    super(GRRChipsecTest, self).setUp()
    # Mock the interface for Chipsec
    self.chipsec_mock = mock.MagicMock()
    self.chipsec_mock.chipset = mock.MagicMock()
    self.chipsec_mock.chipset.UnknownChipsetError = MockUnknownChipsetError
    self.chipsec_mock.hal = mock.MagicMock()
    self.chipsec_mock.logger = mock.MagicMock()

    # Install the mocks into sys.modules so importing grr_chipsec below
    # picks them up instead of the real chipsec package.
    mock_modules = {
        "chipsec": self.chipsec_mock,
        "chipsec.hal": self.chipsec_mock.hal,
    }

    chipsec_patch = mock.patch.dict(sys.modules, mock_modules)
    chipsec_patch.start()
    self.addCleanup(chipsec_patch.stop)

    # Import the ClientAction to test with the Chipsec mock in place.
    # pylint: disable=g-import-not-at-top, unused-variable
    from grr_response_client.components.chipsec_support.actions import grr_chipsec
    # pylint: enable=g-import-not-at-top, unused-variable

    # Keep a reference to the module so child classes may mock its content.
    self.grr_chipsec_module = grr_chipsec
    self.grr_chipsec_module.chipset = self.chipsec_mock.chipset
    self.grr_chipsec_module.logger = self.chipsec_mock.logger
class TestChipsecDumpFlashImage(vfs_test_lib.VfsTestCase, GRRChipsecTest):
  """Test the client dump flash image action."""

  def setUp(self):
    super(TestChipsecDumpFlashImage, self).setUp()
    # Install the fake SPI flash (0x10000 bytes of 0xff) into the mock.
    self.chipsec_mock.hal.spi = mock.MagicMock()
    self.chipsec_mock.hal.spi.SPI = MockSPI
    self.grr_chipsec_module.spi = self.chipsec_mock.hal.spi

  def testDumpFlashImage(self):
    """Test the basic dump."""
    args = rdf_chipsec_types.DumpFlashImageRequest()
    result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
    # The dumped image must match MockSPI's all-0xff region exactly.
    with vfs.VFSOpen(result.path) as image:
      self.assertEqual(image.read(0x20000), b"\xff" * 0x10000)

  def testDumpFlashImageVerbose(self):
    """Test the basic dump with the verbose mode enabled."""
    args = rdf_chipsec_types.DumpFlashImageRequest(log_level=1)
    result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
    with vfs.VFSOpen(result.path) as image:
      self.assertEqual(image.read(0x20000), b"\xff" * 0x10000)
    # Verbose mode must route messages through the (mocked) chipsec logger.
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)

  def testDumpFlashImageUnknownChipset(self):
    """By default, if the chipset is unknown, no exception is raised."""
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = rdf_chipsec_types.DumpFlashImageRequest()
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)

  def testDumpFlashImageUnknownChipsetVerbose(self):
    """Test unknown chipset with verbose mode.

    If the chipset is unknown but verbose enabled, no exception is raised
    and at least one response should be returned with non-empty logs.
    """
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = rdf_chipsec_types.DumpFlashImageRequest(log_level=1)
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
    self.assertNotEmpty(self.results)
    self.assertNotEmpty(self.results[0].logs)
    # No image is produced on failure, so the returned path is empty.
    self.assertEqual(self.results[0].path.path, "")

  def testDumpFlashImageOsHelperErrorChipset(self):
    """If an exception is raised by the helper layer, handle it."""
    self.chipsec_mock.chipset.cs = FailingOsHelperChipset
    args = rdf_chipsec_types.DumpFlashImageRequest()
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
class MockACPI(object):
  """Fake chipsec ACPI helper exposing a fixed set of tables."""

  def __init__(self, unused_chipset):
    # Table signature -> list of (fake) addresses; unknown signatures
    # default to [], mimicking the behaviour of tableList in Chipsec.
    # pylint: disable=invalid-name
    self.tableList = collections.defaultdict(list)
    self.tableList["DSDT"].append(0xAABBCCDDEEFF0011)
    self.tableList["FACP"].append(0x1100FFEEDDCCBBAA)
    self.tableList["XSDT"].append(0x1122334455667788)
    self.tableList["SSDT"].extend(
        [0x1234567890ABCDEF, 0x2234567890ABCDEF, 0x3234567890ABCDEF])
    # pylint: enable=invalid-name

    # Address -> (header bytes, content bytes).
    self.table_content = {
        0xAABBCCDDEEFF0011: (b"\xFF" * 0xFF, b"\xEE" * 0xFF),
        0x1100FFEEDDCCBBAA: (b"\xEE" * 0xFF, b"\xFF" * 0xFF),
        0x1122334455667788: (b"\xAB" * 0xFF, b"\xCD" * 0xFF),
        0x1234567890ABCDEF: (b"\xEF" * 0xFF, b"\xFE" * 0xFF),
        0x2234567890ABCDEF: (b"\xDC" * 0xFF, b"\xBA" * 0xFF),
        0x3234567890ABCDEF: (b"\xAA" * 0xFF, b"\xBB" * 0xFF),
    }

  def get_ACPI_table(self, name):  # pylint: disable=invalid-name
    """Return [(header, content), ...] for every table with signature *name*."""
    return [self.table_content[addr] for addr in self.tableList[name]]
class MockACPIReadingRestrictedArea(object):
  """ACPI mock whose constructor fails like a restricted /dev/mem read."""

  def __init__(self, unused_chipset):
    # Simulate /dev/mem error
    raise OSError("Operation not permitted")

  def get_ACPI_table(self, unused_name):  # pylint: disable=invalid-name
    # Never reached in practice: __init__ above always raises.
    return []
class TestDumpACPITable(GRRChipsecTest):
  """Tests for the DumpACPITable client action."""

  def setUp(self):
    super(TestDumpACPITable, self).setUp()
    # Install the fixed fake ACPI tables into the mock.
    self.chipsec_mock.hal.acpi = mock.MagicMock()
    self.chipsec_mock.hal.acpi.ACPI = MockACPI
    self.grr_chipsec_module.acpi = self.chipsec_mock.hal.acpi

  def testDumpValidSingleACPITable(self):
    """Tests basic valid ACPI table dump."""
    args = rdf_chipsec_types.DumpACPITableRequest(table_signature="DSDT")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertLen(result.acpi_tables, 1)
    self.assertEqual(result.acpi_tables[0].table_address, 0xAABBCCDDEEFF0011)
    # Blob is header + content as defined in MockACPI.table_content.
    self.assertEqual(result.acpi_tables[0].table_blob,
                     b"\xFF" * 0xFF + b"\xEE" * 0xFF)

  def testDumpValidMultipleACPITables(self):
    """Tests valid ACPI table dump that would yield several tables."""
    args = rdf_chipsec_types.DumpACPITableRequest(table_signature="SSDT")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertLen(result.acpi_tables, 3)
    self.assertEqual(result.acpi_tables[0].table_address, 0x1234567890ABCDEF)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     b"\xEF" * 0xFF + b"\xFE" * 0xFF)
    self.assertEqual(result.acpi_tables[1].table_address, 0x2234567890ABCDEF)
    self.assertEqual(result.acpi_tables[1].table_blob,
                     b"\xDC" * 0xFF + b"\xBA" * 0xFF)
    self.assertEqual(result.acpi_tables[2].table_address, 0x3234567890ABCDEF)
    self.assertEqual(result.acpi_tables[2].table_blob,
                     b"\xAA" * 0xFF + b"\xBB" * 0xFF)

  def testDumpValidSingleACPITableVerbose(self):
    """Tests valid ACPI table dump with verbose mode enabled."""
    args = rdf_chipsec_types.DumpACPITableRequest(
        table_signature="XSDT", logging=True)
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(result.acpi_tables[0].table_address, 0x1122334455667788)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     b"\xAB" * 0xFF + b"\xCD" * 0xFF)
    # Verbose mode must route messages through the (mocked) chipsec logger.
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)

  def testDumpInvalidACPITable(self):
    """Tests dumping invalid ACPI table."""
    args = rdf_chipsec_types.DumpACPITableRequest(
        table_signature="INVALID_TABLE")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertNotEqual(len(result.logs), 0)

  def testDumpACPITableUnknownChipset(self):
    """By default, if the chipset is unknown, no exception is raised."""
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = rdf_chipsec_types.DumpACPITableRequest(table_signature="FACP")
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)

  def testDumpACPITableUnknownChipsetVerbose(self):
    """Tests unknown chipset with verbose mode.

    If the chipset is unknown but verbose enabled, no exception is raised
    and at least one response should be returned with non-empty logs.
    """
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = rdf_chipsec_types.DumpACPITableRequest(
        table_signature="FACP", logging=True)
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
    self.assertNotEmpty(self.results)
    self.assertNotEmpty(self.results[0].logs)

  def testDumpACPITableTriggeringDevMemError(self):
    """Tests the condition where OSError is triggered due to using /dev/mem.

    No exception should be raised, and the log describing the error should be
    returned.
    """
    # NOTE(review): setUp patches self.chipsec_mock.hal.acpi (and points
    # grr_chipsec_module.acpi at it), but this line assigns to
    # self.chipsec_mock.acpi — verify the restricted-area mock is actually
    # the one the action ends up using.
    self.chipsec_mock.acpi.ACPI = MockACPIReadingRestrictedArea
    args = rdf_chipsec_types.DumpACPITableRequest(table_signature="FACP")
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
    self.assertNotEmpty(self.results)
    self.assertNotEmpty(self.results[0].logs)
def main(argv):
  """Delegate to the GRR test runner."""
  test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
| {
"content_hash": "058ae018f7a0f0c11eca2d0ac61829ee",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 99,
"avg_line_length": 39.57992565055762,
"alnum_prop": 0.7205785667324129,
"repo_name": "dunkhong/grr",
"id": "bdf924f103a2aca3a297a4daca6f16114c05b3c0",
"size": "10669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/client/grr_response_client/components/chipsec_support/grr_chipsec_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import csv
import sys
import logging
logger = logging.getLogger(__name__)
try:
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
_has_sklearn = True
except (AttributeError, ImportError) as e:
logger.warning("To use data.metrics please install scikit-learn. See https://scikit-learn.org/stable/index.html")
_has_sklearn = False
def is_sklearn_available():
    """Return True when scikit-learn (and scipy) were importable at load time."""
    return _has_sklearn
if _has_sklearn:

    def simple_accuracy(preds, labels):
        """Fraction of predictions equal to labels (expects numpy arrays)."""
        return (preds == labels).mean()

    def acc_and_f1(preds, labels):
        """Accuracy, binary F1 and their mean."""
        acc = simple_accuracy(preds, labels)
        f1 = f1_score(y_true=labels, y_pred=preds)
        return {
            "acc": acc,
            "f1": f1,
            "acc_and_f1": (acc + f1) / 2,
        }

    def pearson_and_spearman(preds, labels):
        """Pearson/Spearman correlations and their mean (regression tasks)."""
        pearson_corr = pearsonr(preds, labels)[0]
        spearman_corr = spearmanr(preds, labels)[0]
        return {
            "pearson": pearson_corr,
            "spearmanr": spearman_corr,
            "corr": (pearson_corr + spearman_corr) / 2,
        }

    def _acc_dict(preds, labels):
        """Wrap simple accuracy in the {"acc": ...} shape used by most tasks."""
        return {"acc": simple_accuracy(preds, labels)}

    # Metric function per GLUE task; replaces the previous long elif chain.
    _GLUE_METRICS = {
        "cola": lambda preds, labels: {"mcc": matthews_corrcoef(labels, preds)},
        "sst-2": _acc_dict,
        "mrpc": acc_and_f1,
        "sts-b": pearson_and_spearman,
        "qqp": acc_and_f1,
        "mnli": _acc_dict,
        "mnli-mm": _acc_dict,
        "qnli": _acc_dict,
        "rte": _acc_dict,
        "wnli": _acc_dict,
    }

    def glue_compute_metrics(task_name, preds, labels):
        """Compute the metric dict for a GLUE *task_name*.

        Raises KeyError for unknown tasks (same contract as before).
        """
        assert len(preds) == len(labels)
        if task_name not in _GLUE_METRICS:
            raise KeyError(task_name)
        return _GLUE_METRICS[task_name](preds, labels)

    def xnli_compute_metrics(task_name, preds, labels):
        """Accuracy for the XNLI task; raises KeyError for any other name."""
        assert len(preds) == len(labels)
        if task_name == "xnli":
            return {"acc": simple_accuracy(preds, labels)}
        else:
            raise KeyError(task_name)
| {
"content_hash": "f6ff40b468ac888740d0cfe07262a547",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 117,
"avg_line_length": 32.026666666666664,
"alnum_prop": 0.5749375520399667,
"repo_name": "HLTCHKUST/Xpersona",
"id": "5a46eb05d3badcae4848ae00445990a3b88ee761",
"size": "3112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multilingual/transformers/data/metrics/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "5230"
},
{
"name": "Python",
"bytes": "2391176"
},
{
"name": "Shell",
"bytes": "27554"
}
],
"symlink_target": ""
} |
import gurobipy as gu
from ticdat import TicDatFactory, standard_main
# ------------------------ define the input schema --------------------------------
# Each table: primary-key field list first, then data field list.
input_schema = TicDatFactory (
    commodities = [["Name"],[]],
    nodes = [["Name"],[]],
    arcs = [["Source", "Destination"],["Capacity"]],
    cost = [["Commodity", "Source", "Destination"], ["Cost"]],
    inflow = [["Commodity", "Node"],["Quantity"]]
)

# Define the foreign key relationships
input_schema.add_foreign_key("arcs", "nodes", ['Source', 'Name'])
input_schema.add_foreign_key("arcs", "nodes", ['Destination', 'Name'])
input_schema.add_foreign_key("cost", "nodes", ['Source', 'Name'])
input_schema.add_foreign_key("cost", "nodes", ['Destination', 'Name'])
input_schema.add_foreign_key("cost", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("inflow", "commodities", ['Commodity', 'Name'])
input_schema.add_foreign_key("inflow", "nodes", ['Node', 'Name'])

# Define the data types
# Capacity: non-negative, infinity allowed (uncapacitated arcs).
input_schema.set_data_type("arcs", "Capacity", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=True)
# Cost: non-negative and finite.
input_schema.set_data_type("cost", "Cost", min=0, max=float("inf"),
                           inclusive_min=True, inclusive_max=False)
# Quantity: any finite value (negative = demand, positive = supply).
input_schema.set_data_type("inflow", "Quantity", min=-float("inf"), max=float("inf"),
                           inclusive_min=False, inclusive_max=False)

# The default-default of zero makes sense everywhere except for Capacity
input_schema.set_default_value("arcs", "Capacity", float("inf"))
# ---------------------------------------------------------------------------------
# ------------------------ define the output schema -------------------------------
# flow: per-commodity quantity shipped on each arc;
# parameters: scalar results such as "Total Cost".
solution_schema = TicDatFactory(
    flow = [["Commodity", "Source", "Destination"], ["Quantity"]],
    parameters = [["Parameter"],["Value"]])
# ---------------------------------------------------------------------------------
# ------------------------ solving section-----------------------------------------
def solve(dat):
    """
    core solving routine
    :param dat: a good ticdat for the dataFactory
    :return: a good ticdat for the solutionFactory, or None
    """
    assert input_schema.good_tic_dat_object(dat)
    assert not input_schema.find_foreign_key_failures(dat)
    assert not input_schema.find_data_type_failures(dat)
    # we're using TicDatFactory instead of PanDatFactory because the former will create data based
    # indexes on the DataFrame's when calling copy_to_pandas
    dat = input_schema.copy_to_pandas(dat, drop_pk_columns=False)

    # Create optimization model
    m = gu.Model('netflow')

    # One gurobi variable per (commodity, arc) pair that has a cost row.
    # NOTE(review): apply(..., reduce=True) was removed in newer pandas;
    # this code presumably targets an older pandas release — confirm before
    # upgrading the pandas dependency.
    flow = dat.cost.join(dat.arcs, on = ["Source", "Destination"],
                         how = "inner", rsuffix="_arcs")\
              .apply(lambda r : m.addVar(ub=r.Capacity, obj=r.Cost,
                                         name='flow_%s_%s_%s'%
                                         (r.Commodity, r.Source, r.Destination)),
                     axis=1, reduce=True)
    flow.name = "flow"

    # Arc-capacity constraints: total flow over all commodities <= Capacity.
    # combining aggregate with gurobipy.quicksum is more efficient than using sum
    flow.groupby(level=["Source", "Destination"])\
        .aggregate({"flow": gu.quicksum})\
        .join(dat.arcs)\
        .apply(lambda r : m.addConstr(r.flow <= r.Capacity,
                                      'cap_%s_%s' %(r.Source, r.Destination)),
               axis =1)

    def flow_subtotal(node_fld, sum_field_name):
        # Sum flow per (Commodity, node), node taken from node_fld
        # ("Source" -> outbound totals, "Destination" -> inbound totals).
        rtn = flow.groupby(level=['Commodity',node_fld])\
                  .aggregate({sum_field_name : gu.quicksum})
        rtn.index.names = ['Commodity', 'Node']
        return rtn

    # We need a proxy for zero because of the toehold problem, and
    # we use quicksum([]) instead of a dummy variable because of the fillna problem.
    # (see notebooks in this directory and parent directory)
    zero_proxy = gu.quicksum([])
    # Flow-conservation constraints: flow_in + inflow Quantity == flow_out
    # at every (commodity, node).
    flow_subtotal("Destination", "flow_in")\
        .join(dat.inflow[abs(dat.inflow.Quantity) > 0].Quantity, how="outer")\
        .join(flow_subtotal("Source", "flow_out"), how = "outer")\
        .fillna(zero_proxy)\
        .apply(lambda r : m.addConstr(r.flow_in + r.Quantity - r.flow_out == 0,
                                      'cons_flow_%s_%s' % r.name),
               axis =1)

    # Compute optimal solution
    m.optimize()

    if m.status == gu.GRB.status.OPTIMAL:
        # Keep only the strictly-positive flows in the solution.
        t = flow.apply(lambda r : r.x)
        # TicDat is smart enough to handle a Series for a single data field table
        rtn = solution_schema.TicDat(flow = t[t > 0])
        rtn.parameters["Total Cost"] = dat.cost.join(t).apply(lambda r: r.Cost * r.flow,
                                                              axis=1).sum()
        return rtn
# ---------------------------------------------------------------------------------
# ------------------------ provide stand-alone functionality ----------------------
# when run from the command line, will read/write xls/csv/db/mdb files
if __name__ == "__main__":
    # Delegate command-line handling (file read/solve/write) to ticdat's standard_main.
    standard_main(input_schema, solution_schema, solve)
# ---------------------------------------------------------------------------------
| {
"content_hash": "b33e6108e146d4e55b0832ccd00ab872",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 98,
"avg_line_length": 47.648148148148145,
"alnum_prop": 0.5369218810726778,
"repo_name": "opalytics/opalytics-ticdat",
"id": "ce693eb7c279ca763253623eb8f75b811658e668",
"size": "6033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/expert_section/pandas/full_pandas/netflow.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "2877"
},
{
"name": "Python",
"bytes": "660699"
}
],
"symlink_target": ""
} |
"""Poller Apps."""
| {
"content_hash": "1ae08976429f2feffce1a3434f81e9ab",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.5263157894736842,
"repo_name": "LokiNetworks/empower-runtime",
"id": "95b96ff6745aa84f7f22918ed86fe69565af430c",
"size": "628",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "empower/apps/pollers/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "101256"
},
{
"name": "HTML",
"bytes": "61335"
},
{
"name": "JavaScript",
"bytes": "5861837"
},
{
"name": "Python",
"bytes": "637231"
}
],
"symlink_target": ""
} |
"""Converts Haiku functions to dot."""
import collections
import contextlib
import functools
import html
from typing import Any, Callable, NamedTuple, List, Optional
from haiku._src import config
from haiku._src import data_structures
from haiku._src import module
from haiku._src import utils
import jax
# Import tree if available, but only throw error at runtime.
# Permits us to drop dm-tree from deps.
try:
import tree # pylint: disable=g-import-not-at-top
except ImportError as e:
tree = None
graph_stack = data_structures.ThreadLocalStack()
# One recorded operation: `id` is the identity of the op's first output value,
# `title` is the display label, `outputs` holds all raw output values.
Node = collections.namedtuple('Node', 'id,title,outputs')
# A directed edge from value `a` to value `b`.
Edge = collections.namedtuple('Edge', 'a,b')


class Graph(NamedTuple):
  """A graphviz digraph/subgraph: a titled collection of nodes and edges."""

  title: str
  nodes: List[Node]
  edges: List[Edge]
  subgraphs: List['Graph']

  @classmethod
  def create(cls, title: Optional[str] = None):
    """Returns a fresh graph with empty node/edge/subgraph lists."""
    return Graph(title=title, nodes=[], edges=[], subgraphs=[])

  def evolve(self, **kwargs) -> 'Graph':
    """Returns a copy of this graph with the given fields replaced."""
    fields = self._asdict()
    fields.update(kwargs)
    return Graph(**fields)
def to_dot(fun: Callable[..., Any]) -> Callable[..., str]:
  """Converts a function using Haiku modules to a dot graph.

  To view the resulting graph in Google Colab or an iPython notebook use the
  ``graphviz`` package:

  .. code-block::

      dot = hk.experimental.to_dot(f)(x)
      import graphviz
      graphviz.Source(dot)

  Args:
    fun: A function using Haiku modules.

  Returns:
    A function that returns the source code string to a graphviz graph
    describing the operations executed by the given function clustered by
    Haiku module.

  See Also:
    :func:`abstract_to_dot`: Generates a graphviz graph using abstract inputs.
  """
  # Wrap `fun` once so each call traces it into the internal Graph IR,
  # which is then rendered as dot text.
  graph_fun = to_graph(fun)
  @functools.wraps(fun)
  def wrapped_fun(*args) -> str:
    # Disable namescopes so they don't show up in the generated dot.
    with config.context(profiler_name_scopes=False):
      return _graph_to_dot(*graph_fun(*args))
  return wrapped_fun
def abstract_to_dot(fun: Callable[..., Any]) -> Callable[..., str]:
  """Converts a function using Haiku modules to a dot graph.

  Same as :func:`to_dot` but uses JAX's abstract interpretation
  machinery to evaluate the function without requiring concrete inputs.
  Valid inputs for the wrapped function include
  :class:`jax.ShapeDtypeStruct`.

  :func:`abstract_to_dot` does not support data-dependent
  control-flow, because no concrete values are provided to the function.

  Args:
    fun: A function using Haiku modules.

  Returns:
    A function that returns the source code string to a graphviz graph
    describing the operations executed by the given function clustered by
    Haiku module.

  See Also:
    :func:`to_dot`: Generates a graphviz graph using concrete inputs.
  """
  @functools.wraps(fun)
  def wrapped_fun(*args) -> str:
    dot_out = ''
    # eval_shape cannot evaluate functions which return str, as str is not a
    # valid JAX type.
    # The following function smuggles the created dot string out via
    # `nonlocal` during the abstract evaluation.
    def dot_extractor_fn(*inner_args):
      nonlocal dot_out
      dot_out = to_dot(fun)(*inner_args)
    # Run under eval_shape so only shapes/dtypes, not concrete data, are used.
    jax.eval_shape(dot_extractor_fn, *args)
    assert dot_out, 'Failed to extract dot graph from abstract evaluation'
    return dot_out
  return wrapped_fun
def name_or_str(o):
  """Returns ``o.__name__`` when present, otherwise ``str(o)``."""
  try:
    return o.__name__
  except AttributeError:
    return str(o)
def to_graph(fun):
  """Converts a Haiku function into a graph IR (extracted for testing).

  The wrapped function returns ``(graph, args, out)``: the root
  :class:`Graph` IR, the original call arguments, and ``fun``'s output.
  """
  @functools.wraps(fun)
  def wrapped_fun(*args):
    """See `fun`."""
    # Flatten the function and its args so the custom trace sees a flat
    # list of leaf values.
    f = jax.linear_util.wrap_init(fun)
    args_flat, in_tree = jax.tree_util.tree_flatten((args, {}))
    flat_fun, out_tree = jax.api_util.flatten_fun(f, in_tree)
    graph = Graph.create(title=name_or_str(fun))
    @contextlib.contextmanager
    def method_hook(mod: module.Module, method_name: str):
      # Collect everything a module method does into its own subgraph,
      # then attach it (titled after the module) to the enclosing graph.
      subg = Graph.create()
      with graph_stack(subg):
        yield
      title = mod.module_name
      if method_name != '__call__':
        title += f' ({method_name})'
      graph_stack.peek().subgraphs.append(subg.evolve(title=title))
    # Run `fun` under the DotTrace interpreter with method hooks installed.
    with graph_stack(graph), \
         module.hook_methods(method_hook), \
         jax.core.new_main(DotTrace) as main:
      out_flat = _interpret_subtrace(flat_fun, main).call_wrapped(*args_flat)
      out = jax.tree_util.tree_unflatten(out_tree(), out_flat)
    return graph, args, out
  return wrapped_fun
@jax.linear_util.transformation
def _interpret_subtrace(main, *in_vals):
  # Generator-style JAX transformation: the first `yield` feeds wrapped
  # tracers into the traced function, the second returns unwrapped outputs.
  trace = DotTrace(main, jax.core.cur_sublevel())
  in_tracers = [DotTracer(trace, val) for val in in_vals]
  outs = yield in_tracers, {}
  out_tracers = map(trace.full_raise, outs)
  out_vals = [t.val for t in out_tracers]
  yield out_vals
class DotTracer(jax.core.Tracer):
  """JAX tracer used in DotTrace.

  Carries the concrete value ``val`` so primitives can be executed eagerly
  while the graph is being recorded.
  """
  def __init__(self, trace, val):
    super().__init__(trace)
    self.val = val
  @property
  def aval(self):
    # Abstract value computed on demand from the concrete payload.
    return jax.core.get_aval(self.val)
  def full_lower(self):
    # Nothing further to lower: this tracer already wraps a concrete value.
    return self
class DotTrace(jax.core.Trace):
  """Traces a JAX function to dot."""
  def pure(self, val):
    # Lift a constant into this trace.
    return DotTracer(self, val)
  def lift(self, val):
    # Lift a value coming from an outer trace.
    return DotTracer(self, val)
  def sublift(self, val):
    # Re-wrap a tracer from a different sublevel.
    return DotTracer(self, val.val)
  def process_primitive(self, primitive, tracers, params):
    # Execute the primitive on the concrete values, then record one Node
    # (keyed by the first output) and an edge from every input to it.
    val_out = primitive.bind(*[t.val for t in tracers], **params)
    inputs = [t.val for t in tracers]
    outputs = list(jax.tree_util.tree_leaves(val_out))
    graph = graph_stack.peek()
    node = Node(id=outputs[0], title=str(primitive), outputs=outputs)
    graph.nodes.append(node)
    graph.edges.extend([(i, outputs[0]) for i in inputs])
    return jax.tree_util.tree_map(lambda v: DotTracer(self, v), val_out)
  def process_call(self, call_primitive, f, tracers, params):
    assert call_primitive.multiple_results
    # Inlined xla_call: interpret directly into the current graph.
    if (call_primitive is jax.interpreters.xla.xla_call_p and
        params.get('inline', False)):
      f = _interpret_subtrace(f, self.main)
      vals_out = f.call_wrapped(*[t.val for t in tracers])
      return [DotTracer(self, v) for v in vals_out]
    # Otherwise give the call its own labelled subgraph.
    graph = Graph.create(title=f'{call_primitive} ({name_or_str(f.f)})')
    graph_stack.peek().subgraphs.append(graph)
    with graph_stack(graph):
      f = _interpret_subtrace(f, self.main)
      vals_out = f.call_wrapped(*[t.val for t in tracers])
      return [DotTracer(self, v) for v in vals_out]
  process_map = process_call
  def process_custom_jvp_call(self, primitive, fun, jvp, tracers):
    # Drop the custom differentiation rule.
    del primitive, jvp  # Unused.
    return fun.call_wrapped(*tracers)
  def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers,
                              out_trees):
    # Drop the custom differentiation rule.
    del primitive, fwd, bwd, out_trees  # Unused.
    return fun.call_wrapped(*tracers)
def _format_val(val):
if not hasattr(val, 'shape'):
return repr(val)
shape = ','.join(map(str, val.shape))
dtype = utils.simple_dtype(val.dtype)
return f'{dtype}[{shape}]'
def escape(value):
  """Stringifies ``value`` and escapes HTML special characters (&, <, >, quotes)."""
  text = str(value)
  return html.escape(text)
# Determine maximum nesting depth to appropriately scale subgraph labels.
def _max_depth(g: Graph) -> int:
  """Returns the nesting depth of ``g``: 1 plus the deepest subgraph's depth."""
  child_depths = [_max_depth(sub) for sub in g.subgraphs]
  if not child_depths:
    return 1
  return 1 + max(child_depths)
def _scaled_font_size(depth: int) -> int:
return int(1.4**depth * 14)
def _graph_to_dot(graph: Graph, args, outputs) -> str:
  """Converts from an internal graph IR to 'dot' format.

  Values are keyed by ``id()`` throughout; this assumes ``args`` and
  ``outputs`` stay alive for the duration of the call so ids are stable.
  """
  if tree is None:
    raise ImportError('hk.experimental.to_dot requires dm-tree>=0.1.1.')
  def format_path(path):
    # Human-readable label for an output leaf, e.g. 'output[0]: a/b'.
    if isinstance(outputs, tuple):
      out = f'output[{path[0]}]'
      if len(path) > 1:
        out += ': ' + '/'.join(map(str, path[1:]))
    else:
      out = 'output'
      if path:
        out += ': ' + '/'.join(map(str, path))
    return out
  lines = []
  used_argids = set()         # arg ids that appear as edge sources
  argid_usecount = collections.Counter()  # per-arg fan-out (for reuse styling)
  op_outids = set()           # ids produced by some recorded op
  captures = []               # values neither args nor op outputs (closed over)
  argids = {id(v) for v in jax.tree_util.tree_leaves(args)}
  outids = {id(v) for v in jax.tree_util.tree_leaves(outputs)}
  outname = {id(v): format_path(p) for p, v in tree.flatten_with_path(outputs)}
  def render_graph(g: Graph, parent: Optional[Graph] = None, depth: int = 0):
    """Renders a given graph by appending 'dot' format lines."""
    if parent:
      lines.extend([
          f'subgraph cluster_{id(g)} {{',
          '  style="rounded,filled";',
          '  fillcolor="#F0F5F5";',
          '  color="#14234B;";',
          '  pad=0.1;',
          f'  fontsize={_scaled_font_size(depth)};',
          f'  label = <<b>{escape(g.title)}</b>>;',
          '  labelloc = t;',
      ])
    for node in g.nodes:
      label = f'<b>{escape(node.title)}</b>'
      for o in node.outputs:
        label += '<br/>' + _format_val(o)
        op_outids.add(id(o))
      node_id = id(node.id)
      # Final outputs get a distinctive blue style and their output name.
      if node_id in outids:
        label = f'<b>{escape(outname[node_id])}</b><br/>' + label
        color = '#0053D6'
        fillcolor = '#AABFFF'
        style = 'filled,bold'
      else:
        color = '#FFDB13'
        fillcolor = '#FFF26E'
        style = 'filled'
      lines.append(f'{node_id} [label=<{label}>, '
                   f' id="node{node_id}",'
                   ' shape=rect,'
                   f' style="{style}",'
                   ' tooltip=" ",'
                   ' fontcolor="black",'
                   f' color="{color}",'
                   f' fillcolor="{fillcolor}"];')
    for s in g.subgraphs:
      render_graph(s, parent=g, depth=depth - 1)
    if parent:
      lines.append(f'}} // subgraph cluster_{id(g)}')
    for a, b in g.edges:
      # Edge sources that are neither args nor op outputs are captures.
      if id(a) not in argids and id(a) not in op_outids:
        captures.append(a)
      a, b = map(id, (a, b))
      if a in argids:
        # Args get one rendered node per use; suffix disambiguates them.
        i = argid_usecount[a]
        argid_usecount[a] += 1
        lines.append(f'{a}{i} -> {b};')
      else:
        lines.append(f'{a} -> {b};')
      used_argids.add(a)
  graph_depth = _max_depth(graph)
  render_graph(graph, parent=None, depth=graph_depth)
  # Process inputs and label them in the graph.
  for path, value in tree.flatten_with_path(args):
    if value is None:
      continue
    node_id = id(value)
    if node_id not in used_argids:
      continue
    # One orange node per use of the argument; reuses get a dimmed style.
    for i in range(argid_usecount[node_id]):
      label = f'<b>args[{escape(path[0])}]'
      if len(path) > 1:
        label += ': ' + '/'.join(map(str, path[1:]))
      label += '</b>'
      if hasattr(value, 'shape') and hasattr(value, 'dtype'):
        label += f'<br/>{escape(_format_val(value))}'
      fillcolor = '#FFDEAF'
      fontcolor = 'black'
      if i > 0:
        label = '<b>(reuse)</b><br/>' + label
        fillcolor = '#FFEACC'
        fontcolor = '#565858'
      lines.append(f'{node_id}{i} [label=<{label}>'
                   f' id="node{node_id}{i}",'
                   ' shape=rect,'
                   ' style="filled",'
                   f' fontcolor="{fontcolor}",'
                   ' color="#FF8A4F",'
                   f' fillcolor="{fillcolor}"];')
  # Captured values render purple; small scalars show their value inline.
  for value in captures:
    node_id = id(value)
    if (not hasattr(value, 'aval') and
        hasattr(value, 'size') and
        value.size == 1):
      label = f'<b>{value.item()}</b>'
    else:
      label = f'<b>{escape(_format_val(value))}</b>'
    lines.append(f'{node_id} [label=<{label}>'
                 ' shape=rect,'
                 ' style="filled",'
                 ' fontcolor="black",'
                 ' color="#A261FF",'
                 ' fillcolor="#E6D6FF"];')
  head = [
      'digraph G {',
      'rankdir = TD;',
      'compound = true;',
      f'label = <<b>{escape(graph.title)}</b>>;',
      f'fontsize={_scaled_font_size(graph_depth)};',
      'labelloc = t;',
      'stylesheet = <',
      '  data:text/css,',
      '  @import url(https://fonts.googleapis.com/css?family=Roboto:400,700);',
      '  svg text {',
      '    font-family: \'Roboto\';',
      '  }',
      '  .node text {',
      '    font-size: 12px;',
      '  }',
  ]
  for node_id, use_count in argid_usecount.items():
    if use_count == 1:
      continue
    # Add hover animation for reused args.
    for a in range(use_count):
      for b in range(use_count):
        if a == b:
          head.append(f'%23node{node_id}{a}:hover '
                      '{ stroke-width: 0.2em; }')
        else:
          head.append(
              f'%23node{node_id}{a}:hover ~ %23node{node_id}{b} '
              '{ stroke-width: 0.2em; }')
  head.append('>')
  lines.append('} // digraph G')
  return '\n'.join(head + lines) + '\n'
| {
"content_hash": "ee33b2f3bfb743fa2becb0a4a8387f1d",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 79,
"avg_line_length": 29.826603325415675,
"alnum_prop": 0.6004618937644342,
"repo_name": "deepmind/dm-haiku",
"id": "674b1c2668302fcb6852aa43110ca984742e2973",
"size": "13253",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "haiku/_src/dot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1024855"
},
{
"name": "Shell",
"bytes": "1907"
},
{
"name": "Starlark",
"bytes": "31643"
}
],
"symlink_target": ""
} |
"""
This module contains the `ExpandoTextCtrl` which is a multi-line
text control that will expand its height on the fly to be able to show
all the lines of the content of the control.
"""
import wx
import wx.lib.newevent
# This event class and binder object can be used to catch
# notifications that the ExpandoTextCtrl has resized itself and
# that layout adjustments may need to be made.
wxEVT_ETC_LAYOUT_NEEDED = wx.NewEventType()
EVT_ETC_LAYOUT_NEEDED = wx.PyEventBinder( wxEVT_ETC_LAYOUT_NEEDED, 1 )
#---------------------------------------------------------------------------
class ExpandoTextCtrl(wx.TextCtrl):
    """
    The ExpandoTextCtrl is a multi-line wx.TextCtrl that will
    adjust its height on the fly as needed to accommodate the number of
    lines needed to display the current content of the control. It is
    assumed that the width of the control will be a fixed value and
    that only the height will be adjusted automatically. If the
    control is used in a sizer then the width should be set as part of
    the initial or min size of the control.

    When the control resizes itself it will attempt to also make
    necessary adjustments in the sizer hierarchy it is a member of (if
    any) but if that is not sufficient then the programmer can catch
    the EVT_ETC_LAYOUT_NEEDED event in the container and make any
    other layout adjustments that may be needed.
    """
    # Class-level cache of a default single-line TextCtrl's height.
    _defaultHeight = -1
    _leading = 1 # TODO: find a way to calculate this, it may vary by platform
    def __init__(self, parent, id=-1, value="",
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=0, validator=wx.DefaultValidator, name="expando"):
        # find the default height of a single line control
        self.defaultHeight = self._getDefaultHeight(parent)
        # make sure we default to that height if none was given
        w, h = size
        if h == -1:
            h = self.defaultHeight
        # always use the multi-line style
        style = style | wx.TE_MULTILINE | wx.TE_NO_VSCROLL | wx.TE_RICH2
        # init the base class
        wx.TextCtrl.__init__(self, parent, id, value, pos, (w, h),
                             style, validator, name)
        # save some basic metrics: extraHeight is the non-text chrome
        # (borders/padding) of the control.
        self.extraHeight = self.defaultHeight - self.GetCharHeight()
        self.numLines = 1
        self.maxHeight = -1
        if value:
            # Defer the first adjustment until the control is fully created.
            wx.CallAfter(self._adjustCtrl)
        self.Bind(wx.EVT_TEXT, self.OnTextChanged)
        self.Bind(wx.EVT_SIZE, self.OnSize)
    def SetMaxHeight(self, h):
        """
        Sets the max height that the control will expand to on its
        own, and adjusts it down if needed.
        """
        self.maxHeight = h
        if h != -1 and self.GetSize().height > h:
            self.SetSize((-1, h))
    def GetMaxHeight(self):
        """Returns the max height that the control will expand to on its own."""
        return self.maxHeight
    def SetFont(self, font):
        """Sets the font and forces a height recalculation for the new metrics."""
        wx.TextCtrl.SetFont(self, font)
        # Invalidate the cached line count so _adjustCtrl re-measures.
        self.numLines = -1
        self._adjustCtrl()
    def WriteText(self, text):
        # work around a bug of a lack of a EVT_TEXT when calling
        # WriteText on wxMac
        wx.TextCtrl.WriteText(self, text)
        self._adjustCtrl()
    def AppendText(self, text):
        # Instead of using wx.TextCtrl.AppendText append and set the
        # insertion point ourselves. This works around a bug on wxMSW
        # where it scrolls the old text out of view, and since there
        # is no scrollbar there is no way to get back to it.
        self.SetValue(self.GetValue() + text)
        self.SetInsertionPointEnd()
    def OnTextChanged(self, evt):
        # check if any adjustments are needed on every text update
        self._adjustCtrl()
        evt.Skip()
    def OnSize(self, evt):
        # The number of lines needed can change when the ctrl is resized too.
        self._adjustCtrl()
        evt.Skip()
    def _adjustCtrl(self):
        # if the current number of lines is different than before
        # then recalculate the size needed and readjust
        numLines = self.GetNumberOfLines()
        if numLines != self.numLines:
            self.numLines = numLines
            charHeight = self.GetCharHeight()
            height = numLines * (charHeight+self._leading) + self.extraHeight
            # Only grow/shrink if we stay within the configured max height.
            if not (self.maxHeight != -1 and height > self.maxHeight):
                # The size is changing... if the control is not in a
                # sizer then we just want to change the size and
                # that's it, the programmer will need to deal with
                # potential layout issues. If it is being managed by
                # a sizer then we'll change the min size setting and
                # then try to do a layout. In either case we'll also
                # send an event so the parent can handle any special
                # layout issues that it wants to deal with.
                if self.GetContainingSizer() is not None:
                    mw, mh = self.GetMinSize()
                    self.SetMinSize((mw, height))
                    if self.GetParent().GetSizer() is not None:
                        self.GetParent().Layout()
                    else:
                        self.GetContainingSizer().Layout()
                else:
                    self.SetSize((-1, height))
            # send notification that layout may be needed
            evt = wx.PyCommandEvent(wxEVT_ETC_LAYOUT_NEEDED, self.GetId())
            evt.SetEventObject(self)
            evt.height = height
            evt.numLines = numLines
            self.GetEventHandler().ProcessEvent(evt)
    def _getDefaultHeight(self, parent):
        """Returns (and caches per class) the default single-line control height."""
        # checked for cached value
        if self.__class__._defaultHeight != -1:
            return self.__class__._defaultHeight
        # otherwise make a single line textctrl and find out its default height
        tc = wx.TextCtrl(parent)
        sz = tc.GetSize()
        tc.Destroy()
        self.__class__._defaultHeight = sz.height
        return sz.height
    if 'wxGTK' in wx.PlatformInfo or 'wxOSX-cocoa' in wx.PlatformInfo:
        # GetNumberOfLines in some ports doesn't count wrapped lines, so we
        # need to implement our own.
        def GetNumberOfLines(self):
            """Counts display lines, including an estimate of soft-wrapped lines."""
            text = self.GetValue()
            width = self.GetClientSize().width
            dc = wx.ClientDC(self)
            dc.SetFont(self.GetFont())
            count = 0
            for line in text.split('\n'):
                count += 1
                w, h = dc.GetTextExtent(line)
                if w > width - self._getExtra():
                    # the width of the text is wider than the control,
                    # calc how many lines it will be wrapped to
                    count += self._wrapLine(line, dc, width)
            if not count:
                count = 1
            return count
        def _getExtra(self):
            # Width reserved by the port for a (hidden) vertical scrollbar.
            if 'wxOSX-cocoa' in wx.PlatformInfo:
                return wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)
            else:
                return 0
        def _wrapLine(self, line, dc, width):
            # Estimate where the control will wrap the lines and
            # return the count of extra lines needed.
            pte = dc.GetPartialTextExtents(line)
            width -= wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)
            if not pte or width < pte[0]:
                return 1
            idx = 0
            start = 0
            count = 0
            spc = -1
            while idx < len(pte):
                if line[idx] == ' ':
                    spc = idx
                if pte[idx] - start > width:
                    # we've reached the max width, add a new line
                    count += 1
                    # did we see a space? if so restart the count at that pos
                    if spc != -1:
                        idx = spc + 1
                        spc = -1
                    try:
                        start = pte[idx]
                    except IndexError:
                        start = pte[-1]
                else:
                    idx += 1
            return count
#---------------------------------------------------------------------------
| {
"content_hash": "e98585f06b0212201a73c90a3c38718d",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 40.62735849056604,
"alnum_prop": 0.5410426100081273,
"repo_name": "ktan2020/legacy-automation",
"id": "ad3d668eb9d2904ca37ae84a62fbc2a5c2a07dee",
"size": "9109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/expando.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
} |
import logging
from fake_switches.adapters import tftp_reader
from fake_switches.command_processing.piping_processor_base import NotPipingProcessor
from fake_switches.terminal import NoopTerminalController
class SwitchTftpParser(object):
    """Fetches a file over TFTP and replays it, line by line, through a
    command processor bound to the given switch configuration."""

    def __init__(self, configuration, reader=None):
        """
        configuration: switch configuration object; its ``name`` attribute
            is used to build the logger name.
        reader: object exposing ``read_tftp(url, filename)``; defaults to
            the module-level ``tftp_reader``.
        """
        self.configuration = configuration
        self.reader = reader if reader else tftp_reader
        logger_name = "fake_switches.%s.tftp" % configuration.name
        self.logger = logging.getLogger(logger_name)

    def parse(self, url, filename, command_processor_class):
        """Downloads ``filename`` from ``url`` and feeds every line to a
        freshly constructed ``command_processor_class`` instance."""
        self.logger.info("Reading : %s/%s" % (url, filename))
        contents = self.reader.read_tftp(url, filename)
        processor = command_processor_class(
            self.configuration, NoopTerminalController(),
            self.logger, NotPipingProcessor())
        for command in contents.split("\n"):
            self.logger.debug("Processing : %s" % command)
            processor.process_command(command)
| {
"content_hash": "867f110dbdf2adf993425d77ad118ef4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 38.64,
"alnum_prop": 0.6956521739130435,
"repo_name": "mlecours/fake-switches",
"id": "edf32a0b0a8ecaac05d9d139195b969681156368",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fake_switches/command_processing/switch_tftp_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "377641"
}
],
"symlink_target": ""
} |
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.fonts import addMapping
from reportlab.lib.pagesizes import landscape, A4
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.paraparser import ParaFrag, ps2tt, tt2ps
from xhtml2pdf.util import getSize, getCoords, getFile, pisaFileObject, \
getFrameDimensions, getColor
from xhtml2pdf.w3c import css
from xhtml2pdf.xhtml2pdf_reportlab import PmlPageTemplate, PmlTableOfContents, \
PmlParagraph, PmlParagraphAndImage, PmlPageCount
import copy
import logging
import os
import re
import reportlab
import sys
#support python 3
#from types import StringTypes, TupleType, ListType
# Python 2/3 compatibility shim replacing the removed `types` aliases.
# NOTE(review): checks only the first character of sys.version, and
# TupleType/ListType appear to be bound only on the non-py2 branch —
# confirm the py2 path does not rely on them.
if sys.version[0] == '2':
    StringTypes = (str,unicode)
else:
    StringTypes = (str,)
    TupleType = tuple
    ListType = list
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import xhtml2pdf.default
import xhtml2pdf.parser
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Silence reportlab's warnings about glyphs missing from embedded fonts.
reportlab.rl_config.warnOnMissingFontGlyphs = 0
log = logging.getLogger("xhtml2pdf")
sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.4 # fraction of font size that a sub script should be lowered
superFraction = 0.4 # fraction of font size that a super script should be raised
NBSP = u"\u00a0" # non-breaking space
def clone(self, **kwargs):
    """Returns a shallow copy of this ParaFrag, with any keyword arguments
    applied as attribute overrides (monkey-patched onto ParaFrag below)."""
    n = ParaFrag(**self.__dict__)
    if kwargs:
        d = n.__dict__
        d.update(kwargs)
        # This else could cause trouble in Paragraphs with images etc.
        if "cbDefn" in d:
            del d["cbDefn"]
    # Bullet text is never carried over to the clone.
    n.bulletText = None
    return n
# Attach as a method so every ParaFrag instance can be cloned.
ParaFrag.clone = clone
def getParaFrag(style):
    """Builds a ParaFrag initialised from ``style`` plus all the extra
    xhtml2pdf-specific attributes (borders, padding, list style, etc.)."""
    frag = ParaFrag()
    frag.sub = 0
    frag.super = 0
    frag.rise = 0
    frag.underline = 0 # XXX Need to be able to set color to fit CSS tests
    frag.strike = 0
    frag.greek = 0
    frag.link = None
    frag.text = ""
    frag.fontName = "Times-Roman"
    # NOTE(review): the line above is immediately overwritten by the
    # style-derived value on the next line.
    frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
    frag.fontSize = style.fontSize
    frag.textColor = style.textColor
    # Extras
    frag.leading = 0
    frag.letterSpacing = "normal"
    frag.leadingSource = "150%"
    frag.leadingSpace = 0
    frag.backColor = None
    frag.spaceBefore = 0
    frag.spaceAfter = 0
    frag.leftIndent = 0
    frag.rightIndent = 0
    frag.firstLineIndent = 0
    frag.keepWithNext = False
    frag.alignment = TA_LEFT
    frag.vAlign = None
    # Border defaults; the per-side values below start from these.
    frag.borderWidth = 1
    frag.borderStyle = None
    frag.borderPadding = 0
    frag.borderColor = None
    frag.borderLeftWidth = frag.borderWidth
    frag.borderLeftColor = frag.borderColor
    frag.borderLeftStyle = frag.borderStyle
    frag.borderRightWidth = frag.borderWidth
    frag.borderRightColor = frag.borderColor
    frag.borderRightStyle = frag.borderStyle
    frag.borderTopWidth = frag.borderWidth
    frag.borderTopColor = frag.borderColor
    frag.borderTopStyle = frag.borderStyle
    frag.borderBottomWidth = frag.borderWidth
    frag.borderBottomColor = frag.borderColor
    frag.borderBottomStyle = frag.borderStyle
    frag.paddingLeft = 0
    frag.paddingRight = 0
    frag.paddingTop = 0
    frag.paddingBottom = 0
    frag.listStyleType = None
    frag.listStyleImage = None
    frag.whiteSpace = "normal"
    frag.wordWrap = None
    # Page-number/count placeholders resolved during rendering.
    frag.pageNumber = False
    frag.pageCount = False
    frag.height = None
    frag.width = None
    frag.bulletIndent = 0
    frag.bulletText = None
    frag.bulletFontName = "Helvetica"
    frag.zoom = 1.0
    # PDF outline (bookmark) state.
    frag.outline = False
    frag.outlineLevel = 0
    frag.outlineOpen = False
    frag.insideStaticFrame = 0
    return frag
def getDirName(path):
    """Returns the 'directory' of ``path``: URLs (anything with a scheme)
    are returned unchanged, filesystem paths become the absolute directory
    containing the file."""
    if urlparse.urlparse(path).scheme:
        return path
    return os.path.dirname(os.path.abspath(path))
class pisaCSSBuilder(css.CSSBuilder):
    """CSS builder handling the xhtml2pdf at-rules: @font-face (font
    embedding), @page (page templates/frames) and @frame."""

    def atFontFace(self, declarations):
        """
        Embed fonts
        """
        result = self.ruleset([self.selector('*')], declarations)
        # Fix: on Python 3 dict views cannot be indexed; fall back to
        # popitem() like atPage/atFrame already do.
        try:
            data = result[0].values()[0]
        except Exception:
            data = result[0].popitem()[1]
        if "src" not in data:
            # invalid - source is required, ignore this specification
            return {}, {}
        names = data["font-family"]
        # Font weight
        fweight = str(data.get("font-weight", "normal")).lower()
        bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
        if not bold and fweight != "normal":
            log.warn(self.c.warning("@fontface, unknown value font-weight '%s'", fweight))
        # Font style
        italic = str(data.get("font-style", "")).lower() in ("italic", "oblique")
        src = self.c.getFile(data["src"], relative=self.c.cssParser.rootPath)
        self.c.loadFont(
            names,
            src,
            bold=bold,
            italic=italic)
        return {}, {}

    def _pisaAddFrame(self, name, data, first=False, border=None, size=(0, 0)):
        """Builds the frame tuple stored on ``c.frameList`` from the parsed
        @page/@frame declarations. Returns
        ``(name, content, border, x, y, w, h, data)``."""
        c = self.c
        if not name:
            name = "-pdf-frame-%d" % c.UID()
        if data.get('is_landscape', False):
            size = (size[1], size[0])
        x, y, w, h = getFrameDimensions(data, size[0], size[1])
        # The first frame of a template carries no static content.
        if first:
            return name, None, data.get("-pdf-frame-border", border), x, y, w, h, data
        return (name, data.get("-pdf-frame-content", None),
                data.get("-pdf-frame-border", border), x, y, w, h, data)

    def _getFromData(self, data, attr, default=None, func=None):
        """Returns ``func(data[attr])`` for the first matching attribute
        (``attr`` may be a single key or a tuple of alternatives),
        or ``default`` when none is present."""
        if not func:
            func = lambda x: x
        if type(attr) in (list, tuple):
            for a in attr:
                if a in data:
                    return func(data[a])
            return default
        else:
            if attr in data:
                return func(data[attr])
            return default

    def atPage(self, name, pseudopage, declarations):
        """Builds a PmlPageTemplate (page size, margins, frames, background)
        from an @page rule and registers it under ``name``."""
        c = self.c
        data = {}
        name = name or "body"
        pageBorder = None
        if declarations:
            result = self.ruleset([self.selector('*')], declarations)
            try:
                data = result[0].values()[0]
            except Exception:
                # Python 3: dict views cannot be indexed
                data = result[0].popitem()[1]
            pageBorder = data.get("-pdf-frame-border", None)
        if name in c.templateList:
            log.warn(self.c.warning("template '%s' has already been defined", name))
        if "-pdf-page-size" in data:
            c.pageSize = xhtml2pdf.default.PML_PAGESIZES.get(str(data["-pdf-page-size"]).lower(), c.pageSize)
        # CSS "size" may combine a page-size keyword/tuple with an
        # orientation keyword (landscape/portrait).
        isLandscape = False
        if "size" in data:
            size = data["size"]
            if type(size) is not ListType:
                size = [size]
            sizeList = []
            for value in size:
                valueStr = str(value).lower()
                if type(value) is TupleType:
                    sizeList.append(getSize(value))
                elif valueStr == "landscape":
                    isLandscape = True
                elif valueStr == "portrait":
                    isLandscape = False
                elif valueStr in xhtml2pdf.default.PML_PAGESIZES:
                    c.pageSize = xhtml2pdf.default.PML_PAGESIZES[valueStr]
                else:
                    log.warn(c.warning("Unknown size value for @page"))
            if len(sizeList) == 2:
                c.pageSize = tuple(sizeList)
            if isLandscape:
                c.pageSize = landscape(c.pageSize)
        # Page-level padding/border act as defaults for every frame.
        padding_top = self._getFromData(data, 'padding-top', 0, getSize)
        padding_left = self._getFromData(data, 'padding-left', 0, getSize)
        padding_right = self._getFromData(data, 'padding-right', 0, getSize)
        padding_bottom = self._getFromData(data, 'padding-bottom', 0, getSize)
        border_color = self._getFromData(
            data, ('border-top-color', 'border-bottom-color',
                   'border-left-color', 'border-right-color'), None, getColor)
        border_width = self._getFromData(
            data, ('border-top-width', 'border-bottom-width',
                   'border-left-width', 'border-right-width'), 0, getSize)
        # Any geometry property on @page itself implies an implicit first frame.
        for prop in ("margin-top", "margin-left", "margin-right", "margin-bottom",
                     "top", "left", "right", "bottom", "width", "height"):
            if prop in data:
                c.frameList.append(self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize))
                break
        # Frames have to be calculated after we know the pagesize
        frameList = []
        staticList = []
        for fname, static, border, x, y, w, h, fdata in c.frameList:
            fpadding_top = self._getFromData(fdata, 'padding-top', padding_top, getSize)
            fpadding_left = self._getFromData(fdata, 'padding-left', padding_left, getSize)
            fpadding_right = self._getFromData(fdata, 'padding-right', padding_right, getSize)
            fpadding_bottom = self._getFromData(fdata, 'padding-bottom', padding_bottom, getSize)
            fborder_color = self._getFromData(
                fdata, ('border-top-color', 'border-bottom-color',
                        'border-left-color', 'border-right-color'), border_color, getColor)
            fborder_width = self._getFromData(
                fdata, ('border-top-width', 'border-bottom-width',
                        'border-left-width', 'border-right-width'), border_width, getSize)
            # NOTE(review): the colored boundary is applied when *no* border
            # was requested — looks inverted, but preserved as-is; confirm
            # against rendering output before changing.
            if border or pageBorder:
                frame_border = ShowBoundaryValue()
            else:
                frame_border = ShowBoundaryValue(color=fborder_color, width=fborder_width)
            #fix frame sizing problem.
            if static:
                x, y, w, h = getFrameDimensions(fdata, c.pageSize[0], c.pageSize[1])
            x, y, w, h = getCoords(x, y, w, h, c.pageSize)
            if w <= 0 or h <= 0:
                log.warn(self.c.warning("Negative width or height of frame. Check @frame definitions."))
            frame = Frame(
                x, y, w, h,
                id=fname,
                leftPadding=fpadding_left,
                rightPadding=fpadding_right,
                bottomPadding=fpadding_bottom,
                topPadding=fpadding_top,
                showBoundary=frame_border)
            if static:
                frame.pisaStaticStory = []
                c.frameStatic[static] = [frame] + c.frameStatic.get(static, [])
                staticList.append(frame)
            else:
                frameList.append(frame)
        background = data.get("background-image", None)
        if background:
            #should be relative to the css file
            background = self.c.getFile(background, relative=self.c.cssParser.rootPath)
        # No content frame at all: synthesize one covering the page.
        if not frameList:
            log.warn(c.warning("missing explicit frame definition for content or just static frames"))
            fname, static, border, x, y, w, h, data = self._pisaAddFrame(name, data, first=True, border=pageBorder,
                                                                         size=c.pageSize)
            x, y, w, h = getCoords(x, y, w, h, c.pageSize)
            if w <= 0 or h <= 0:
                log.warn(c.warning("Negative width or height of frame. Check @page definitions."))
            if border or pageBorder:
                frame_border = ShowBoundaryValue()
            else:
                frame_border = ShowBoundaryValue(color=border_color, width=border_width)
            frameList.append(Frame(
                x, y, w, h,
                id=fname,
                leftPadding=padding_left,
                rightPadding=padding_right,
                bottomPadding=padding_bottom,
                topPadding=padding_top,
                showBoundary=frame_border))
        pt = PmlPageTemplate(
            id=name,
            frames=frameList,
            pagesize=c.pageSize,
        )
        pt.pisaStaticList = staticList
        pt.pisaBackground = background
        pt.pisaBackgroundList = c.pisaBackgroundList
        if isLandscape:
            pt.pageorientation = pt.LANDSCAPE
        c.templateList[name] = pt
        # Reset per-template state for the next @page rule.
        c.template = None
        c.frameList = []
        c.frameStaticList = []
        return {}, {}

    def atFrame(self, name, declarations):
        """Records an @frame rule; actual Frame objects are built later in
        atPage once the page size is known."""
        if declarations:
            result = self.ruleset([self.selector('*')], declarations)
            # print "@BOX", name, declarations, result
            data = result[0]
            if data:
                try:
                    data = data.values()[0]
                except Exception:
                    data = data.popitem()[1]
                self.c.frameList.append(
                    self._pisaAddFrame(name, data, size=self.c.pageSize))
        return {}, {} # TODO: It always returns empty dicts?
class pisaCSSParser(css.CSSParser):
    """CSS parser that resolves external stylesheets relative to the
    document currently being converted."""

    def parseExternal(self, cssResourceName):
        # Remember the current root so nested @imports resolve
        # relative to the stylesheet that pulled them in.
        oldRootPath = self.rootPath
        cssFile = self.c.getFile(cssResourceName, relative=self.rootPath)
        if not cssFile:
            return None
        if self.rootPath and urlparse.urlparse(self.rootPath).scheme:
            # Remote root: resolve the resource as a URL.
            self.rootPath = urlparse.urljoin(self.rootPath, cssResourceName)
        else:
            # Local root: use the directory of the fetched file.
            self.rootPath = getDirName(cssFile.uri)
        result = self.parse(cssFile.getData())
        # NOTE(review): the old root is only restored on the success
        # path; an exception in parse() leaves rootPath modified.
        self.rootPath = oldRootPath
        return result
class pisaContext(object):
"""
Helper class for creation of reportlab story and container for
various data.
"""
def __init__(self, path, debug=0, capacity=-1):
    """
    Collect all state needed while converting a document: fragment
    lists, frames, page templates, fonts, CSS text and log counters.

    path: location of the source document, used to resolve relative
    file references; falls back to a dummy name when empty.
    """
    self.fontList = copy.copy(xhtml2pdf.default.DEFAULT_FONT)
    self.path = []
    self.capacity = capacity
    self.node = None
    self.toc = PmlTableOfContents()
    self.story = []
    self.indexing_story = None
    # BUGFIX: the original set ``self.text = []`` here and then
    # immediately overwrote it with ``u""`` below; the dead list
    # assignment has been removed.
    self.log = []
    self.err = 0      # error counter, incremented by error()
    self.warn = 0     # warning counter, incremented by warning()
    self.text = u""   # plain text of the paragraph being collected
    self.uidctr = 0   # monotonically increasing id source, see UID()
    self.multiBuild = False
    self.pageSize = A4
    self.template = None
    self.templateList = {}
    self.frameList = []
    self.frameStatic = {}
    self.frameStaticList = []
    self.pisaBackgroundList = []
    self.keepInFrameIndex = None
    self.baseFontSize = getSize("12pt")
    self.anchorFrag = []
    self.anchorName = []
    self.tableData = None
    self.frag = self.fragBlock = getParaFrag(ParagraphStyle('default%d' % self.UID()))
    self.fragList = []
    self.fragAnchor = []
    self.fragStack = []
    self.fragStrip = True
    self.listCounter = 0
    self.cssText = ""
    self.cssDefaultText = ""
    self.image = None
    self.imageData = {}
    self.force = False
    self.pathCallback = None  # External callback function for path calculations
    # Store path to document
    self.pathDocument = path or "__dummy__"
    parts = urlparse.urlparse(self.pathDocument)
    if not parts.scheme:
        self.pathDocument = os.path.abspath(self.pathDocument)
    self.pathDirectory = getDirName(self.pathDocument)
    self.meta = dict(
        author="",
        title="",
        subject="",
        keywords="",
        pagesize=A4,
    )
def UID(self):
self.uidctr += 1
return self.uidctr
# METHODS FOR CSS
def addCSS(self, value):
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssText += value.strip() + "\n"
# METHODS FOR CSS
def addDefaultCSS(self, value):
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssDefaultText += value.strip() + "\n"
def parseCSS(self):
    """Build the CSS builder/parser pair and parse the collected CSS
    into a cascade strategy (author styles over defaults)."""
    # This self-reference really should be refactored. But for now
    # we'll settle for using weak references. This avoids memory
    # leaks because the garbage collector (at least on cPython
    # 2.7.3) isn't aggressive enough.
    import weakref
    self.cssBuilder = pisaCSSBuilder(mediumSet=["all", "print", "pdf"])
    #self.cssBuilder.c = self
    # Expose the context as a weakly-referenced ``c`` property so the
    # builder does not keep the context alive.  NOTE(review): this
    # monkey-patches the *class*, so the property is shared by all
    # instances.
    self.cssBuilder._c = weakref.ref(self)
    pisaCSSBuilder.c = property(lambda self: self._c())
    self.cssParser = pisaCSSParser(self.cssBuilder)
    self.cssParser.rootPath = self.pathDirectory
    #self.cssParser.c = self
    self.cssParser._c = weakref.ref(self)
    pisaCSSParser.c = property(lambda self: self._c())
    self.css = self.cssParser.parse(self.cssText)
    self.cssDefault = self.cssParser.parse(self.cssDefaultText)
    # Author CSS (user) wins over the default stylesheet (userAgent).
    self.cssCascade = css.CSSCascadeStrategy(userAgent=self.cssDefault, user=self.css)
    self.cssCascade.parser = self.cssParser
# METHODS FOR STORY
def addStory(self, data):
self.story.append(data)
def swapStory(self, story=[]):
self.story, story = copy.copy(story), copy.copy(self.story)
return story
def toParagraphStyle(self, first):
    """
    Build a reportlab ParagraphStyle from the attributes of the
    fragment *first* (typically the block-level fragment).
    """
    style = ParagraphStyle('default%d' % self.UID(), keepWithNext=first.keepWithNext)
    # BUGFIX: the original assigned ``style.fontName = first.fontName``
    # here and unconditionally overwrote it with the tt2ps() result at
    # the end of this method; the dead first assignment was removed.
    style.fontSize = first.fontSize
    style.letterSpacing = first.letterSpacing
    # Leading is at least 1.25 * font size.
    style.leading = max(first.leading + first.leadingSpace, first.fontSize * 1.25)
    style.backColor = first.backColor
    style.spaceBefore = first.spaceBefore
    style.spaceAfter = first.spaceAfter
    style.leftIndent = first.leftIndent
    style.rightIndent = first.rightIndent
    style.firstLineIndent = first.firstLineIndent
    style.textColor = first.textColor
    style.alignment = first.alignment
    style.bulletFontName = first.bulletFontName or first.fontName
    style.bulletFontSize = first.fontSize
    style.bulletIndent = first.bulletIndent
    style.wordWrap = first.wordWrap
    # Border handling for Paragraph
    # Transfer the styles for each side of the border, *not* the whole
    # border values that reportlab supports. We'll draw them ourselves in
    # PmlParagraph.
    style.borderTopStyle = first.borderTopStyle
    style.borderTopWidth = first.borderTopWidth
    style.borderTopColor = first.borderTopColor
    style.borderBottomStyle = first.borderBottomStyle
    style.borderBottomWidth = first.borderBottomWidth
    style.borderBottomColor = first.borderBottomColor
    style.borderLeftStyle = first.borderLeftStyle
    style.borderLeftWidth = first.borderLeftWidth
    style.borderLeftColor = first.borderLeftColor
    style.borderRightStyle = first.borderRightStyle
    style.borderRightWidth = first.borderRightWidth
    style.borderRightColor = first.borderRightColor
    # If no border color is given, the text color is used (XXX Tables!)
    if (style.borderTopColor is None) and style.borderTopWidth:
        style.borderTopColor = first.textColor
    if (style.borderBottomColor is None) and style.borderBottomWidth:
        style.borderBottomColor = first.textColor
    if (style.borderLeftColor is None) and style.borderLeftWidth:
        style.borderLeftColor = first.textColor
    if (style.borderRightColor is None) and style.borderRightWidth:
        style.borderRightColor = first.textColor
    style.borderPadding = first.borderPadding
    style.paddingTop = first.paddingTop
    style.paddingBottom = first.paddingBottom
    style.paddingLeft = first.paddingLeft
    style.paddingRight = first.paddingRight
    # Map bold/italic onto the concrete PostScript font name.
    style.fontName = tt2ps(first.fontName, first.bold, first.italic)
    return style
def addTOC(self):
    """Create paragraph styles for the 20 TOC levels from the
    ``pdftoclevel<n>`` CSS classes and append the TOC flowable."""
    styles = []
    for i in xrange(20):
        self.node.attributes["class"] = "pdftoclevel%d" % i
        self.cssAttr = xhtml2pdf.parser.CSSCollect(self.node, self)
        # Zero the margins; indentation comes from the level styles.
        xhtml2pdf.parser.CSS2Frag(self, {
            "margin-top": 0,
            "margin-bottom": 0,
            "margin-left": 0,
            "margin-right": 0,
        }, True)
        pstyle = self.toParagraphStyle(self.frag)
        styles.append(pstyle)
    self.toc.levelStyles = styles
    self.addStory(self.toc)
    self.indexing_story = None
def addPageCount(self):
if not self.multiBuild:
self.indexing_story = PmlPageCount()
self.multiBuild = True
def dumpPara(self, frags, style):
    # Debugging hook: intentionally a no-op.  Override (or edit) to
    # inspect the fragments and style before a PmlParagraph is built.
    return
def addPara(self, force=False):
    """Flush the collected fragments into a PmlParagraph on the story.

    force: emit a paragraph even when the collected text is blank
    (also triggered once by ``self.force``, e.g. after an NBSP).
    """
    force = (force or self.force)
    self.force = False
    # Cleanup the trail
    try:
        rfragList = reversed(self.fragList)
    except:
        # For Python 2.3 compatibility
        rfragList = copy.copy(self.fragList)
        rfragList.reverse()
    # NOTE(review): rfragList is built but never used below.
    # Find maximum lead
    maxLeading = 0
    #fontSize = 0
    for frag in self.fragList:
        leading = getSize(frag.leadingSource, frag.fontSize) + frag.leadingSpace
        maxLeading = max(leading, frag.fontSize + frag.leadingSpace, maxLeading)
        frag.leading = leading
    if force or (self.text.strip() and self.fragList):
        # Update paragraph style by style of first fragment
        first = self.fragBlock
        style = self.toParagraphStyle(first)
        # style.leading = first.leading + first.leadingSpace
        if first.leadingSpace:
            style.leading = maxLeading
        else:
            style.leading = getSize(first.leadingSource, first.fontSize) + first.leadingSpace
        bulletText = copy.copy(first.bulletText)
        first.bulletText = None
        # Add paragraph to story
        if force or len(self.fragAnchor + self.fragList) > 0:
            # We need this empty fragment to work around problems in
            # Reportlab paragraphs regarding backGround etc.
            if self.fragList:
                self.fragList.append(self.fragList[- 1].clone(text=''))
            else:
                blank = self.frag.clone()
                blank.fontName = "Helvetica"
                blank.text = ''
                self.fragList.append(blank)
            self.dumpPara(self.fragAnchor + self.fragList, style)
            para = PmlParagraph(
                self.text,
                style,
                frags=self.fragAnchor + self.fragList,
                bulletText=bulletText)
            para.outline = first.outline
            para.outlineLevel = first.outlineLevel
            para.outlineOpen = first.outlineOpen
            para.keepWithNext = first.keepWithNext
            para.autoLeading = "max"
            if self.image:
                # Wrap image and paragraph so text flows beside the image.
                para = PmlParagraphAndImage(
                    para,
                    self.image,
                    side=self.imageData.get("align", "left"))
            self.addStory(para)
        self.fragAnchor = []
        first.bulletText = None
    # Reset data
    self.image = None
    self.imageData = {}
    self.clearFrag()
# METHODS FOR FRAG
def clearFrag(self):
self.fragList = []
self.fragStrip = True
self.text = u""
def copyFrag(self, **kw):
return self.frag.clone(**kw)
def newFrag(self, **kw):
self.frag = self.frag.clone(**kw)
return self.frag
def _appendFrag(self, frag):
if frag.link and frag.link.startswith("#"):
self.anchorFrag.append((frag, frag.link[1:]))
self.fragList.append(frag)
# XXX Argument frag is useless!
# XXX Argument frag is useless!
def addFrag(self, text="", frag=None):
    """Split *text* into fragments (cloned from the current fragment)
    and append them, applying sub/super, bold/italic font mapping and
    whitespace handling."""
    frag = baseFrag = self.frag.clone()
    # if sub and super are both on they will cancel each other out
    if frag.sub == 1 and frag.super == 1:
        frag.sub = 0
        frag.super = 0
    # XXX Has to be replaced by CSS styles like vertical-align and font-size
    if frag.sub:
        frag.rise = - frag.fontSize * subFraction
        frag.fontSize = max(frag.fontSize - sizeDelta, 3)
    elif frag.super:
        frag.rise = frag.fontSize * superFraction
        frag.fontSize = max(frag.fontSize - sizeDelta, 3)
    # bold, italic, and underline
    frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
    # Remove soft hyphens (U+00AD) and normalize NBSP variants
    text = (text
            .replace(u"\xad", u"")
            .replace(u"\xc2\xa0", NBSP)
            .replace(u"\xa0", NBSP))
    if frag.whiteSpace == "pre":
        # Handle by lines
        for text in re.split(r'(\r\n|\n|\r)', text):
            # This is an exceptionally expensive piece of code
            self.text += text
            if ("\n" in text) or ("\r" in text):
                # If EOL insert a linebreak
                frag = baseFrag.clone()
                frag.text = ""
                frag.lineBreak = 1
                self._appendFrag(frag)
            else:
                # Handle tabs in a simple way
                text = text.replace(u"\t", 8 * u" ")
                # Somehow for Reportlab NBSP have to be inserted
                # as single character fragments
                for text in re.split(r'(\ )', text):
                    frag = baseFrag.clone()
                    if text == " ":
                        text = NBSP
                    frag.text = text
                    self._appendFrag(frag)
    else:
        for text in re.split(u'(' + NBSP + u')', text):
            frag = baseFrag.clone()
            if text == NBSP:
                # NBSP forces a paragraph flush even with blank text.
                self.force = True
                frag.text = NBSP
                self.text += text
                self._appendFrag(frag)
            else:
                # Collapse internal whitespace to single spaces while
                # preserving leading/trailing spaces (x...x trick).
                frag.text = " ".join(("x" + text + "x").split())[1: - 1]
                if self.fragStrip:
                    frag.text = frag.text.lstrip()
                    if frag.text:
                        self.fragStrip = False
                self.text += frag.text
                self._appendFrag(frag)
def pushFrag(self):
    # Save the current fragment and continue with a fresh clone.
    self.fragStack.append(self.frag)
    self.newFrag()
def pullFrag(self):
self.frag = self.fragStack.pop()
# XXX
def _getFragment(self, l=20):
try:
return repr(" ".join(self.node.toxml().split()[:l]))
except:
return ""
def _getLineNumber(self):
    # Placeholder: source line tracking is not implemented, always 0.
    return 0
def context(self, msg):
return "%s\n%s" % (
str(msg),
self._getFragment(50))
def warning(self, msg, *args):
    """Count and log a warning; return the message with node context."""
    self.warn += 1
    self.log.append((xhtml2pdf.default.PML_WARNING, self._getLineNumber(), str(msg), self._getFragment(50)))
    try:
        # Old-style %-interpolation; fall back to the raw message when
        # the arguments do not match the format string.
        return self.context(msg % args)
    except:
        return self.context(msg)
def error(self, msg, *args):
    """Count and log an error; return the message with node context."""
    self.err += 1
    self.log.append((xhtml2pdf.default.PML_ERROR, self._getLineNumber(), str(msg), self._getFragment(50)))
    try:
        # Same format-or-fallback scheme as warning().
        return self.context(msg % args)
    except:
        return self.context(msg)
# UTILS
# UTILS
def _getFileDeprecated(self, name, relative):
    """Resolve *name* to a local file path (legacy lookup used when a
    pathCallback is installed); returns None if the file is missing."""
    try:
        path = relative or self.pathDirectory
        if name.startswith("data:"):
            # data: URIs are passed through untouched.
            return name
        if self.pathCallback is not None:
            nv = self.pathCallback(name, relative)
        else:
            if path is None:
                log.warn("Could not find main directory for getting filename. Use CWD")
                path = os.getcwd()
            nv = os.path.normpath(os.path.join(path, name))
            if not (nv and os.path.isfile(nv)):
                nv = None
        if nv is None:
            log.warn(self.warning("File '%s' does not exist", name))
        return nv
    except:
        # Best-effort: log with traceback and fall through (returns None).
        log.warn(self.warning("getFile %r %r %r", name, relative, path), exc_info=1)
def getFile(self, name, relative=None):
    """
    Returns a file name or None
    """
    # Note: the unqualified ``getFile(...)`` below calls the
    # module-level helper of the same name, not this method.
    if self.pathCallback is not None:
        return getFile(self._getFileDeprecated(name, relative))
    return getFile(name, relative or self.pathDirectory)
def getFontName(self, names, default="helvetica"):
    """
    Name of a font
    """
    # print names, self.fontList
    # ``names`` may be a single name, a comma-separated string or a
    # list; lookups are case-insensitive against self.fontList.
    if type(names) is not ListType:
        if type(names) not in StringTypes:
            names = str(names)
        names = names.strip().split(",")
    for name in names:
        if type(name) not in StringTypes:
            name = str(name)
        font = self.fontList.get(name.strip().lower(), None)
        if font is not None:
            return font
    # Fall back to the default font family (may itself be missing).
    return self.fontList.get(default, None)
def registerFont(self, fontname, alias=None):
    """
    Register *fontname* in the font list under its lowercased name and
    additionally under every name in *alias*.

    The default was the mutable ``alias=[]``; replaced by ``None``
    (behaviour unchanged).  The str() conversion of *fontname* was
    performed inside the loop although it never changes; it is now
    hoisted out.
    """
    if alias is None:
        alias = []
    self.fontList[str(fontname).lower()] = str(fontname)
    if type(fontname) not in StringTypes:
        fontname = str(fontname)
    for a in alias:
        self.fontList[str(a)] = fontname
def loadFont(self, names, src, encoding="WinAnsiEncoding", bold=0, italic=0):
    """Embed a font file (*src*) and register it under the comma
    separated or listed *names*; supports TTF/TTC and AFM/PFB pairs."""
    # XXX Just works for local filenames!
    if names and src:
        file = src
        src = file.uri
        log.debug("Load font %r", src)
        if type(names) is ListType:
            fontAlias = names
        else:
            fontAlias = (x.lower().strip() for x in names.split(",") if x)
        # XXX Problems with unicode here
        fontAlias = [str(x) for x in fontAlias]
        # The first alias doubles as the canonical font family name.
        fontName = fontAlias[0]
        parts = src.split(".")
        baseName, suffix = ".".join(parts[: - 1]), parts[- 1]
        suffix = suffix.lower()
        if suffix in ["ttc", "ttf"]:
            # determine full font name according to weight and style
            fullFontName = "%s_%d%d" % (fontName, bold, italic)
            # check if font has already been registered
            if fullFontName in self.fontList:
                log.warn(self.warning("Repeated font embed for %s, skip new embed ", fullFontName))
            else:
                # Register TTF font and special name
                filename = file.getNamedFile()
                pdfmetrics.registerFont(TTFont(fullFontName, filename))
                # Add or replace missing styles
                # (note: loop variables shadow the bold/italic params)
                for bold in (0, 1):
                    for italic in (0, 1):
                        if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
                            addMapping(fontName, bold, italic, fullFontName)
                # Register "normal" name and the place holder for style
                self.registerFont(fontName, fontAlias + [fullFontName])
        elif suffix in ("afm", "pfb"):
            # Type-1 fonts come in afm/pfb pairs; derive the missing
            # half of the pair from the base filename.
            if suffix == "afm":
                afm = file.getNamedFile()
                tfile = pisaFileObject(baseName + ".pfb")
                pfb = tfile.getNamedFile()
            else:
                pfb = file.getNamedFile()
                tfile = pisaFileObject(baseName + ".afm")
                afm = tfile.getNamedFile()
            # determine full font name according to weight and style
            fullFontName = "%s_%d%d" % (fontName, bold, italic)
            # check if font has already been registered
            if fullFontName in self.fontList:
                log.warn(self.warning("Repeated font embed for %s, skip new embed", fontName))
            else:
                # Include font
                face = pdfmetrics.EmbeddedType1Face(afm, pfb)
                fontNameOriginal = face.name
                pdfmetrics.registerTypeFace(face)
                # print fontName, fontNameOriginal, fullFontName
                justFont = pdfmetrics.Font(fullFontName, fontNameOriginal, encoding)
                pdfmetrics.registerFont(justFont)
                # Add or replace missing styles
                for bold in (0, 1):
                    for italic in (0, 1):
                        if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
                            addMapping(fontName, bold, italic, fontNameOriginal)
                # Register "normal" name and the place holder for style
                self.registerFont(fontName, fontAlias + [fullFontName, fontNameOriginal])
        else:
            log.warning(self.warning("wrong attributes for <pdf:font>"))
| {
"content_hash": "6b23209e073d7ce2334857aae5d2e069",
"timestamp": "",
"source": "github",
"line_count": 950,
"max_line_length": 121,
"avg_line_length": 35.565263157894734,
"alnum_prop": 0.56400390682807,
"repo_name": "bogdal/xhtml2pdf",
"id": "9edc5a014af34522c56b1aa1f01300e659b04f53",
"size": "33811",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "xhtml2pdf/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25356"
},
{
"name": "Genshi",
"bytes": "7610"
},
{
"name": "HTML",
"bytes": "469495"
},
{
"name": "Python",
"bytes": "483919"
}
],
"symlink_target": ""
} |
""" Distributed toolkit: Extends `multiprocessing.managers`_ to
support distributed computing.
"""
from getpass import getuser
from multiprocessing import Process, connection, current_process, util
from multiprocessing.managers import (BaseManager, BaseProxy, Server, State,
dispatch)
import re
from socket import getfqdn, gethostbyname
from threading import Thread
def fqhost(*args):
    """ Gets fully qualified host."""
    # Resolve the FQDN to its IP address so that addresses compare
    # consistently between processes and hosts.
    return gethostbyname(getfqdn(*args))
def fqaddr(address):
    """ Gets fully qualified address."""
    # address: (host, port) pair; the host part is normalised via
    # fqhost(), the port is passed through unchanged.
    return (fqhost(address[0]), address[1])
def parse_address(s):
    """ Convert address string `user@host:port` to (user, host, port)
    tuple.  The user part is optional (None when absent).

    Raises ValueError when *s* is not of the expected form; the
    original crashed with AttributeError on ``m.groups()`` instead.
    """
    # Raw string avoids invalid-escape-sequence warnings for \d etc.
    m = re.match(r"(?:(.*)@)?([\da-zA-Z\-\.]{1,255}):(\d{1,5})", s)
    if m is None:
        raise ValueError("invalid address string: %r" % (s,))
    user, host, port = m.groups()
    host = fqhost(host)
    if port is not None:
        port = int(port)
    return (user, host, port)
class ConnectionServer(Server):
    """ Subclass of multiprocessing.managers.Server that allows for
    connection monitoring."""

    def __init__(self, registry, address, authkey, serializer, conn_writer):
        # conn_writer: pipe end used to report each accepted connection
        # back to the manager process.
        super(ConnectionServer, self).__init__(registry, fqaddr(address),
                                               authkey, serializer)
        self.conn_writer = conn_writer

    def __del__(self):
        self.conn_writer.close()

    @property
    def address(self):
        # Always expose the fully qualified form of the base class's
        # ``_address`` attribute.
        return fqaddr(self._address)

    @address.setter
    def address(self, address):
        self._address = fqaddr(address)

    def _notify_connection(self):
        """ Notifies other end of self.conn_writer Pipe about the last
        accepted connection."""
        # NOTE(review): ``_last_accepted`` is an implementation detail
        # of the stdlib Listener -- confirm across Python versions.
        self.conn_writer.send(self.listener._listener._last_accepted)

    def serve_forever(self):
        """ Run the server forever."""
        current_process()._manager_server = self
        try:
            try:
                while True:
                    try:
                        c = self.listener.accept()
                    except (OSError, IOError):
                        # Transient accept failure: keep serving.
                        continue
                    self._notify_connection()
                    # Handle each request in its own non-daemon thread.
                    thread = Thread(target=self.handle_request, args=(c,))
                    thread.daemon = False
                    thread.start()
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            # Magic value recognised by the base class as "stop".
            self.stop = 999
            self.listener.close()
class ConnectionManager(BaseManager):
    """ Subclass of multiprocessing.BaseManager that provides more
    access to the underlying network connections and also defines
    several information-gathering methods."""
    _Server = ConnectionServer

    def __init__(self, *args, **kwargs):
        # Optional ``user`` keyword; defaults to the local login name.
        self.user = kwargs.pop("user", getuser())
        super(ConnectionManager, self).__init__(*args, **kwargs)

    @property
    def address(self):
        # Fully qualified view of the underlying ``_address``.
        return fqaddr(self._address)

    @address.setter
    def address(self, address):
        self._address = fqaddr(address)

    @property
    def authkey(self):
        return self._authkey

    @authkey.setter
    def authkey(self, authkey):
        self._authkey = authkey

    def create_conn_pipe(self):
        """ Creates a pipe for communicating with server about
        accepted connections. Returns the (reader, writer) pair."""
        conn_reader, conn_writer = connection.Pipe(duplex=False)
        return conn_reader, conn_writer

    def get_server(self):
        """ Return server object with serve_forever() method and
        address attribute."""
        assert self._state.value == State.INITIAL
        # BUGFIX: the original called ``self.create_conn_pipe(self)``
        # (spurious extra argument -> TypeError) and then passed the
        # whole (reader, writer) tuple where only the writer belongs.
        # Unpack properly and hand the writer end to the server.
        _conn_reader, conn_writer = self.create_conn_pipe()
        return self._Server(self._registry, self.address,
                            self.authkey, self._serializer, conn_writer)

    def start(self, initializer=None, initargs=()):
        """ Spawn a server process for this manager object."""
        assert self._state.value == State.INITIAL
        if initializer is not None and not hasattr(initializer, "__call__"):
            raise TypeError("Initializer must be a callable.")
        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)
        # Pipe over which we will communicate accepted connections.
        conn_reader, conn_writer = self.create_conn_pipe()
        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, fqaddr(self._address), self._authkey,
                  self._serializer, writer, conn_writer, initializer,
                  initargs))
        ident = ":".join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + "-" + ident
        self._process.start()
        # get address of server
        writer.close()
        self._address = fqaddr(reader.recv())
        reader.close()
        # Start connection monitor.
        self.start_conn_monitor(conn_reader)
        # Register a finalizer.
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, (conn_writer,), self._Client),
            exitpriority=0)

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    conn_writer, initializer=None, initargs=()):
        """ Create a server, report its address and run it."""
        if initializer is not None:
            initializer(*initargs)
        # Create server.
        server = cls._Server(registry, fqaddr(address), authkey, serializer,
                             conn_writer)
        # Inform parent process of the server's address.
        writer.send(server.address)
        writer.close()
        # Run the manager.
        util.info("Server running at {}:{}.".format(*server.address))
        server.serve_forever()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, conns, _Client):
        """ Shutdown the manager process; will be registered as a
        finalizer."""
        if process.is_alive():
            util.info("Sending shutdown message to manager.")
            try:
                conn = _Client(fqaddr(address), authkey=authkey)
                try:
                    dispatch(conn, None, "shutdown")
                finally:
                    conn.close()
            except Exception:
                # Best effort: fall through to terminate() below.
                pass
            for conn in conns:
                conn.close()
            process.join(timeout=0.2)
            if process.is_alive():
                util.info("Manager still alive.")
                if hasattr(process, "terminate"):
                    util.info("Trying to `terminate()` manager process.")
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info("Manager still alive after terminate.")
        state.value = State.SHUTDOWN
        try:
            # Drop the cached proxy-local data for this address.
            del BaseProxy._address_to_local[fqaddr(address)]
        except KeyError:
            pass

    @staticmethod
    def _conn_monitor(conn_reader):
        """ Runs connection-monitoring loop."""
        util.debug("Connection monitor started.")
        loop = True
        while loop:
            conn_reader.poll()
            try:
                address = fqaddr(conn_reader.recv())
            except EOFError:
                # Writer end closed: server is gone, stop monitoring.
                loop = False
            else:
                util.debug("\tAccepted connection from: {}:{}.".format(
                    *address))
        conn_reader.close()
        util.debug("Connection monitor ended.")

    def start_conn_monitor(self, conn_reader):
        """ Starts thread that monitors for incoming connections."""
        thread = Thread(target=self._conn_monitor, name="conn_monitor",
                        args=(conn_reader,))
        thread.start()

    @property
    def host(self):
        """ Gets host."""
        host = fqhost()
        return host

    @property
    def info(self):
        """ Get the current host and process pid."""
        return self.host, current_process().pid
| {
"content_hash": "676aec13d6e445af6af8114281e30b15",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 76,
"avg_line_length": 34.82203389830509,
"alnum_prop": 0.5744706741299587,
"repo_name": "pbattaglia/distributed",
"id": "781d753876070573e133792c7928ad252eedfc77",
"size": "8218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/core/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57473"
},
{
"name": "Shell",
"bytes": "5106"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
    # Auto-generated validator for the ``heatmapgl.colorbar.x``
    # property: a number restricted to [-2, 3] whose edits trigger a
    # "calc" recompute.
    def __init__(self, plotly_name="x", parent_name="heatmapgl.colorbar", **kwargs):
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            max=kwargs.pop("max", 3),
            min=kwargs.pop("min", -2),
            **kwargs,
        )
| {
"content_hash": "1950608114579742193b4c0a095e4f1d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 35.92307692307692,
"alnum_prop": 0.5802997858672377,
"repo_name": "plotly/plotly.py",
"id": "9b43cb865bf486911507ffd1b750afd4d47a448c",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmapgl/colorbar/_x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Utility function to read the README file. Used for the long_description.
# It's nice, because now 1) we have a top-level README file and 2) it's easier
# to type in the README file than to put a raw string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file."""
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
    name = "pinko",
    version = "1.0.0dev1",
    author = "Marcin Kielkowski",
    author_email = "kielkowskimarcin@prokonto.pl",
    description = ("Pinko is arcade game"),
    license = "BSD",
    keywords = "Pinko Game in Python",
    url = "https://github.com/MadaooQuake/Pinko",
    # BUGFIX: a plain ``packages`` list does not expand glob patterns,
    # so 'tests*' would be looked up as a literal package name and
    # break the build; list only the real packages (tests are not
    # installed anyway).
    packages=['core', 'levels', 'main'],
    # BUGFIX: classifiers must come from the official PyPI trove list;
    # "Development Status :: 1.0.0dev1" and "Topic :: Game" are not
    # valid classifiers and would be rejected on upload.
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Games/Entertainment",
        "License :: OSI Approved :: BSD License",
    ],
)
"content_hash": "f1f9b7d81b0988d9ee8dc37ce18d1688",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 30.48,
"alnum_prop": 0.6259842519685039,
"repo_name": "MadaooQuake/Pinko",
"id": "a9af03ecc3ab95658d19cb00c839cdb6fa2fec7b",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4724"
}
],
"symlink_target": ""
} |
"""
Requires the JSAnimation package,
https://github.com/jakevdp/JSAnimation
"""
import glob
from matplotlib import image, animation
from matplotlib import pyplot as plt
from JSAnimation import IPython_display
def make_plotdir(plotdir='_plots', clobber=True):
    """
    Utility function to create a directory for storing a sequence of plot
    files, or if the directory already exists, clear out any old plots.
    If clobber==False then it will abort instead of deleting existing files.
    """
    import os
    if os.path.isdir(plotdir):
        if clobber:
            # NOTE(review): shells out to rm/mkdir; breaks for paths
            # containing spaces or shell metacharacters -- consider
            # glob + os.remove / os.makedirs instead.
            os.system("rm %s/*" % plotdir)
        else:
            raise IOError('*** Cannot clobber existing directory %s' % plotdir)
    else:
        os.system("mkdir %s" % plotdir)
    print "Figure files for each frame will be stored in ", plotdir
def save_frame(frameno, plotdir='_plots', fname_base='frame', verbose=False):
"""
After giving matplotlib commands to create the plot for a single frame
of the desired animation, this can be called to save the figure with
the appropriate file name such as _plots/frame00001.png.
"""
plt.draw()
filename = '_plots/%s%s.png' % (fname_base, str(frameno).zfill(5))
plt.savefig(filename)
if verbose:
print "Saved ",filename
def make_anim(plotdir, fname_base='frame', figsize=(10,6)):
    """
    Assumes that a set of frames are available as png files in directory _plots,
    numbered consecutively, e.g. frame0000.png, frame0001.png, etc.
    Creates an animation based display each frame in turn, and returns anim.
    You can then display anim in an IPython notebook, or
    call make_html(anim) to create a stand-alone webpage.
    """
    import glob # for finding all files matching a pattern
    # Find all frame files:
    filenames = glob.glob('%s/%s*.png' % (plotdir, fname_base))
    # sort them into increasing order:
    filenames=sorted(filenames)
    fig = plt.figure(figsize=figsize, dpi=80)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')  # so there's not a second set of axes
    im = plt.imshow(image.imread(filenames[0]))
    def init():
        # Show the first frame at the start of the animation.
        im.set_data(image.imread(filenames[0]))
        return im,
    def animate(i):
        # Load frame i from disk and display it.
        image_i=image.imread(filenames[i])
        im.set_data(image_i)
        return im,
    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=len(filenames), interval=200, blit=True)
    return anim
def make_html(anim, file_name='anim.html', title=None, \
              fps=None, embed_frames=True, default_mode='loop'):
    """
    Take an animation created by make_anim and convert it into a stand-alone
    html file.
    """
    from JSAnimation.IPython_display import anim_to_html
    html_body = anim_to_html(anim, fps=fps, embed_frames=embed_frames, \
                             default_mode=default_mode)
    html_file = open(file_name,'w')
    # NOTE(review): a ``title`` of None is rendered literally as
    # "None", and no closing </html> tag is written.
    html_file.write("<html>\n <h1>%s</h1>\n" % title)
    html_file.write(html_body)
    html_file.close()
    print "Created %s" % file_name
| {
"content_hash": "54bc2618f6684759fbef67ee87a2d1d0",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 30.74,
"alnum_prop": 0.6509433962264151,
"repo_name": "philipwangdk/HPC",
"id": "c9731937891fd3664c4c80b9538b9da27b0cf97c",
"size": "3074",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "HPC_bitbucket/uwhpsc/homeworks/project/JSAnimation_frametools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32896"
},
{
"name": "FORTRAN",
"bytes": "487910"
},
{
"name": "HTML",
"bytes": "650446"
},
{
"name": "Jupyter Notebook",
"bytes": "2457572"
},
{
"name": "Makefile",
"bytes": "35506"
},
{
"name": "Python",
"bytes": "163282"
},
{
"name": "Shell",
"bytes": "7140"
}
],
"symlink_target": ""
} |
from django import http
from django.template import Context, RequestContext
from coffin.template.loader import render_to_string
__all__ = ('page_not_found', 'server_error', 'shortcut')
# no Jinja version for this needed
from django.views.defaults import shortcut
def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.
    Templates: `404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    template_context = RequestContext(request, {'request_path': request.path})
    body = render_to_string(template_name, template_context)
    return http.HttpResponseNotFound(body)
def server_error(request, template_name='500.html'):
    """
    500 error handler.
    Templates: `500.html`
    Context: None
    """
    body = render_to_string(template_name, Context({}))
    return http.HttpResponseServerError(body)
| {
"content_hash": "3b7a712775f14f5e132a61658b26e7fd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 26.8,
"alnum_prop": 0.6556503198294243,
"repo_name": "akx/coffin",
"id": "4386a49be7dc8a6a83e80a591931860fedd42f3b",
"size": "938",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "coffin/views/defaults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "104635"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from decimal import Decimal
from django.contrib.gis.db.models import functions
from django.contrib.gis.geos import LineString, Point, Polygon, fromstr
from django.contrib.gis.measure import Area
from django.db import connection
from django.db.models import Sum
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from ..utils import mysql, oracle, postgis, spatialite
from .models import City, Country, CountryWebMercator, State, Track
@skipUnlessDBFeature("gis_enabled")
class GISFunctionsTests(TestCase):
"""
Testing functions from django/contrib/gis/db/models/functions.py.
Several tests are taken and adapted from GeoQuerySetTest.
Area/Distance/Length/Perimeter are tested in distapp/tests.
Please keep the tests in function's alphabetic order.
"""
fixtures = ['initial']
def test_asgeojson(self):
    # Only PostGIS and SpatiaLite support GeoJSON.
    if not connection.ops.geojson:
        with self.assertRaises(NotImplementedError):
            list(Country.objects.annotate(json=functions.AsGeoJSON('mpoly')))
        return
    pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
    houston_json = (
        '{"type":"Point","crs":{"type":"name","properties":'
        '{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
    )
    victoria_json = (
        '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],'
        '"coordinates":[-123.305196,48.462611]}'
    )
    chicago_json = (
        '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},'
        '"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
    )
    if spatialite:
        # SpatiaLite renders the bbox with fewer decimal places.
        victoria_json = (
            '{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],'
            '"coordinates":[-123.305196,48.462611]}'
        )
    # Precision argument should only be an integer
    with self.assertRaises(TypeError):
        City.objects.annotate(geojson=functions.AsGeoJSON('point', precision='foo'))
    # Reference queries and values.
    # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0)
    # FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
    self.assertEqual(
        pueblo_json,
        City.objects.annotate(geojson=functions.AsGeoJSON('point')).get(name='Pueblo').geojson
    )
    # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city"
    # WHERE "geoapp_city"."name" = 'Houston';
    # This time we want to include the CRS by using the `crs` keyword.
    self.assertEqual(
        houston_json,
        City.objects.annotate(json=functions.AsGeoJSON('point', crs=True)).get(name='Houston').json
    )
    # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city"
    # WHERE "geoapp_city"."name" = 'Houston';
    # This time we include the bounding box by using the `bbox` keyword.
    self.assertEqual(
        victoria_json,
        City.objects.annotate(
            geojson=functions.AsGeoJSON('point', bbox=True)
        ).get(name='Victoria').geojson
    )
    # SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city"
    # WHERE "geoapp_city"."name" = 'Chicago';
    # Finally, we set every available keyword.
    self.assertEqual(
        chicago_json,
        City.objects.annotate(
            geojson=functions.AsGeoJSON('point', bbox=True, crs=True, precision=5)
        ).get(name='Chicago').geojson
    )
    @skipUnlessDBFeature("has_AsGML_function")
    def test_asgml(self):
        # AsGML() annotation: rejected for non-geometry fields, and the output
        # for a point matches a backend-specific GML fragment.
        # Should throw a TypeError when trying to obtain GML from a
        # non-geometry field.
        qs = City.objects.all()
        with self.assertRaises(TypeError):
            qs.annotate(gml=functions.AsGML('name'))
        ptown = City.objects.annotate(gml=functions.AsGML('point', precision=9)).get(name='Pueblo')
        if oracle:
            # No precision parameter for Oracle :-/
            # Oracle also emits an xmlns attribute and trailing space, and the
            # digits beyond the shared prefix vary, hence the \d+ wildcards.
            gml_regex = re.compile(
                r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml">'
                r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ '
                r'</gml:coordinates></gml:Point>'
            )
        else:
            gml_regex = re.compile(
                r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>'
                r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>'
            )
        self.assertTrue(gml_regex.match(ptown.gml))
        if postgis:
            # GML version 3 output uses <gml:pos> instead of <gml:coordinates>.
            self.assertIn(
                '<gml:pos srsDimension="2">',
                City.objects.annotate(gml=functions.AsGML('point', version=3)).get(name='Pueblo').gml
            )
@skipUnlessDBFeature("has_AsKML_function")
def test_askml(self):
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
with self.assertRaises(TypeError):
City.objects.annotate(kml=functions.AsKML('name'))
# Ensuring the KML is as expected.
ptown = City.objects.annotate(kml=functions.AsKML('point', precision=9)).get(name='Pueblo')
self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml)
@skipUnlessDBFeature("has_AsSVG_function")
def test_assvg(self):
with self.assertRaises(TypeError):
City.objects.annotate(svg=functions.AsSVG('point', precision='foo'))
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.annotate(svg=functions.AsSVG('point')).get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.annotate(svg=functions.AsSVG('point', relative=5)).get(name='Pueblo').svg)
@skipUnlessDBFeature("has_BoundingCircle_function")
def test_bounding_circle(self):
# The weak precision in the assertions is because the BoundingCircle
# calculation changed on PostGIS 2.3.
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly')).order_by('name')
self.assertAlmostEqual(qs[0].circle.area, 169, 0)
self.assertAlmostEqual(qs[1].circle.area, 136, 0)
qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly', num_seg=12)).order_by('name')
self.assertGreater(qs[0].circle.area, 168.4, 0)
self.assertLess(qs[0].circle.area, 169.5, 0)
self.assertAlmostEqual(qs[1].circle.area, 136, 0)
@skipUnlessDBFeature("has_Centroid_function")
def test_centroid(self):
qs = State.objects.exclude(poly__isnull=True).annotate(centroid=functions.Centroid('poly'))
tol = 1.8 if mysql else (0.1 if oracle else 0.00001)
for state in qs:
self.assertTrue(state.poly.centroid.equals_exact(state.centroid, tol))
with self.assertRaisesMessage(TypeError, "'Centroid' takes exactly 1 argument (2 given)"):
State.objects.annotate(centroid=functions.Centroid('poly', 'poly'))
@skipUnlessDBFeature("has_Difference_function")
def test_difference(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(diff=functions.Difference('mpoly', geom))
# SpatiaLite and Oracle do something screwy with the Texas geometry.
if spatialite or oracle:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.diff))
@skipUnlessDBFeature("has_Difference_function", "has_Transform_function")
def test_difference_mixed_srid(self):
"""Testing with mixed SRID (Country has default 4326)."""
geom = Point(556597.4, 2632018.6, srid=3857) # Spherical mercator
qs = Country.objects.annotate(difference=functions.Difference('mpoly', geom))
# SpatiaLite and Oracle do something screwy with the Texas geometry.
if spatialite or oracle:
qs = qs.exclude(name='Texas')
for c in qs:
self.assertTrue(c.mpoly.difference(geom).equals(c.difference))
@skipUnlessDBFeature("has_Envelope_function")
def test_envelope(self):
countries = Country.objects.annotate(envelope=functions.Envelope('mpoly'))
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@skipUnlessDBFeature("has_ForceRHR_function")
def test_force_rhr(self):
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
st = State.objects.annotate(force_rhr=functions.ForceRHR('poly')).get(name='Foo')
self.assertEqual(rhr_rings, st.force_rhr.coords)
@skipUnlessDBFeature("has_GeoHash_function")
def test_geohash(self):
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.annotate(geohash=functions.GeoHash('point')).get(name='Houston')
h2 = City.objects.annotate(geohash=functions.GeoHash('point', precision=5)).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
    @skipUnlessDBFeature("has_Intersection_function")
    def test_intersection(self):
        # Intersection() of each country with a point; per the branch below
        # the intersection is empty for the fixture countries, and backends
        # differ in how they represent an empty result.
        geom = Point(5, 23, srid=4326)
        qs = Country.objects.annotate(inter=functions.Intersection('mpoly', geom))
        for c in qs:
            if spatialite or (mysql and not connection.ops.uses_invalid_empty_geometry_collection) or oracle:
                # When the intersection is empty, some databases return None.
                expected = None
            else:
                # Otherwise compare against GEOS' (empty) intersection result.
                expected = c.mpoly.intersection(geom)
            self.assertEqual(c.inter, expected)
@skipUnlessDBFeature("has_IsValid_function")
def test_isvalid(self):
valid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))')
State.objects.create(name='valid', poly=valid_geom)
State.objects.create(name='invalid', poly=invalid_geom)
valid = State.objects.filter(name='valid').annotate(isvalid=functions.IsValid('poly')).first()
invalid = State.objects.filter(name='invalid').annotate(isvalid=functions.IsValid('poly')).first()
self.assertIs(valid.isvalid, True)
self.assertIs(invalid.isvalid, False)
    @skipUnlessDBFeature("has_Area_function")
    def test_area_with_regular_aggregate(self):
        # Area() composed inside a regular aggregate (Sum).
        # Create projected country objects, for this test to work on all backends.
        for c in Country.objects.all():
            CountryWebMercator.objects.create(name=c.name, mpoly=c.mpoly)
        # Test in projected coordinate system
        qs = CountryWebMercator.objects.annotate(area_sum=Sum(functions.Area('mpoly')))
        # Some backends (e.g. Oracle) cannot group by multipolygon values, so
        # defer such fields in the aggregation query.
        for c in qs.defer('mpoly'):
            result = c.area_sum
            # If the result is a measure object, get value.
            if isinstance(result, Area):
                result = result.sq_m
            # The relative error vs. the GEOS area should be ~0.
            self.assertAlmostEqual((result - c.mpoly.area) / c.mpoly.area, 0)
@skipUnlessDBFeature("has_MakeValid_function")
def test_make_valid(self):
invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))')
State.objects.create(name='invalid', poly=invalid_geom)
invalid = State.objects.filter(name='invalid').annotate(repaired=functions.MakeValid('poly')).first()
self.assertIs(invalid.repaired.valid, True)
self.assertEqual(invalid.repaired, fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))'))
@skipUnlessDBFeature("has_MemSize_function")
def test_memsize(self):
ptown = City.objects.annotate(size=functions.MemSize('point')).get(name='Pueblo')
self.assertTrue(20 <= ptown.size <= 40) # Exact value may depend on PostGIS version
    @skipUnlessDBFeature("has_NumGeom_function")
    def test_num_geom(self):
        # NumGeometries() on collections and on single geometries.
        # Both 'countries' only have two geometries.
        for c in Country.objects.annotate(num_geom=functions.NumGeometries('mpoly')):
            self.assertEqual(2, c.num_geom)
        qs = City.objects.filter(point__isnull=False).annotate(num_geom=functions.NumGeometries('point'))
        for city in qs:
            # Oracle and PostGIS return 1 for the number of geometries on
            # non-collections, whereas MySQL returns None.
            if mysql:
                self.assertIsNone(city.num_geom)
            else:
                self.assertEqual(1, city.num_geom)
    @skipUnlessDBFeature("has_NumPoint_function")
    def test_num_points(self):
        # NumPoints() on a LineString, a MultiPolygon, and a Point.
        coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
        Track.objects.create(name='Foo', line=LineString(coords))
        qs = Track.objects.annotate(num_points=functions.NumPoints('line'))
        self.assertEqual(qs.first().num_points, 2)
        if spatialite or mysql:
            # SpatiaLite and MySQL can only count points on LineStrings
            return
        # Multi-geometry counts should agree with GEOS.
        for c in Country.objects.annotate(num_points=functions.NumPoints('mpoly')):
            self.assertEqual(c.mpoly.num_points, c.num_points)
        if not oracle:
            # Oracle cannot count vertices in Point geometries.
            for c in City.objects.annotate(num_points=functions.NumPoints('point')):
                self.assertEqual(1, c.num_points)
    @skipUnlessDBFeature("has_PointOnSurface_function")
    def test_point_on_surface(self):
        # PointOnSurface() compared against per-backend reference values.
        # Reference values.
        if oracle:
            # Oracle's algorithm differs from GEOS, so hard-code its results:
            # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05))
            # FROM GEOAPP_COUNTRY;
            ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
                   'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
                   }
        else:
            # Using GEOSGeometry to compute the reference point on surface values
            # -- since PostGIS also uses GEOS these should be the same.
            ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
                   'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
                   }
        qs = Country.objects.annotate(point_on_surface=functions.PointOnSurface('mpoly'))
        for country in qs:
            tol = 0.00001  # SpatiaLite might have WKT-translation-related precision issues
            self.assertTrue(ref[country.name].equals_exact(country.point_on_surface, tol))
@skipUnlessDBFeature("has_Reverse_function")
def test_reverse_geom(self):
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
track = Track.objects.annotate(reverse_geom=functions.Reverse('line')).get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), track.reverse_geom.coords)
    @skipUnlessDBFeature("has_Scale_function")
    def test_scale(self):
        # Scale() multiplies x and y coordinates by the given factors.
        xfac, yfac = 2, 3
        tol = 5  # The low precision tolerance is for SpatiaLite
        qs = Country.objects.annotate(scaled=functions.Scale('mpoly', xfac, yfac))
        for country in qs:
            # Walk polygons -> rings -> coordinates in lockstep.
            for p1, p2 in zip(country.mpoly, country.scaled):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
                        self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
        # Test float/Decimal values
        qs = Country.objects.annotate(scaled=functions.Scale('mpoly', 1.5, Decimal('2.5')))
        self.assertGreater(qs[0].scaled.area, qs[0].mpoly.area)
    @skipUnlessDBFeature("has_SnapToGrid_function")
    def test_snap_to_grid(self):
        # SnapToGrid() argument validation and output for the 1-, 2-, and
        # 4-argument grid forms.
        # Let's try and break snap_to_grid() with bad combinations of arguments.
        for bad_args in ((), range(3), range(5)):
            # Wrong number of arguments -> ValueError.
            with self.assertRaises(ValueError):
                Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
        for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
            # Right arity but non-numeric arguments -> TypeError.
            with self.assertRaises(TypeError):
                Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args))
        # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
        # from the world borders dataset he provides.
        wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
               '12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
               '12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
               '12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
               '12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
               '12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
               '12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
               '12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
        Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
        # Because floating-point arithmetic isn't exact, we set a tolerance
        # to pass into GEOS `equals_exact`.
        tol = 0.000000001
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country"
        # WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
        self.assertTrue(
            ref.equals_exact(
                Country.objects.annotate(
                    snap=functions.SnapToGrid('mpoly', 0.1)
                ).get(name='San Marino').snap,
                tol
            )
        )
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country"
        # WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
        self.assertTrue(
            ref.equals_exact(
                Country.objects.annotate(
                    snap=functions.SnapToGrid('mpoly', 0.05, 0.23)
                ).get(name='San Marino').snap,
                tol
            )
        )
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country"
        # WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr(
            'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))'
        )
        self.assertTrue(
            ref.equals_exact(
                Country.objects.annotate(
                    snap=functions.SnapToGrid('mpoly', 0.05, 0.23, 0.5, 0.17)
                ).get(name='San Marino').snap,
                tol
            )
        )
@skipUnlessDBFeature("has_SymDifference_function")
def test_sym_difference(self):
geom = Point(5, 23, srid=4326)
qs = Country.objects.annotate(sym_difference=functions.SymDifference('mpoly', geom))
# Oracle does something screwy with the Texas geometry.
if oracle:
qs = qs.exclude(name='Texas')
for country in qs:
self.assertTrue(country.mpoly.sym_difference(geom).equals(country.sym_difference))
    @skipUnlessDBFeature("has_Transform_function")
    def test_transform(self):
        # Pre-transformed points for Houston and Pueblo.
        # (Only the Pueblo point is actually checked below.)
        ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
        prec = 3  # Precision is low due to version variations in PROJ and GDAL.
        # Asserting the result of the transform operation with the values in
        # the pre-transformed points.
        h = City.objects.annotate(pt=functions.Transform('point', ptown.srid)).get(name='Pueblo')
        self.assertEqual(2774, h.pt.srid)
        self.assertAlmostEqual(ptown.x, h.pt.x, prec)
        self.assertAlmostEqual(ptown.y, h.pt.y, prec)
    @skipUnlessDBFeature("has_Translate_function")
    def test_translate(self):
        # Translate() shifts every coordinate by the given x/y offsets.
        xfac, yfac = 5, -23
        qs = Country.objects.annotate(translated=functions.Translate('mpoly', xfac, yfac))
        for c in qs:
            # Walk polygons -> rings -> coordinates in lockstep.
            for p1, p2 in zip(c.mpoly, c.translated):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        # The low precision is for SpatiaLite
                        self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
                        self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
    # Some combined function tests
    @skipUnlessDBFeature(
        "has_Difference_function", "has_Intersection_function",
        "has_SymDifference_function", "has_Union_function")
    def test_diff_intersection_union(self):
        "Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
        geom = Point(5, 23, srid=4326)
        qs = Country.objects.all().annotate(
            difference=functions.Difference('mpoly', geom),
            sym_difference=functions.SymDifference('mpoly', geom),
            union=functions.Union('mpoly', geom),
        )
        # For some reason SpatiaLite does something screwy with the Texas geometry here.
        # Also, it doesn't like the null intersection.
        if spatialite:
            qs = qs.exclude(name='Texas')
        else:
            qs = qs.annotate(intersection=functions.Intersection('mpoly', geom))
        if oracle:
            # Should be able to execute the queries; however, they won't be the same
            # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
            # SpatiaLite).
            return
        for c in qs:
            # Each annotated value should match its GEOS counterpart.
            self.assertTrue(c.mpoly.difference(geom).equals(c.difference))
            if not (spatialite or mysql):
                self.assertEqual(c.mpoly.intersection(geom), c.intersection)
            self.assertTrue(c.mpoly.sym_difference(geom).equals(c.sym_difference))
            self.assertTrue(c.mpoly.union(geom).equals(c.union))
@skipUnlessDBFeature("has_Union_function")
def test_union(self):
geom = Point(-95.363151, 29.763374, srid=4326)
ptown = City.objects.annotate(union=functions.Union('point', geom)).get(name='Dallas')
tol = 0.00001
# Undefined ordering
expected1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)', srid=4326)
expected2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)', srid=4326)
self.assertTrue(expected1.equals_exact(ptown.union, tol) or expected2.equals_exact(ptown.union, tol))
| {
"content_hash": "190ead99c1cd01a00066694fab7259c4",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 118,
"avg_line_length": 47.579268292682926,
"alnum_prop": 0.6177538553547781,
"repo_name": "carljm/django",
"id": "0e5f2ee693c059711d2f67e376f3edeffb4a4109",
"size": "23409",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/gis_tests/geoapp/test_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12145773"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import os
import sys
import six
from .errors import LoadRAMLError
from .loader import RAMLLoader
if sys.version_info[0] == 2:
from io import open
def load_file(raml_file):
    """Load and parse RAML from a file path or a file-like object.

    Raises ``LoadRAMLError`` if the file cannot be opened.
    """
    try:
        raml_obj = _get_raml_object(raml_file)
        with raml_obj as raml:
            return RAMLLoader().load(raml)
    except IOError as e:
        raise LoadRAMLError(e)
def load_string(raml_str):
    """Parse RAML supplied as an in-memory string."""
    loader = RAMLLoader()
    return loader.load(raml_str)
def _get_raml_object(raml_file):
    """
    Returns a file object.

    ``raml_file`` may be a filesystem path (text or bytes) or an object
    with a ``read`` method, which is returned unchanged.

    :raises LoadRAMLError: if ``raml_file`` is ``None`` or not usable.
    """
    if raml_file is None:
        msg = "RAML file can not be 'None'."
        raise LoadRAMLError(msg)
    # A tuple in isinstance() replaces the original chained isinstance calls.
    if isinstance(raml_file, (six.text_type, bytes)):
        # Treat strings/bytes as a filesystem path.
        return open(os.path.abspath(raml_file), 'r', encoding="UTF-8")
    if hasattr(raml_file, 'read'):
        # Already file-like; hand it back as-is.
        return raml_file
    msg = ("Can not load object '{0}': Not a basestring type or "
           "file object".format(raml_file))
    raise LoadRAMLError(msg)
| {
"content_hash": "d9c3ec52f88c12cea87b795551614d9c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 70,
"avg_line_length": 23.75609756097561,
"alnum_prop": 0.6180698151950719,
"repo_name": "spotify/ramlfications",
"id": "a198e779876ccb1934b97dd2fb988a3919336187",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ramlfications/_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270898"
},
{
"name": "RAML",
"bytes": "2781678"
}
],
"symlink_target": ""
} |
"""Unit tests for tracker_file and parallel_tracker_file."""
from gslib.exception import CommandException
from gslib.parallel_tracker_file import ObjectFromTracker
from gslib.parallel_tracker_file import ReadParallelUploadTrackerFile
from gslib.parallel_tracker_file import ValidateParallelCompositeTrackerData
from gslib.parallel_tracker_file import WriteComponentToParallelUploadTrackerFile
from gslib.parallel_tracker_file import WriteParallelUploadTrackerFile
from gslib.storage_url import StorageUrlFromString
from gslib.tests.testcase.unit_testcase import GsUtilUnitTestCase
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import _HashFilename
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.tracker_file import HashRewriteParameters
from gslib.tracker_file import ReadRewriteTrackerFile
from gslib.tracker_file import WriteRewriteTrackerFile
from gslib.util import CreateLock
class TestTrackerFile(GsUtilUnitTestCase):
  """Unit tests for parallel upload functions in cp command."""

  def test_HashFilename(self):
    # Tests that _HashFilename function works for both string and unicode
    # filenames (without raising any Unicode encode/decode errors).
    _HashFilename('file1')
    _HashFilename(u'file1')

  def test_RewriteTrackerFile(self):
    """Tests Rewrite tracker file functions."""
    tracker_file_name = GetRewriteTrackerFilePath('bk1', 'obj1', 'bk2', 'obj2',
                                                  self.test_api)
    # Should succeed regardless of whether it exists.
    DeleteTrackerFile(tracker_file_name)
    src_obj_metadata = apitools_messages.Object(
        bucket='bk1', name='obj1', etag='etag1', md5Hash='12345')
    src_obj2_metadata = apitools_messages.Object(
        bucket='bk1', name='obj1', etag='etag2', md5Hash='67890')
    dst_obj_metadata = apitools_messages.Object(
        bucket='bk2', name='obj2')
    rewrite_token = 'token1'
    # No tracker file exists yet, so no token can be read.
    self.assertIsNone(ReadRewriteTrackerFile(tracker_file_name,
                                             src_obj_metadata))
    rewrite_params_hash = HashRewriteParameters(
        src_obj_metadata, dst_obj_metadata, 'full')
    WriteRewriteTrackerFile(tracker_file_name, rewrite_params_hash,
                            rewrite_token)
    self.assertEqual(
        ReadRewriteTrackerFile(tracker_file_name, rewrite_params_hash),
        rewrite_token)
    # Tracker file for an updated source object (with non-matching etag/md5)
    # should return None.
    rewrite_params_hash2 = HashRewriteParameters(
        src_obj2_metadata, dst_obj_metadata, 'full')
    self.assertIsNone(ReadRewriteTrackerFile(tracker_file_name,
                                             rewrite_params_hash2))
    DeleteTrackerFile(tracker_file_name)

  def testReadGsutil416ParallelUploadTrackerFile(self):
    """Tests the parallel upload tracker file format prior to gsutil 4.17."""
    random_prefix = '123'
    objects = ['obj1', '42', 'obj2', '314159']
    contents = '\n'.join([random_prefix] + objects) + '\n'
    fpath = self.CreateTempFile(file_name='foo', contents=contents)
    # The legacy format flattens (object_name, generation) pairs; floor
    # division ('//') behaves identically to py2's '/' on ints and also
    # works under Python 3.
    expected_objects = [ObjectFromTracker(objects[2 * i], objects[2 * i + 1])
                        for i in range(0, len(objects) // 2)]
    (_, actual_prefix, actual_objects) = ReadParallelUploadTrackerFile(
        fpath, self.logger)
    self.assertEqual(random_prefix, actual_prefix)
    self.assertEqual(expected_objects, actual_objects)

  def testReadEmptyGsutil416ParallelUploadTrackerFile(self):
    """Tests reading an empty pre-gsutil 4.17 parallel upload tracker file."""
    fpath = self.CreateTempFile(file_name='foo', contents='')
    (_, actual_prefix, actual_objects) = ReadParallelUploadTrackerFile(
        fpath, self.logger)
    self.assertEqual(None, actual_prefix)
    self.assertEqual([], actual_objects)

  def testParallelUploadTrackerFileNoEncryption(self):
    """Tests a round-trip through the tracker file without an encryption key."""
    fpath = self.CreateTempFile(file_name='foo')
    random_prefix = '123'
    objects = [ObjectFromTracker('obj1', '42'),
               ObjectFromTracker('obj2', '314159')]
    WriteParallelUploadTrackerFile(fpath, random_prefix, objects)
    (enc_key, actual_prefix, actual_objects) = ReadParallelUploadTrackerFile(
        fpath, self.logger)
    self.assertEqual(random_prefix, actual_prefix)
    self.assertEqual(None, enc_key)
    self.assertEqual(objects, actual_objects)

  def testParallelUploadTrackerFileWithEncryption(self):
    """Tests a round-trip through the tracker file with an encryption key."""
    fpath = self.CreateTempFile(file_name='foo')
    random_prefix = '123'
    enc_key = '456'
    objects = [ObjectFromTracker('obj1', '42'),
               ObjectFromTracker('obj2', '314159')]
    WriteParallelUploadTrackerFile(fpath, random_prefix, objects,
                                   encryption_key_sha256=enc_key)
    (actual_key, actual_prefix, actual_objects) = ReadParallelUploadTrackerFile(
        fpath, self.logger)
    self.assertEqual(enc_key, actual_key)
    self.assertEqual(random_prefix, actual_prefix)
    self.assertEqual(objects, actual_objects)

  def testWriteComponentToParallelUploadTrackerFile(self):
    """Tests appending a component, including the encryption-key mismatch case."""
    tracker_file_lock = CreateLock()
    fpath = self.CreateTempFile(file_name='foo')
    random_prefix = '123'
    enc_key = '456'
    objects = [ObjectFromTracker('obj1', '42'),
               ObjectFromTracker('obj2', '314159')]
    WriteParallelUploadTrackerFile(fpath, random_prefix, objects,
                                   encryption_key_sha256=enc_key)
    new_object = ObjectFromTracker('obj3', '43')
    try:
      WriteComponentToParallelUploadTrackerFile(
          fpath, tracker_file_lock, new_object, self.logger,
          encryption_key_sha256=None)
      self.fail('Expected CommandException due to different encryption key')
    # 'except X as e' (instead of the old 'except X, e') is valid on both
    # Python 2.6+ and Python 3.
    except CommandException as e:
      self.assertIn('does not match encryption key', str(e))
    WriteComponentToParallelUploadTrackerFile(
        fpath, tracker_file_lock, new_object, self.logger,
        encryption_key_sha256='456')
    (actual_key, actual_prefix, actual_objects) = ReadParallelUploadTrackerFile(
        fpath, self.logger)
    self.assertEqual(enc_key, actual_key)
    self.assertEqual(random_prefix, actual_prefix)
    self.assertEqual(objects + [new_object], actual_objects)

  def testValidateParallelCompositeTrackerData(self):
    """Tests validation of tracker data against the current encryption key."""
    fpath = self.CreateTempFile(file_name='foo')
    random_prefix = '123'
    old_enc_key = '456'
    bucket_url = StorageUrlFromString('gs://foo')
    objects = [ObjectFromTracker('obj1', '42'),
               ObjectFromTracker('obj2', '314159')]
    WriteParallelUploadTrackerFile(fpath, random_prefix, objects,
                                   encryption_key_sha256=old_enc_key)

    # Mock command object since Valdiate will call Apply() to delete the
    # existing components.
    class MockCommandObject(object):
      delete_called = False

      # We call Apply with parallel_operations_override, which expects this enum
      # class to exist.
      class ParallelOverrideReason(object):
        SPEED = 'speed'

      def Apply(self, *unused_args, **unused_kwargs):
        self.delete_called = True

    def MockDeleteFunc():
      pass

    def MockDeleteExceptionHandler():
      pass

    command_obj = MockCommandObject()
    # Validate with correct key should succeed.
    (actual_prefix, actual_objects) = ValidateParallelCompositeTrackerData(
        fpath, old_enc_key, random_prefix,
        objects, old_enc_key, bucket_url, command_obj, self.logger,
        MockDeleteFunc, MockDeleteExceptionHandler)
    self.assertEqual(False, command_obj.delete_called)
    self.assertEqual(random_prefix, actual_prefix)
    self.assertEqual(objects, actual_objects)

    new_enc_key = '789'
    command_obj = MockCommandObject()
    # A mismatched key invalidates the tracker data: components are deleted
    # and no prefix/objects are returned.
    (actual_prefix, actual_objects) = ValidateParallelCompositeTrackerData(
        fpath, old_enc_key, random_prefix,
        objects, new_enc_key, bucket_url, command_obj, self.logger,
        MockDeleteFunc, MockDeleteExceptionHandler)
    self.assertEqual(True, command_obj.delete_called)
    self.assertEqual(None, actual_prefix)
    self.assertEqual([], actual_objects)
| {
"content_hash": "6cc5cb5b8f6e0237027c9aea2407cd18",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 87,
"avg_line_length": 43.98924731182796,
"alnum_prop": 0.7012955267660719,
"repo_name": "Sorsly/subtle",
"id": "5938d296a049a5a68126330787fd60023cf7bb15",
"size": "8802",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/platform/gsutil/gslib/tests/test_tracker_file.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
from xml.dom.minidom import parse, parseString
from legacy.models import Constituency
from boundaries.models import Boundary
from django.contrib.gis.geos import Point#
import django.core.management.base
maxZoom = 10
google_dist = 20037508.34
def reduceName(n):
    """Normalize a constituency name for matching: lowercase, commas to
    spaces, '&' expanded to 'and', and all runs of whitespace collapsed to a
    single space.

    Both the lookup keys and the stored keys go through this function, so the
    normalization only needs to be applied consistently on both sides. Using
    split()/join() replaces the original fixed-length chain of
    ``.replace('  ', ' ')`` calls and handles whitespace runs of any length.
    """
    expanded = n.lower().replace(",", " ").replace("&", " and ")
    return " ".join(expanded.split())
class Command(django.core.management.base.BaseCommand):
    # NOTE(review): option_list is the legacy optparse-based management
    # command API (removed in Django 1.10); this command targets old Django.
    option_list = django.core.management.base.BaseCommand.option_list + ()
    help = "Load constituency data"
    def handle(self, *args, **options):
        """Rebuild all Boundary rows from the four national KML files,
        storing one simplified outline per constituency per zoom level
        (0..maxZoom), simplified to roughly one vertex per screen pixel."""
        # Map normalized constituency names to Constituency rows so KML
        # placemark names can be matched despite punctuation differences.
        constituencies = {}
        for c in Constituency.objects.all():
            constituencies[reduceName(c.name)] = c
        # Wipe all existing boundaries before reloading.
        for b in Boundary.objects.all():
            b.delete()
        for f in ['boundaries/data/england.kml',
                  'boundaries/data/wales.kml',
                  'boundaries/data/scotland.kml',
                  'boundaries/data/northern_ireland.kml']:
            places = parse(f).getElementsByTagName("Placemark")
            for place in places:
                name = place.getElementsByTagName("name")[0].childNodes[0].toxml()
                v = []
                for coords in place.getElementsByTagName("coordinates"):
                    points = []
                    # Bounding box accumulators in Google-projection units.
                    north = - google_dist
                    south = google_dist
                    east = - google_dist
                    west = google_dist
                    for coord in coords.childNodes[0].toxml().split(" "):
                        s = coord.split(",")
                        if len(s) == 3:
                            # lon,lat,alt triple; the altitude is discarded.
                            x, y = [float(c) for c in coord.split(",")][:2]
                            # Reproject WGS84 -> spherical mercator (900913).
                            p = Point(x, y, srid=4326)
                            p.transform(900913)
                            gx, gy = p.coords
                            if gy > north: north = gy
                            if gy < south: south = gy
                            if gx > east: east = gx
                            if gx < west: west = gx
                            points.append((gx, gy))
                    for z in range(maxZoom + 1):
                        # Squared size of one screen pixel at zoom z; vertices
                        # closer together than this are dropped.
                        pixelsize2 = ((2 * google_dist / 256) / (2 ** z)) ** 2
                        u = []
                        previousX = 1e20
                        previousY = 1e20
                        for x, y in points:
                            if z == maxZoom:
                                # Max zoom keeps every vertex.
                                u.append("(%f, %f)" % (x, y))
                            elif (x - previousX) ** 2 + (y - previousY) ** 2 > pixelsize2:
                                u.append("(%f, %f)" % (x, y))
                                previousX, previousY = x, y
                        if z != maxZoom and (previousX, previousY) != (x, y):
                            # Always keep the final vertex so the ring closes.
                            u.append("(%f, %f)" % (x, y))
                        if len(u) > 3:
                            constituency = constituencies[reduceName(name)] #Need to use this function due to slight name mismatches
                            # NOTE(review): reduce() is the Python 2 builtin;
                            # Python 3 would need functools.reduce (or
                            # ", ".join(u)).
                            boundary="[%s]" % reduce(lambda x, y: "%s, %s" %(x, y), u).strip()
                            b=Boundary(zoom = z,
                                       constituency = constituency,
                                       boundary=boundary,
                                       east = east,
                                       west = west,
                                       north = north,
                                       south = south)
                            try:
                                b.save()
                            except:
                                # NOTE(review): bare except silently drops any
                                # boundary that fails to save; consider
                                # logging the exception here.
                                # print boundary
                                pass
                # if len(v) >= 1:
                #     print "'%s'" % name
| {
"content_hash": "e46c069f1dd7340645b3c78f86510a9f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 136,
"avg_line_length": 48.24050632911393,
"alnum_prop": 0.4025190238782472,
"repo_name": "electionleaflets/electionleaflets",
"id": "19d778e289af3d6407f48d5e2fe1e778449e595b",
"size": "3811",
"binary": false,
"copies": "1",
"ref": "refs/heads/django_1_7",
"path": "electionleaflets/apps/boundaries/management/commands/loadboundaries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17642"
},
{
"name": "JavaScript",
"bytes": "9551"
},
{
"name": "Python",
"bytes": "106088"
}
],
"symlink_target": ""
} |
import copy
import crcmod
from common.conversions import Conversions as CV
from selfdrive.car.tesla.values import CANBUS, CarControllerParams
class TeslaCAN:
  """Builders for the CAN messages sent to the Tesla chassis/powertrain buses."""

  def __init__(self, packer, pt_packer):
    self.packer = packer
    self.pt_packer = pt_packer
    # CRC-8 (poly 0x11d, xor-out 0xff) used to sign STW_ACTN_RQ messages.
    self.crc = crcmod.mkCrcFun(0x11d, initCrc=0x00, rev=False, xorOut=0xff)

  @staticmethod
  def checksum(msg_id, dat):
    # TODO: get message ID from name instead
    # Additive checksum over both message-ID bytes plus the payload bytes.
    total = (msg_id & 0xFF) + ((msg_id >> 8) & 0xFF) + sum(dat)
    return total & 0xFF

  def create_steering_control(self, angle, enabled, frame):
    values = {
      "DAS_steeringAngleRequest": -angle,
      "DAS_steeringHapticRequest": 0,
      "DAS_steeringControlType": 1 if enabled else 0,
      "DAS_steeringControlCounter": (frame % 16),
    }
    # Pack once to compute the checksum over the first three payload bytes,
    # then pack again with the checksum filled in.
    payload = self.packer.make_can_msg("DAS_steeringControl", CANBUS.chassis, values)[2]
    values["DAS_steeringControlChecksum"] = self.checksum(0x488, payload[:3])
    return self.packer.make_can_msg("DAS_steeringControl", CANBUS.chassis, values)

  def create_action_request(self, msg_stw_actn_req, cancel, bus, counter):
    # Start from the last received stalk message and override as needed.
    values = copy.copy(msg_stw_actn_req)
    if cancel:
      values["SpdCtrlLvr_Stat"] = 1
      values["MC_STW_ACTN_RQ"] = counter
    payload = self.packer.make_can_msg("STW_ACTN_RQ", bus, values)[2]
    values["CRC_STW_ACTN_RQ"] = self.crc(payload[:7])
    return self.packer.make_can_msg("STW_ACTN_RQ", bus, values)

  def create_longitudinal_commands(self, acc_state, speed, min_accel, max_accel, cnt):
    values = {
      "DAS_setSpeed": speed * CV.MS_TO_KPH,
      "DAS_accState": acc_state,
      "DAS_aebEvent": 0,
      "DAS_jerkMin": CarControllerParams.JERK_LIMIT_MIN,
      "DAS_jerkMax": CarControllerParams.JERK_LIMIT_MAX,
      "DAS_accelMin": min_accel,
      "DAS_accelMax": max_accel,
      "DAS_controlCounter": cnt,
      "DAS_controlChecksum": 0,
    }
    # The same DAS_control message goes out on both the chassis and the
    # powertrain bus, each packed (and checksummed) with its own packer.
    messages = []
    for pkr, bus in ((self.packer, CANBUS.chassis), (self.pt_packer, CANBUS.powertrain)):
      payload = pkr.make_can_msg("DAS_control", bus, values)[2]
      values["DAS_controlChecksum"] = self.checksum(0x2b9, payload[:7])
      messages.append(pkr.make_can_msg("DAS_control", bus, values))
    return messages
| {
"content_hash": "2fa3f95b72c7a44000b9d604811abd7a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 92,
"avg_line_length": 35.70967741935484,
"alnum_prop": 0.6635049683830172,
"repo_name": "commaai/openpilot",
"id": "e5d904f80efc50f6c6905a4453adf9ecfcaa9fc5",
"size": "2214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selfdrive/car/tesla/teslacan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "604924"
},
{
"name": "C++",
"bytes": "1125553"
},
{
"name": "Cython",
"bytes": "50503"
},
{
"name": "Dockerfile",
"bytes": "1239"
},
{
"name": "Emacs Lisp",
"bytes": "124"
},
{
"name": "HTML",
"bytes": "11493"
},
{
"name": "Kaitai Struct",
"bytes": "8093"
},
{
"name": "MATLAB",
"bytes": "35190"
},
{
"name": "Makefile",
"bytes": "14018"
},
{
"name": "Python",
"bytes": "2386885"
},
{
"name": "QML",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "32876"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import pony
from pony.utils import decorator_with_params, tostring
from pony.templating import htmltag, Html, htmljoin, lazy, BoundMarkup
from pony.web import local, http, url
from pony.postprocessing import css_link, script_link
def blueprint_link(column_count=24, column_width=30, gutter_width=10, ns=''):
    """Return <link> tags that pull in the Blueprint CSS framework.

    The default grid (24 columns, 30px wide, 10px gutters, no namespace)
    maps to pre-generated stylesheets; any other combination is encoded
    into the stylesheet path so the server can generate it on the fly.
    """
    if (column_count, column_width, gutter_width, ns) == (24, 30, 10, ''):
        # Fast path: canonical Blueprint grid, static files.
        return Html(
            '<link rel="stylesheet" href="/pony/static/blueprint/screen.css" type="text/css" media="screen, projection">\n'
            '<link rel="stylesheet" href="/pony/static/blueprint/print.css" type="text/css" media="print">\n'
            '<!--[if IE]><link rel="stylesheet" href="/pony/static/blueprint/ie.css.css" type="text/css" media="screen, projection"><![endif]-->\n'
        )
    if ns: params = Html('%s/%s/%s/%s') % (column_count, column_width, gutter_width, ns)
    else: params = Html('%s/%s/%s') % (column_count, column_width, gutter_width)
    return Html(
        '<link rel="stylesheet" href="/pony/static/blueprint/%s/screen.css" type="text/css" media="screen, projection">\n'
        '<link rel="stylesheet" href="/pony/static/blueprint/%s/print.css" type="text/css" media="print">\n'
        '<!--[if IE]><link rel="stylesheet" href="/pony/static/blueprint/%s/ie.css.css" type="text/css" media="screen, projection"><![endif]-->\n'
    ) % (params, params, params)
def jquery_link():
    """Return a <script> tag loading the bundled jQuery."""
    tag = '<script src="/pony/static/jquery/jquery.js"></script>'
    return Html(tag)
# Registry of named helpers dispatched by link('<name>', ...).
link_funcs = dict(
    blueprint=blueprint_link,
    jquery=jquery_link
)
@lazy
def link(*args, **kwargs):
    """Generate an HTML hyperlink.

    Supported positional forms:
      * link(func, *func_args)                  -- text from func's docstring/name
      * link(description, func, *func_args)
      * link(description, attrs, func, *func_args)
      * link('<registered name>', ...)          -- dispatch via link_funcs
      * link('file.css', ...), link('file.js')  -- stylesheet/script links
    A trailing BoundMarkup argument supplies the link text.

    Raises TypeError for argument combinations that match none of the above.
    """
    if not args: raise TypeError('link() function requires at least one positional argument')
    attrs = None
    last = args[-1]
    if isinstance(last, BoundMarkup):
        # Markup block supplies the description; move it to the front so the
        # positional forms below see it as the first argument.
        description = last()
        args = (description,) + args[:-1]
    first = args[0]
    if hasattr(first, 'routes'):
        func = first
        args = args[1:]
        if func.__doc__ is None: description = func.__name__
        else: description = Html(func.__doc__.split('\n', 1)[0])
    elif len(args) > 1 and hasattr(args[1], 'routes'):
        description = tostring(first)
        func = args[1]
        args = args[2:]
    elif len(args) > 2 and hasattr(args[2], 'routes'):
        attrs = args[1]
        if isinstance(attrs, basestring): attrs = {'class': attrs}
        elif not hasattr(attrs, 'items'):
            # Bug fix: this message previously interpolated the undefined name
            # 'second', raising NameError instead of the intended TypeError.
            raise TypeError('Invalid second argument of link() function: %r' % attrs)
        description = tostring(first)
        func = args[2]
        args = args[3:]
    elif isinstance(first, basestring):
        func = link_funcs.get(first)
        if func is not None: return func(*args[1:], **kwargs)
        if first.endswith('.css'):
            if kwargs: raise TypeError('Unexpected key arguments')
            # NOTE(review): the full args tuple (including 'first') is passed on
            # deliberately — css_link accepts a sequence of stylesheet specs.
            return css_link(args)
        if first.endswith('.js'):
            if len(args) > 1: raise TypeError('Unexpected positional arguments')
            if kwargs: raise TypeError('Unexpected key arguments')
            return script_link(first)
        raise TypeError('Invalid arguments of link() function')
    href = url(func, *args, **kwargs)
    return htmljoin([htmltag('a', attrs, href=href), description, Html('</a>')])
# <img> template: src, title, alt — title and alt share the same description.
img_template = Html(u'<img src="%s" title="%s" alt="%s">')
def img(*args, **kwargs):
    """Generate an <img> tag for a routed function.

    Forms: img(description, func, *func_args) or img(func, *func_args);
    in the latter case the description falls back to the first docstring
    line (or the function name when there is no docstring).
    """
    if isinstance(args[0], basestring):
        description, func = args[0], args[1]
        args = args[2:]
    else:
        func = args[0]
        args = args[1:]
        if func.__doc__ is None:
            description = func.__name__
        else:
            description = Html(func.__doc__.split('\n', 1)[0])
    href = url(func, *args, **kwargs)
    return img_template % (href, description, description)
@decorator_with_params
def component(css=None, js=None):
    """Decorator registering stylesheets/scripts required by a view helper.

    Each call to the wrapped function first queues the given CSS and JS
    resources on the current response, then delegates to the function.
    """
    def new_dec(func, *args, **kwargs):
        response = local.response
        if css is not None:
            # A single spec (string or (url, media[, condition]) tuple) is
            # wrapped in a list; anything else is assumed to be a sequence.
            stylesheets = [ css ] if isinstance(css, (basestring, tuple)) else css
            response.add_component_stylesheets(stylesheets)
        if js is not None:
            scripts = [ js ] if isinstance(js, basestring) else js
            response.add_scripts(scripts)
        return func(*args, **kwargs)
    return new_dec
@component(css='/pony/static/css/rounded-corners.css')
def rounded(markup, **attrs):
    """Wrap markup in a container with four rounded-corner divs.

    'tagname' (default 'div') and 'radius' (default 10) are consumed from
    attrs; the rest become attributes of the wrapping tag.
    """
    tagname = attrs.pop('tagname', 'div')
    radius = attrs.pop('radius', 10)
    opening = htmltag(tagname, {'class': 'rounded'}, **attrs)
    corners = Html('<div class="top-left radius-%s"></div>\n'
                   '<div class="top-right radius-%s"></div>\n'
                   '<div class="bottom-left radius-%s"></div>\n'
                   '<div class="bottom-right radius-%s"></div>\n'
                   '</%s>') % (radius, radius, radius, radius, tagname)
    return Html('\n').join([opening, markup, corners])
class tabs(object):
    """Accumulates named tab panes and renders a jQuery-UI tabs widget."""
    @component(css=[ ('/pony/static/jquery/ui.tabs.css', 'print, projection, screen'),
                     ('/pony/static/jquery/ui.tabs-ie.css', 'projection, screen', 'if lte IE 7') ],
               js=[ '/pony/static/jquery/jquery.js',
                    '/pony/static/jquery/ui.core.js',
                    '/pony/static/jquery/ui.tabs.js',
                    '/pony/static/js/tabs.js' ])
    def __init__(self, **attrs):
        self.attrs = attrs
        self._tabs = []
    def tab(self, name, markup, **attrs):
        """Add a tab pane; an 'id' attribute is generated unless provided."""
        # Bug fix: this previously tested ``id not in attrs`` — i.e. whether the
        # *builtin* id function was a key — which is never true, so a
        # caller-supplied id was always overwritten.
        if 'id' not in attrs: attrs['id'] = next(http.response.id_counter)
        self._tabs.append((name, markup, attrs))
    def __unicode__(self):
        # First the <ul> of tab headers linking to pane ids, then the panes.
        result = [htmltag('div', {'class':'pony-tabs clearfix'}, **self.attrs), Html('\n<ul>\n') ]
        for name, markup, attrs in self._tabs:
            result.append(Html('<li><a href="#%s"><span>%s</span></a>\n') % (attrs['id'], name))
        result.append(Html('</ul>\n'))
        for name, markup, attrs in self._tabs:
            result.extend([htmltag('div', {'class': 'pony-tab clearfix'}, **attrs), markup, Html('</div>\n')])
        result.append(Html('</div>'))
        return Html('').join(result)
@component(css='/pony/static/css/button.css')
def button(link, markup, **attrs):
    """Render an anchor styled as a button."""
    opening = htmltag('a', {'class' : 'button', 'href' : link}, **attrs)
    body = Html('<span>%s</span></a>') % markup
    return Html('').join([opening, body])
| {
"content_hash": "d3bb2d032944dd1fdf8b34dde0e71651",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 147,
"avg_line_length": 45.358620689655176,
"alnum_prop": 0.5727535350463737,
"repo_name": "compiteing/flask-ponypermission",
"id": "d5ee9c96dc8de51cdf17ab525664566ce2b3c69d",
"size": "6577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pony/webutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "1793"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Python",
"bytes": "4130219"
},
{
"name": "Shell",
"bytes": "3783"
}
],
"symlink_target": ""
} |
from azure.mgmt.storage.models import StorageAccount
from c7n_azure.constants import FUNCTION_EVENT_TRIGGER_MODE, FUNCTION_TIME_TRIGGER_MODE, \
CONTAINER_EVENT_TRIGGER_MODE, CONTAINER_TIME_TRIGGER_MODE
from c7n_azure.policy import AzureEventGridMode, AzureFunctionMode, AzureModeCommon
from mock import mock, patch, Mock
from c7n.config import Bag
from c7n.exceptions import PolicyValidationError, PolicyExecutionError
from .azure_common import BaseTest, DEFAULT_SUBSCRIPTION_ID, arm_template, cassette_name
class AzurePolicyModeTest(BaseTest):
    """Tests for Azure serverless/container policy execution modes.

    Covers mode schema validation (event and timer triggers, function and
    container hosted), function-app parameter derivation, user-assigned
    identity resolution, Event Grid subscription filters, and the
    resource-id extraction helpers in AzureModeCommon.
    """
    def setUp(self):
        super(AzurePolicyModeTest, self).setUp()
    # --- schema validation: function-mode event triggers ---
    def test_azure_function_event_mode_schema_validation(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-serverless-mode',
                'resource': 'azure.vm',
                'mode':
                    {'type': FUNCTION_EVENT_TRIGGER_MODE,
                     'events': ['VmWrite'],
                     'provision-options': {
                         'servicePlan': {
                             'name': 'test-cloud-custodian',
                             'location': 'eastus',
                             'resourceGroupName': 'test'},
                         'storageAccount': {
                             'name': 'testschemaname'
                         },
                         'appInsights': {
                             'name': 'testschemaname'
                         }
                     }}
            }, validate=True)
            self.assertTrue(p)
    def test_azure_function_event_mode_too_many_events_throws(self):
        with self.sign_out_patch():
            with self.assertRaises(PolicyValidationError):
                self.load_policy({
                    'name': 'test-azure-serverless-mode',
                    'resource': 'azure.vm',
                    'mode': {
                        'type': FUNCTION_EVENT_TRIGGER_MODE,
                        'events': [
                            'VmWrite',
                            {
                                'resourceProvider': 'Microsoft.Compute/virtualMachines/',
                                'event': 'delete'
                            },
                            {
                                'resourceProvider': 'Microsoft.Compute/virtualMachines/',
                                'event': 'powerOff/action'
                            },
                            {
                                'resourceProvider': 'Microsoft.Compute/virtualMachines/',
                                'event': 'reimage/action'
                            },
                            {
                                'resourceProvider': 'Microsoft.Compute/virtualMachines/',
                                'event': 'redeploy/action'
                            },
                            {
                                'resourceProvider': 'Microsoft.Compute/virtualMachines/',
                                'event': 'start/action'
                            }
                        ]
                    }
                }, validate=True)
    def test_azure_function_event_mode_incorrect_event_type(self):
        with self.sign_out_patch():
            with self.assertRaises(PolicyValidationError):
                self.load_policy({
                    'name': 'test-azure-serverless-mode',
                    'resource': 'azure.vm',
                    'mode': {
                        'type': FUNCTION_EVENT_TRIGGER_MODE,
                        'events': [
                            'CosmosDbWrite',
                        ]
                    }
                }, validate=True)
    def test_azure_function_event_mode_child_event_type(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-serverless-mode',
                'resource': 'azure.networksecuritygroup',
                'mode': {
                    'type': FUNCTION_EVENT_TRIGGER_MODE,
                    'events': [
                        {
                            'resourceProvider':
                                'Microsoft.Network/networkSecurityGroups/securityRules',
                            'event': 'write'
                        }
                    ]
                }
            }, validate=True)
            self.assertTrue(p)
    def test_azure_function_event_mode_generic_resource_type(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-serverless-mode',
                'resource': 'azure.armresource',
                'mode': {
                    'type': FUNCTION_EVENT_TRIGGER_MODE,
                    'events': [
                        'KeyVaultWrite',
                        'ResourceGroupWrite',
                        'VmWrite'
                    ]
                }
            }, validate=True)
            self.assertTrue(p)
    def test_azure_function_event_mode_unsupported_resource_type(self):
        with self.sign_out_patch():
            with self.assertRaises(PolicyValidationError):
                self.load_policy({
                    'name': 'test-azure-serverless-mode',
                    'resource': 'azure.keyvault-key',
                    'mode': {
                        'type': FUNCTION_EVENT_TRIGGER_MODE,
                        'events': [
                            'KeyVaultWrite',
                        ]
                    }
                }, validate=True)
    # --- schema validation: periodic (timer) triggers ---
    def test_azure_function_periodic_mode_schema_validation(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-serverless-mode',
                'resource': 'azure.vm',
                'mode':
                    {'type': FUNCTION_TIME_TRIGGER_MODE,
                     'schedule': '0 */5 * * * *',
                     'provision-options': {
                         'servicePlan': {
                             'name': 'test-cloud-custodian',
                             'location': 'eastus',
                             'resourceGroupName': 'test'},
                         'storageAccount': {
                             'name': 'testschemaname'
                         },
                         'appInsights': {
                             'name': 'testschemaname'
                         }
                     }}
            }, validate=True)
            self.assertTrue(p)
    def test_azure_function_periodic_schema_schedule_valid(self):
        # Function timer triggers use six-field NCRONTAB expressions.
        policy = {
            'name': 'test-azure-schema-schedule-valid',
            'resource': 'azure.vm',
            'mode': {
                'type': FUNCTION_TIME_TRIGGER_MODE,
                'schedule': ''
            }
        }
        valid_schedules = [
            '0 5 */2 * * friday',
            '0 * 5 * February *',
            '5-7 * * * * 1-5',
            '5,8,10 * * * Jan Mon'
        ]
        result = True
        for valid_schedule in valid_schedules:
            policy['mode']['schedule'] = valid_schedule
            p = self.load_policy(policy, validate=True)
            result = result and p
        self.assertTrue(result)
    def test_azure_function_periodic_schema_schedule_invalid(self):
        policy = {
            'name': 'test-azure-schema-schedule-invalid',
            'resource': 'azure.vm',
            'mode': {
                'type': FUNCTION_TIME_TRIGGER_MODE,
                'schedule': ''
            }
        }
        invalid_schedules = [
            '* * * * *',
            '0 * * * * * *',
            '* * * * * *',
            '0 0 0 0 0 0',
            '15-60 * * * * 7'
        ]
        for invalid_schedule in invalid_schedules:
            policy['mode']['schedule'] = invalid_schedule
            with self.assertRaises(PolicyValidationError):
                self.load_policy(policy, validate=True)
    def test_container_periodic_schema_schedule_valid(self):
        # Container timer triggers use five-field cron expressions.
        policy = {
            'name': 'test-azure-periodic-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': CONTAINER_TIME_TRIGGER_MODE,
                 'schedule': ''}
        }
        valid_schedules = [
            '5 */2 * * fri',
            ' * 5 * feb * ',
            '5-7 * * * 1-5 ',
            '5,8,10 * * jan mon'
        ]
        result = True
        for valid_schedule in valid_schedules:
            policy['mode']['schedule'] = valid_schedule
            p = self.load_policy(policy, validate=True)
            result = result and p
        self.assertTrue(result)
    def test_container_periodic_schema_schedule_invalid(self):
        policy = {
            'name': 'test-azure-periodic-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': CONTAINER_TIME_TRIGGER_MODE,
                 'schedule': ''}
        }
        # Bug fix: a missing comma after '* * * * * *' implicitly concatenated
        # it with '*/15 * Jan 1-5', collapsing two intended cases into one.
        invalid_schedules = [
            '* * * *',
            '* * * * * *',
            '*/15 * Jan 1-5',
            '* 15 * jan 7',
        ]
        for invalid_schedule in invalid_schedules:
            policy['mode']['schedule'] = invalid_schedule
            with self.assertRaises(PolicyValidationError):
                self.load_policy(policy, validate=True)
    def test_container_event_mode_schema_validation(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-event-mode',
                'resource': 'azure.vm',
                'mode':
                    {'type': CONTAINER_EVENT_TRIGGER_MODE,
                     'events': ['VmWrite']}
            }, validate=True)
            self.assertTrue(p)
    def test_container_periodic_mode_schema_validation(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-periodic-mode',
                'resource': 'azure.vm',
                'mode':
                    {'type': CONTAINER_TIME_TRIGGER_MODE,
                     'schedule': '*/5 * * * *'}
            }, validate=True)
            self.assertTrue(p)
    # --- user-assigned identity handling ---
    def test_azure_function_uai_sans_id(self):
        with self.assertRaises(PolicyValidationError) as em:
            self.load_policy({
                'name': 'something',
                'resource': 'azure.vm',
                'mode': {
                    'type': FUNCTION_EVENT_TRIGGER_MODE,
                    'events': ['VmWrite'],
                    'provision-options': {
                        'identity': {'type': 'UserAssigned'}}}},
                validate=True)
        self.assertIn(
            'policy:something user assigned identity requires specifying id',
            str(em.exception))
    def test_azure_function_unresolved_uai_identity(self):
        session = mock.MagicMock()
        p = self.load_policy({
            'name': 'sm',
            'resource': 'azure.vm',
            'mode': {
                'type': FUNCTION_EVENT_TRIGGER_MODE,
                'events': ['VmWrite'],
                'provision-options': {
                    'identity': {'type': 'UserAssigned', 'id': 'mike'}}}})
        exec_mode = p.get_execution_mode()
        with self.assertRaises(PolicyExecutionError) as em:
            exec_mode._get_identity(session)
        self.assertIn(
            'policy:sm Could not find the user assigned identity mike',
            str(em.exception))
    def test_azure_function_resolved_uai_identity(self):
        session = mock.MagicMock()
        p = self.load_policy({
            'name': 'sm',
            'resource': 'azure.vm',
            'mode': {
                'type': FUNCTION_EVENT_TRIGGER_MODE,
                'events': ['VmWrite'],
                'provision-options': {
                    'identity': {'type': 'UserAssigned', 'id': 'mike'}}}})
        exec_mode = p.get_execution_mode()
        uai = dict(
            name='mike', id='/subscriptions/xyz/userAssignedIdentities/foo',
            client_id='bob')
        session.client(
            'azure.mgmt.msi.ManagedServiceIdentityClient'
        ).user_assigned_identities.list_by_subscription.return_value = [Bag(uai)]
        identity = exec_mode._get_identity(session)
        self.assertEqual(identity, {
            'type': 'UserAssigned',
            'client_id': 'bob',
            'id': '/subscriptions/xyz/userAssignedIdentities/foo'})
    # --- function-app parameter derivation from provision-options ---
    def test_init_azure_function_mode_with_service_plan(self):
        p = self.load_policy({
            'name': 'test-azure-serverless-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite'],
                 'provision-options': {
                     'servicePlan': {
                         'name': 'test-cloud-custodian',
                         'location': 'eastus',
                         'resourceGroupName': 'test'}
                 }}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertEqual(function_mode.policy_name, p.data['name'])
        self.assertTrue(params.storage_account['name'].startswith('custodian'))
        self.assertEqual(params.app_insights['name'], 'test-cloud-custodian')
        self.assertEqual(params.service_plan['name'], "test-cloud-custodian")
        self.assertEqual(params.service_plan['location'], "eastus")
        self.assertEqual(params.app_insights['location'], "eastus")
        self.assertEqual(params.storage_account['location'], "eastus")
        self.assertEqual(params.storage_account['resource_group_name'], 'test')
        self.assertEqual(params.app_insights['resource_group_name'], 'test')
        self.assertEqual(params.service_plan['resource_group_name'], "test")
        self.assertTrue(params.function_app['name'].startswith('test-azure-serverless-mode-'))
    def test_init_azure_function_mode_no_service_plan_name(self):
        p = self.load_policy({
            'name': 'test-azure-serverless-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertEqual(function_mode.policy_name, p.data['name'])
        self.assertEqual(params.service_plan['name'], "cloud-custodian")
        self.assertEqual(params.service_plan['location'], "eastus")
        self.assertEqual(params.service_plan['resource_group_name'], "cloud-custodian")
        self.assertEqual(params.app_insights['name'], 'cloud-custodian')
        self.assertEqual(params.app_insights['location'], "eastus")
        self.assertEqual(params.app_insights['resource_group_name'], 'cloud-custodian')
        self.assertTrue(params.storage_account['name'].startswith('custodian'))
        self.assertEqual(params.storage_account['location'], "eastus")
        self.assertEqual(params.storage_account['resource_group_name'], 'cloud-custodian')
        self.assertTrue(params.function_app['name'].startswith('test-azure-serverless-mode-'))
    def test_init_azure_function_mode_invalid_policy_name(self):
        p = self.load_policy({
            'name': 'this-policy-name-is-going-to-be-too-long-since-the-maximum-size-is-60',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']}
        })
        function_mode = AzureFunctionMode(p)
        with self.assertRaises(ValueError):
            function_mode.get_function_app_params()
    def test_init_azure_function_mode_invalid_characters_in_policy_name(self):
        p = self.load_policy({
            'name': 'invalid_policy_name1',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertRegex(params.function_app['name'], "invalid-policy-name1-[a-zA-Z0-9]+")
    def test_init_azure_function_mode_with_resource_ids(self):
        ai_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
                '/testrg/providers/microsoft.insights/components/testai'
        sp_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
                '/testrg/providers/Microsoft.Web/serverFarms/testsp'
        sa_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
                '/testrg/providers/Microsoft.Storage/storageAccounts/testsa'
        p = self.load_policy({
            'name': 'test-azure-serverless-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite'],
                 'provision-options': {
                     'servicePlan': sp_id,
                     'storageAccount': sa_id,
                     'appInsights': ai_id
                 }}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertEqual(function_mode.policy_name, p.data['name'])
        self.assertEqual(params.storage_account['id'], sa_id)
        self.assertEqual(params.storage_account['name'], 'testsa')
        self.assertEqual(params.storage_account['resource_group_name'], 'testrg')
        self.assertEqual(params.app_insights['id'], ai_id)
        self.assertEqual(params.app_insights['name'], 'testai')
        self.assertEqual(params.app_insights['resource_group_name'], 'testrg')
        self.assertEqual(params.service_plan['id'], sp_id)
        self.assertEqual(params.service_plan['name'], "testsp")
        self.assertEqual(params.service_plan['resource_group_name'], "testrg")
        self.assertTrue(params.function_app['name'].startswith('test-azure-serverless-mode-'))
    # --- Event Grid subscription creation and advanced filters ---
    def test_event_grid_mode_creates_advanced_filtered_subscription(self):
        p = self.load_policy({
            'name': 'test-azure-event',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']},
        })
        with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
            storage_account = StorageAccount(id=1, location='westus')
            event_mode = AzureEventGridMode(p)
            event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
            event_mode._create_event_subscription(storage_account, 'some_queue', None)
            name, args, kwargs = mock_create.mock_calls[0]
            # verify the advanced filter created
            event_filter = args[4].advanced_filters[0]
            self.assertEqual(event_filter.key, 'Data.OperationName')
            self.assertEqual(event_filter.values, ['Microsoft.Compute/virtualMachines/write'])
            self.assertEqual(event_filter.operator_type, 'StringIn')
    def test_event_grid_mode_creates_advanced_filtered_subscription_with_multiple_events(self):
        p = self.load_policy({
            'name': 'test-azure-event',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events':
                     ['VmWrite',
                      {
                          'resourceProvider': 'Microsoft.Compute/virtualMachines',
                          'event': 'powerOff/action'
                      }]},
        })
        with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
            storage_account = StorageAccount(id=1, location='westus')
            event_mode = AzureEventGridMode(p)
            event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
            event_mode._create_event_subscription(storage_account, 'some_queue', None)
            name, args, kwargs = mock_create.mock_calls[0]
            # verify the advanced filter created
            event_filter = args[4].advanced_filters[0]
            self.assertEqual(event_filter.key, 'Data.OperationName')
            self.assertEqual(event_filter.values,
                             ['Microsoft.Compute/virtualMachines/write',
                              'Microsoft.Compute/virtualMachines/powerOff/action'])
            self.assertEqual(event_filter.operator_type, 'StringIn')
    # --- helpers: property extraction and resource-id scoping ---
    def test_extract_properties(self):
        resource_id = '/subscriptions/{0}/resourceGroups/rg/providers' \
                      '/Microsoft.Web/serverFarms/test'.format(DEFAULT_SUBSCRIPTION_ID)
        r = AzureFunctionMode.extract_properties({}, '', {})
        self.assertEqual(r, {})
        r = AzureFunctionMode.extract_properties({}, 'v', {'v': 'default'})
        self.assertEqual(r, {'v': 'default'})
        r = AzureFunctionMode.extract_properties({'v': resource_id}, 'v', {'v': 'default'})
        self.assertEqual(r, {'id': resource_id, 'name': 'test', 'resource_group_name': 'rg'})
        r = AzureFunctionMode.extract_properties(
            {'v': {'test1': 'value1', 'testCamel': 'valueCamel'}},
            'v',
            {'test1': None, 'test_camel': None})
        self.assertEqual(r, {'test1': 'value1', 'test_camel': 'valueCamel'})
        r = AzureFunctionMode.extract_properties(
            {'v': {'t1': 'v1', 'nestedValue': {'testCamel': 'valueCamel'}}},
            'v',
            {'t1': None, 'nested_value': {'test_camel': None}, 't2': 'v2'})
        self.assertEqual(r, {'t1': 'v1', 't2': 'v2', 'nested_value': {'test_camel': 'valueCamel'}})
    @arm_template('emptyrg.json')
    @cassette_name('resourcegroup')
    @patch('c7n_azure.actions.delete.DeleteAction._process_resource')
    def test_empty_group_function_event(self, mock_delete):
        p = self.load_policy({
            'name': 'test-azure-resource-group',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['ResourceGroupWrite'],
                 'provision-options': {
                     'servicePlan': {
                         'name': 'test-cloud-custodian'
                     }
                 }},
            'resource': 'azure.resourcegroup',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'eq',
                 'value': 'test_emptyrg'},
                {'type': 'empty-group'}],
            'actions': [
                {'type': 'delete'}]})
        event = AzurePolicyModeTest.get_sample_event()
        resources = p.push(event, None)
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['name'], 'test_emptyrg')
        self.assertTrue(mock_delete.called)
    @arm_template('emptyrg.json')
    @cassette_name('resourcegroup')
    @patch('c7n_azure.actions.delete.DeleteAction._process_resource')
    def test_empty_group_container_event(self, mock_delete):
        p = self.load_policy({
            'name': 'test-azure-resource-group',
            'mode':
                {'type': CONTAINER_EVENT_TRIGGER_MODE,
                 'events': ['ResourceGroupWrite']},
            'resource': 'azure.resourcegroup',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'eq',
                 'value': 'test_emptyrg'},
                {'type': 'empty-group'}],
            'actions': [
                {'type': 'delete'}]})
        event = AzurePolicyModeTest.get_sample_event()
        resources = p.push(event, None)
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['name'], 'test_emptyrg')
        self.assertTrue(mock_delete.called)
    @arm_template('emptyrg.json')
    def test_empty_group_container_scheduled(self):
        p = self.load_policy({
            'name': 'test-azure-resource-group',
            'mode':
                {'type': CONTAINER_TIME_TRIGGER_MODE,
                 'schedule': '* * * * *'},
            'resource': 'azure.resourcegroup',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'eq',
                 'value': 'test_emptyrg'},
                {'type': 'empty-group'}]})
        resources = p.push(None, None)
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['name'], 'test_emptyrg')
    def test_extract_resource_id(self):
        # The extracted id should be truncated to the policy's resource type:
        # child-resource events map back to their parent resource id, and
        # armresource policies pass the subject through unchanged.
        rg_id = "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_emptyrg"
        nsg_id = rg_id + '/providers/Microsoft.Network/networkSecurityGroups/test-nsg'
        sr_id = nsg_id + '/securityRules/test-rule'
        string_as_is = 'as-is-for-armresource'
        resource_type = ''
        policy = Mock()
        policy.resource_manager.resource_type.resource_type = resource_type
        event = {'subject': rg_id}
        policy.resource_manager.resource_type.resource_type = \
            'resourceGroups'
        self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), rg_id)
        event = {'subject': nsg_id}
        policy.resource_manager.resource_type.resource_type = \
            'resourceGroups'
        self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), rg_id)
        event = {'subject': nsg_id}
        policy.resource_manager.resource_type.resource_type =\
            'Microsoft.Network/networksecuritygroups'
        self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), nsg_id)
        event = {'subject': sr_id}
        policy.resource_manager.resource_type.resource_type =\
            'Microsoft.Network/networksecuritygroups'
        self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), nsg_id)
        event = {'subject': string_as_is}
        policy.resource_manager.resource_type.resource_type =\
            'armresource'
        self.assertEqual(AzureModeCommon.extract_resource_id(policy, event), string_as_is)
    # Canonical ResourceGroupWrite event consumed by the push() tests above.
    @staticmethod
    def get_sample_event():
        return {"subject": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/"
                           "resourceGroups/test_emptyrg",
                "eventType": "Microsoft.Resources.ResourceWriteSuccess",
                "eventTime": "2019-07-16T18:30:43.3595255Z",
                "id": "619d2674-b396-4356-9619-6c5a52fe4e88",
                "data": {
                    "correlationId": "7dd5a476-e052-40e2-99e4-bb9852dc1f86",
                    "resourceProvider": "Microsoft.Resources",
                    "resourceUri": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e/"
                                   "resourceGroups/test_emptyrg",
                    "operationName": "Microsoft.Resources/subscriptions/resourceGroups/write",
                    "status": "Succeeded"
                },
                "topic": "/subscriptions/ea98974b-5d2a-4d98-a78a-382f3715d07e"}
| {
"content_hash": "dbc2a7083ac004999588c793589cea47",
"timestamp": "",
"source": "github",
"line_count": 657,
"max_line_length": 99,
"avg_line_length": 41.23744292237443,
"alnum_prop": 0.5170339202007899,
"repo_name": "capitalone/cloud-custodian",
"id": "981d45725216fb17565eec54f8a0e1fc82153dee",
"size": "27220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests_azure/test_policy_mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
"""
chdir2
~~~~~~
An alternative implementation of :func:`chdir.chdir()`.
:copyright: © 2014 by Petr Zemek <s3rvac@gmail.com>
:license: BSD, see LICENSE for more details
"""
import os
class chdir2():
    """An alternative implementation of :func:`chdir.chdir()`.

    Context manager that changes the working directory to *dir* on entry
    and restores the previous working directory on exit, even when the
    managed block raises.
    """
    def __init__(self, dir):
        self.dir = dir
    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.orig_cwd = os.getcwd()
        os.chdir(self.dir)
        # Returning self enables ``with chdir2(d) as cm:`` — the original
        # returned None, so this is a backward-compatible improvement.
        return self
    def __exit__(self, *exc_info):
        os.chdir(self.orig_cwd)
| {
"content_hash": "ad9996e86069abb12b32753fa85b19a3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 20.04,
"alnum_prop": 0.5748502994011976,
"repo_name": "s3rvac/blog",
"id": "50d482ec65f793292dd3090bb9ec7df3bc6a4636",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "en-2014-06-21-unit-testing-with-unittest-mock-patch/chdir2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1002"
},
{
"name": "C++",
"bytes": "21409"
},
{
"name": "CMake",
"bytes": "2455"
},
{
"name": "Haskell",
"bytes": "8592"
},
{
"name": "Makefile",
"bytes": "3621"
},
{
"name": "Python",
"bytes": "19704"
},
{
"name": "Rust",
"bytes": "22749"
},
{
"name": "TeX",
"bytes": "1800"
}
],
"symlink_target": ""
} |
import os
from nose.tools import assert_is_not_none, assert_equals
from cmt.components import Meteorology as Component
from . import example_dir
# Component configuration and the CSDMS standard name exercised by these tests.
cfg_file = os.path.join(example_dir, 'June_20_67_meteorology.cfg')
var_name = 'atmosphere_water__rainfall_volume_flux'
def setup_module():
    """Create the Meteorology component shared by every test in this module."""
    global component
    component = Component()
def teardown_module():
    """Nothing to clean up; the component is finalized inside test_irf."""
    pass
def test_irf():
    """Exercise the initialize/run/finalize lifecycle (order matters)."""
    component.initialize(cfg_file)
    component.update(1.0)
    component.finalize()
def test_get_component_name():
    """Component identifies itself as TopoFlow_Meteorology."""
    assert_equals(component.get_component_name(), 'TopoFlow_Meteorology')
def test_get_start_time():
    """Simulation starts at t=0."""
    assert_equals(component.get_start_time(), 0.0)
def test_get_end_time():
    """Simulation ends at t=600."""
    assert_equals(component.get_end_time(), 600.0)
def test_get_var_type():
    """Rainfall flux is reported as float64."""
    assert_equals(component.get_var_type(var_name), 'float64')
def test_get_var_units():
    """Rainfall flux is reported in m s-1."""
    assert_equals(component.get_var_units(var_name), 'm s-1')
def test_get_var_itemsize():
    """Each float64 element occupies 8 bytes."""
    assert_equals(component.get_var_itemsize(var_name), 8)
# The get_var_nbytes method isn't implemented in TopoFlow.
# def test_get_var_nbytes():
# x = component.get_var_nbytes(var_name)
def test_get_value():
    """Fetching the variable's value returns something non-None."""
    assert_is_not_none(component.get_value(var_name))
def test_get_var_grid():
    """Rainfall flux lives on grid 0."""
    assert_equals(component.get_var_grid(var_name), 0)
def test_get_grid_type():
    """The variable's grid is uniform rectilinear."""
    gid = component.get_var_grid(var_name)
    assert_equals(component.get_grid_type(gid), 'uniform')
def test_get_grid_rank():
    """The grid is two-dimensional."""
    gid = component.get_var_grid(var_name)
    assert_equals(component.get_grid_rank(gid), 2)
def test_get_grid_shape():
    """Grid shape is 44 rows by 29 columns."""
    gid = component.get_var_grid(var_name)
    shape = component.get_grid_shape(gid)
    assert_equals(shape[0], 44)
    assert_equals(shape[1], 29)
def test_get_grid_size():
    """Total node count equals rows * columns."""
    gid = component.get_var_grid(var_name)
    assert_equals(component.get_grid_size(gid), 44*29)
def test_get_grid_spacing():
    """Cells are 30 map units in each direction."""
    gid = component.get_var_grid(var_name)
    spacing = component.get_grid_spacing(gid)
    assert_equals(spacing[0], 30.0)
    assert_equals(spacing[1], 30.0)
def test_get_grid_origin():
    """Grid origin matches the fixture's projected coordinates."""
    gid = component.get_var_grid(var_name)
    origin = component.get_grid_origin(gid)
    assert_equals(origin[0], 4560090.42)
    assert_equals(origin[1], 277850.358)
| {
"content_hash": "c10c499d31409dc151375e5141d7a3f3",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 66,
"avg_line_length": 22.102803738317757,
"alnum_prop": 0.66553911205074,
"repo_name": "Elchin/topoflow-cmi-testing",
"id": "e841c3c83b6ffa676729aea92145df1e207a63a4",
"size": "2440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_meteorology.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10117"
}
],
"symlink_target": ""
} |
def test_distro_metadata(analyzed_data):
result = analyzed_data()
actual = result["image"]["imagedata"]["analysis_report"]["analyzer_meta"]
# This is odd, it nests another `analyzer_meta` in there
expected = {
"analyzer_meta": {
"base": {"DISTRO": "centos", "DISTROVERS": "8", "LIKEDISTRO": "rhel,fedora"}
}
}
assert actual == expected
def test_alpine_metadata(analyzed_data):
result = analyzed_data("alpine2.6")
actual = result["image"]["imagedata"]["analysis_report"]["analyzer_meta"]
expected = {
"analyzer_meta": {
"base": {
"DISTRO": "busybox",
"DISTROVERS": "v1.21.1",
"LIKEDISTRO": "busybox",
}
}
}
assert actual == expected
| {
"content_hash": "885fe0d15c94a21bd77600897e602a67",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 88,
"avg_line_length": 31.76,
"alnum_prop": 0.5440806045340051,
"repo_name": "anchore/anchore-engine",
"id": "7816fea99b5abe849a3cff8fc1c9ac4750e7ad0e",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/clients/standalone/test_analyzer_meta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
import pytest
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.loading.definitions.schema import schema_validator
from tests.utils import (
assert_path_not_in_errors,
assert_message_in_errors,
)
def test_pattern_is_not_required():
    """An empty schema must not report errors under 'pattern'."""
    errors = {}
    try:
        schema_validator({})
    except ValidationError as err:
        errors = err.detail
    assert_path_not_in_errors('pattern', errors)
@pytest.mark.parametrize(
    'value',
    ([1, 2], None, {'a': 1}, True, 1, 1.1),
)
def test_pattern_with_invalid_types(value):
    """Non-string values for 'pattern' are rejected with a type error."""
    with pytest.raises(ValidationError) as err:
        schema_validator({'pattern': value})

    assert_message_in_errors(
        MESSAGES['type']['invalid'], err.value.detail, 'pattern.type',
    )
def test_pattern_with_invalid_regex():
    """A syntactically invalid regular expression is reported."""
    with pytest.raises(ValidationError) as err:
        schema_validator({'pattern': '(arrst'})

    assert_message_in_errors(
        MESSAGES['pattern']['invalid_regex'], err.value.detail, 'pattern',
    )
def test_pattern_for_valid_regex():
    """A valid regular expression produces no 'pattern' errors."""
    errors = {}
    try:
        schema_validator({'pattern': '^test$'})
    except ValidationError as err:
        errors = err.detail
    assert_path_not_in_errors('pattern', errors)
| {
"content_hash": "5c9d3cfdc8854dcd338cb669fc6038bc",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 60,
"avg_line_length": 22.775862068965516,
"alnum_prop": 0.6343679031037093,
"repo_name": "pipermerriam/flex",
"id": "732f6c731ebb989732d808eff160e7f03db95d15",
"size": "1321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/loading/definition/schema/test_pattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1187"
},
{
"name": "Python",
"bytes": "510857"
}
],
"symlink_target": ""
} |
import gzip
from ..system_tools import gunzip
def test_gunzip(tmp_path):
    """gunzip() should inflate a .gz file back to its original bytes."""
    gz_path = tmp_path / 'test_gunzip.txt.gz'
    original = b"Bla"

    # Create the compressed fixture first.
    with gzip.open(gz_path, "wb") as fh:
        fh.write(original)

    # Then verify our gunzip command round-trips the payload.
    gunzip(str(gz_path))
    with open(gz_path.with_suffix(''), "rb") as fh:
        assert fh.read() == original
| {
"content_hash": "a3a10dbc42c5439e2d2cdf9aa8d9651a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 26.5625,
"alnum_prop": 0.6376470588235295,
"repo_name": "imbasimba/astroquery",
"id": "32e98d6e58b0bb30da96c2586e5b39753fab92c3",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astroquery/utils/tests/test_system_tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "493404"
},
{
"name": "Python",
"bytes": "2852847"
}
],
"symlink_target": ""
} |
"""Print a 6-color palette extracted from the image given on the command line."""
from colorthief import ColorThief
import sys

color_thief = ColorThief(sys.argv[1])
palette = color_thief.get_palette(color_count=6)
# BUG FIX: `print palette` is Python-2-only syntax; the call form below is
# valid (and behaves identically for a single argument) on Python 2 and 3.
print(palette)
| {
"content_hash": "7ee096c27f776d8d4c884ae2839c7d86",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 29.2,
"alnum_prop": 0.8013698630136986,
"repo_name": "fthomasmorel/insapp-go",
"id": "cfbf0032903e68a5926ba8f3fae2a57610246c5b",
"size": "146",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/color-thief.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "71954"
},
{
"name": "Python",
"bytes": "146"
}
],
"symlink_target": ""
} |
import errno
import logging
import optparse
import os
import os.path
import re
import shutil
import subprocess
import sys
SOURCE_ROOT = os.path.join(os.path.dirname(__file__), "../..")
# Add the build-support dir to the system path so we can import kudu-util.
sys.path.append(os.path.join(SOURCE_ROOT, "build-support"))
from kudu_util import check_output, Colors, init_logging
from dep_extract import DependencyExtractor
# Constants.
LC_RPATH = 'LC_RPATH'
LC_LOAD_DYLIB = 'LC_LOAD_DYLIB'
KEY_CMD = 'cmd'
KEY_NAME = 'name'
KEY_PATH = 'path'
PAT_SASL_LIBPLAIN = re.compile(r'libplain')
# Exclude libraries that are (L)GPL-licensed and libraries that are not
# portable across Linux kernel versions. One exception is 'libpcre', which
# is BSD-licensed. It is excluded because it is a transitive dependency
# introduced by 'libselinux'.
PAT_LINUX_LIB_EXCLUDE = re.compile(r"""(libpthread|
libc|
libstdc\+\+|
librt|
libdl|
libresolv|
libgcc.*|
libcrypt|
libm|
libkeyutils|
libcom_err|
libdb-[\d.]+|
libselinux|
libpcre|
libtinfo
)\.so""", re.VERBOSE)
# We don't want to ship libSystem because it includes kernel and thread
# routines that we assume may not be portable between macOS versions.
# Also do not ship core libraries that come with the default macOS install
# unless we know that we need to for ABI reasons.
PAT_MACOS_LIB_EXCLUDE = re.compile(r"""(AppleFSCompression$|
CFNetwork$|
CoreFoundation$|
CoreServices$|
DiskArbitration$|
IOKit$|
Foundation$|
Kerberos$|
Security$|
SystemConfiguration$|
libCRFSuite|
libDiagnosticMessagesClient|
libSystem|
libapple_nghttp2|
libarchive|
libc\+\+\.|
libenergytrace|
libicucore|
libncurses|
libnetwork|
libobjc|
libresolv|
libsasl2|
libxar|
libz
)""",
re.VERBOSE)
# Config keys.
BUILD_ROOT = 'build_root'
BUILD_BIN_DIR = 'build_bin_dir'
ARTIFACT_ROOT = 'artifact_root'
ARTIFACT_BIN_DIR = 'artifact_bin_dir'
ARTIFACT_LIB_DIR = 'artifact_lib_dir'
IS_MACOS = os.uname()[0] == "Darwin"
IS_LINUX = os.uname()[0] == "Linux"
def check_for_command(command):
    """
    Ensure that the specified command is available on the PATH.

    Raises subprocess.CalledProcessError (via `which`) if it is not.
    """
    try:
        check_output(['which', command])
    except subprocess.CalledProcessError as err:
        logging.error("Unable to find %s command", command)
        raise err
def dump_load_commands_macos(binary_path):
    """
    Run `otool -l` on the given binary and return its output as a list of
    lines.

    We use 'otool -l' instead of 'objdump -p' because 'otool' supports
    Universal Mach-O binaries.
    """
    check_for_command('otool')
    try:
        raw = check_output(["otool", "-l", binary_path])
    except subprocess.CalledProcessError as err:
        logging.error("Failed to run %s", err.cmd)
        raise err
    return raw.strip().decode("utf-8").split("\n")
def parse_load_commands_macos(cmd_type, dump):
    """
    Parses the output from dump_load_commands_macos() for macOS.

    'cmd_type' must be one of the following:
    * LC_RPATH: Returns a list containing the rpath search path, with one
      search path per entry.
    * LC_LOAD_DYLIB: Returns a list of shared object library dependencies,
      with one shared object per entry. They are returned as stored in the
      MachO header, without being first resolved to an absolute path, and may
      look like: @rpath/Foo.framework/Versions/A/Foo

    'dump' is the output from dump_load_commands_macos().
    """
    # Parsing state enum values.
    PARSING_NONE = 0
    PARSING_NEW_RECORD = 1
    PARSING_RPATH = 2
    PARSING_LIB_PATHS = 3

    state = PARSING_NONE
    values = []
    for line in dump:
        # Ensure the line is a string-like object (Python 2 may hand us bytes).
        try:
            line = line.decode('utf-8')
        except (UnicodeDecodeError, AttributeError):
            pass
        if re.match('^Load command', line):
            state = PARSING_NEW_RECORD
            continue
        # BUG FIX: the pattern must be a raw string. '\s' inside a plain
        # string literal is an invalid escape sequence (DeprecationWarning,
        # and a SyntaxWarning from Python 3.12 on).
        splits = re.split(r'\s+', line.strip(), maxsplit=2)
        key = splits[0]
        val = splits[1] if len(splits) > 1 else None
        if state == PARSING_NEW_RECORD:
            if key == KEY_CMD and val == LC_RPATH:
                state = PARSING_RPATH
                continue
            if key == KEY_CMD and val == LC_LOAD_DYLIB:
                state = PARSING_LIB_PATHS
                continue
        if state == PARSING_RPATH and cmd_type == LC_RPATH:
            if key == KEY_PATH:
                # maxsplit=2 already separated the path from its trailing
                # "(offset ...)" metadata, so splits[1] is the bare path.
                values.append(val)
        if state == PARSING_LIB_PATHS and cmd_type == LC_LOAD_DYLIB:
            if key == KEY_NAME:
                values.append(val)
    return values
def get_rpaths_macos(binary_path):
    """Return the list of LC_RPATH search paths stored in `binary_path`."""
    return parse_load_commands_macos(LC_RPATH,
                                     dump_load_commands_macos(binary_path))
def resolve_library_paths_macos(raw_library_paths, rpaths):
    """
    Resolve the library references returned by
    parse_load_commands_macos(LC_LOAD_DYLIB, ...) to absolute filesystem
    paths, using the rpath list from get_rpaths_macos().

    Returns a mapping from original to resolved library path.
    Raises IOError if any @rpath-relative reference cannot be located under
    any of the given rpaths.
    """
    resolved = {}
    for lib in raw_library_paths:
        if not lib.startswith("@rpath"):
            # Already an absolute (or loader-relative) reference; keep as-is.
            resolved[lib] = lib
            continue
        # Try each rpath in order; first hit on disk wins.
        for rpath in rpaths:
            candidate = re.sub('@rpath', rpath, lib)
            if os.path.exists(candidate):
                resolved[lib] = candidate
                break
        else:
            raise IOError(errno.ENOENT,
                          "Unable to locate library %s in rpath %s" % (lib, rpaths))
    return resolved
def get_resolved_dep_library_paths_macos(binary_path):
    """
    Return a map of symbolic to resolved library dependencies of the given
    binary. See resolve_library_paths_macos().
    """
    dump = dump_load_commands_macos(binary_path)
    return resolve_library_paths_macos(
        parse_load_commands_macos(LC_LOAD_DYLIB, dump),
        parse_load_commands_macos(LC_RPATH, dump))
def get_artifact_name():
    """
    Build the artifact base name: kudu-binary-<version>-<os>-<arch>.

    The version is read from version.txt at the source root.
    Raises NotImplementedError on unsupported platforms.
    """
    if IS_LINUX:
        os_str = "linux"
    elif IS_MACOS:
        os_str = "osx"
    else:
        raise NotImplementedError("Unsupported platform")
    arch = os.uname()[4]
    with open(os.path.join(SOURCE_ROOT, "version.txt"), 'r') as fh:
        line = fh.readline()
    # Ensure the line is a string-like object (Python 2 may hand us bytes).
    try:
        line = line.decode('utf-8')
    except (UnicodeDecodeError, AttributeError):
        pass
    return "kudu-binary-%s-%s-%s" % (line.strip(), os_str, arch)
def mkconfig(build_root, artifact_root):
    """
    Build a configuration map for convenient plumbing of path information.
    """
    return {
        BUILD_ROOT: build_root,
        BUILD_BIN_DIR: os.path.join(build_root, "bin"),
        ARTIFACT_ROOT: artifact_root,
        ARTIFACT_BIN_DIR: os.path.join(artifact_root, "bin"),
        ARTIFACT_LIB_DIR: os.path.join(artifact_root, "lib"),
    }
def prep_artifact_dirs(config):
    """
    Create any required artifact output directories, if needed.
    """
    for key in (ARTIFACT_ROOT, ARTIFACT_BIN_DIR, ARTIFACT_LIB_DIR):
        if not os.path.exists(config[key]):
            os.makedirs(config[key], mode=0o755)
def copy_file(src, dest):
    """
    Copy the file with path 'src' to path 'dest'.

    If 'src' is a symlink, the link will be followed and 'dest' will be written
    as a plain file.
    """
    shutil.copyfile(src, dest)
def copy_file_preserve_links(src, dest):
    """
    Same as copy_file but re-creates symlinks at the destination instead of
    following them.
    """
    if os.path.islink(src):
        # Re-create the link at 'dest', pointing at the same target.
        os.symlink(os.readlink(src), dest)
    else:
        copy_file(src, dest)
def chrpath(target, new_rpath):
    """
    Change the RPATH or RUNPATH for the specified target. See man chrpath(1).

    Logs a warning and returns if the binary has no rpath to rewrite;
    re-raises if the rewrite itself fails.
    """
    # Continue with a warning if no rpath is set on the binary.
    try:
        subprocess.check_call(['chrpath', '-l', target])
    except subprocess.CalledProcessError:
        logging.warning("No RPATH or RUNPATH set on target %s, continuing...", target)
        return

    # Update the rpath.
    try:
        subprocess.check_call(['chrpath', '-r', new_rpath, target])
    except subprocess.CalledProcessError as err:
        logging.warning("Failed to chrpath for target %s", target)
        raise err
def get_resolved_deps(target):
    """
    Return a list of resolved library dependencies for the given target,
    dispatching on the current platform.
    """
    if IS_MACOS:
        return get_resolved_dep_library_paths_macos(target).values()
    if IS_LINUX:
        return DependencyExtractor().extract_deps(target)
    raise NotImplementedError("not implemented")
def relocate_deps_linux(target_src, target_dst, config):
    """
    See relocate_deps(). Linux implementation.

    Copies the non-excluded shared-library dependencies of 'target_src' into
    the artifact lib dir and rewrites RUNPATHs to the relative $ORIGIN form.
    """
    new_rpath = '$ORIGIN/../lib'

    # Make sure we have the chrpath command available in the Linux build.
    check_for_command('chrpath')

    # Copy the linked libraries, skipping anything in the exclusion list.
    extractor = DependencyExtractor()
    extractor.set_library_filter(
        lambda path: not PAT_LINUX_LIB_EXCLUDE.search(path))
    for lib_src in extractor.extract_deps(target_src):
        lib_dst = os.path.join(config[ARTIFACT_LIB_DIR], os.path.basename(lib_src))
        copy_file(lib_src, lib_dst)
        # The shared objects themselves also need a relative RUNPATH so their
        # transitive dependencies resolve; $ORIGIN is always relative to the
        # running executable.
        chrpath(lib_dst, new_rpath)

    # The executable itself must also look for its dependencies in a
    # relative location.
    chrpath(target_dst, new_rpath)
def fix_rpath_macos(target_dst):
    """Replace every rpath in the binary with @executable_path/../lib."""
    check_for_command('install_name_tool')
    for old_rpath in get_rpaths_macos(target_dst):
        subprocess.check_call(['install_name_tool', '-delete_rpath', old_rpath,
                               target_dst])
    subprocess.check_call(['install_name_tool', '-add_rpath',
                           '@executable_path/../lib', target_dst])
def relocate_dep_path_macos(target_dst, dep_search_name):
    """
    Rewrite one dependency reference in the specified binary so that it is
    searched for via @rpath instead of its original directory.
    """
    rpath_name = re.sub('^.*/', '@rpath/', dep_search_name)
    subprocess.check_call(['install_name_tool', '-change',
                           dep_search_name, rpath_name, target_dst])
def relocate_deps_macos(target_src, target_dst, config):
    """
    See relocate_deps(). macOS implementation.

    Rewrites the rpath of 'target_dst', redirects each retained dependency
    reference to @rpath, and recursively copies/relocates the dependencies
    themselves into the artifact lib dir.
    """
    target_deps = get_resolved_dep_library_paths_macos(target_src)
    check_for_command('install_name_tool')

    # Modify the rpath of the target.
    fix_rpath_macos(target_dst)

    # For each dependency, relocate the path we will search for it and ensure it
    # is shipped with the archive.
    for (dep_search_name, dep_src) in target_deps.items():
        # Filter out libs we don't want to archive.
        if PAT_MACOS_LIB_EXCLUDE.search(dep_search_name):
            continue

        # Change the search path of the specified dep in 'target_dst'.
        relocate_dep_path_macos(target_dst, dep_search_name)

        # Archive the rest of the runtime dependencies.
        dep_dst = os.path.join(config[ARTIFACT_LIB_DIR], os.path.basename(dep_src))
        if not os.path.isfile(dep_dst):
            # Recursively copy and relocate library dependencies as they are found.
            # The isfile() check above also terminates the recursion once a
            # library has already been shipped.
            copy_file(dep_src, dep_dst)
            relocate_deps_macos(dep_src, dep_dst, config)
def relocate_deps(target_src, target_dst, config):
    """
    Make the target relocatable and copy all of its dependencies into the
    artifact directory. Dispatches on the current platform.
    """
    if IS_MACOS:
        return relocate_deps_macos(target_src, target_dst, config)
    if IS_LINUX:
        return relocate_deps_linux(target_src, target_dst, config)
    raise NotImplementedError("Unsupported platform")
def relocate_sasl2(target_src, config):
    """
    Relocate the sasl2 dynamically loaded modules.

    Returns False if the modules could not be found.
    Returns True if the modules were found and relocated.
    Raises an error if there is a problem during relocation of the sasl2 modules.
    """
    # Find the libsasl2 module in our dependencies.
    deps = get_resolved_deps(target_src)
    sasl_lib = None
    for dep in deps:
        if re.search('libsasl2', dep):
            sasl_lib = dep
            break

    # Look for libplain in potential sasl2 module paths, which is required for
    # Kudu's basic operation. Modules conventionally live in a 'sasl2'
    # directory next to libsasl2 itself.
    sasl_path = None
    if sasl_lib:
        path = os.path.join(os.path.dirname(sasl_lib), "sasl2")
        if os.path.exists(path):
            children = os.listdir(path)
            for child in children:
                if PAT_SASL_LIBPLAIN.search(child):
                    sasl_path = path
                    break

    if not sasl_path:
        return False

    dest_dir = os.path.join(config[ARTIFACT_LIB_DIR], 'sasl2')
    os.mkdir(dest_dir)
    # CLEANUP: removed the unused local 'to_relocate' that was never read.
    for dirpath, subdirs, files in os.walk(sasl_path):
        for f in files:
            file_src = os.path.join(dirpath, f)
            file_dst = os.path.join(dest_dir, f)
            copy_file_preserve_links(file_src, file_dst)
            # Symlinks were re-created as links; only real files need their
            # own dependencies relocated.
            if os.path.islink(file_src): continue
            relocate_deps(file_src, file_dst, config)
    return True
def main():
    """
    Entry point: build a relocatable binary-artifact directory.

    sys.argv[1] is the kudu build dir; the remaining args are binary targets
    to include. Exits with status 1 on bad usage or a missing build dir.
    """
    if len(sys.argv) < 3:
        print("Usage: %s kudu_build_dir target [target ...]" % (sys.argv[0], ))
        sys.exit(1)

    # Command-line arguments.
    build_root = sys.argv[1]
    targets = sys.argv[2:]

    init_logging()

    if not os.path.exists(build_root):
        logging.error("Build directory %s does not exist", build_root)
        sys.exit(1)

    artifact_name = get_artifact_name()
    artifact_root = os.path.join(build_root, artifact_name)
    config = mkconfig(build_root, artifact_root)

    # Clear the artifact root to ensure a clean build.
    if os.path.exists(artifact_root):
        shutil.rmtree(artifact_root)

    # Create artifact directories, if needed.
    prep_artifact_dirs(config)

    relocated_sasl = False
    for target in targets:
        logging.info("Including target '%s' and its dependencies in archive...", target)

        # Copy the target into the artifact directory.
        target_src = os.path.join(config[BUILD_BIN_DIR], target)
        target_dst = os.path.join(config[ARTIFACT_BIN_DIR], target)
        copy_file(target_src, target_dst)

        if IS_LINUX and not relocated_sasl:
            # We only relocate sasl2 on Linux because macOS appears to ship sasl2 with
            # the default distribution and we've observed ABI compatibility issues
            # involving calls from libsasl2 into libSystem when shipping libsasl2 with
            # the binary artifact.
            logging.info("Attempting to relocate sasl2 modules...")
            relocated_sasl = relocate_sasl2(target_src, config)

        # Make the target relocatable and copy all of its dependencies into the
        # artifact directory.
        relocate_deps(target_src, target_dst, config)


if __name__ == "__main__":
    main()
| {
"content_hash": "bb49239603769a8d0552e9e46fe1d0b2",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 102,
"avg_line_length": 35.01054852320675,
"alnum_prop": 0.6254896053028021,
"repo_name": "helifu/kudu",
"id": "c17e911dc846eea7378e2b7b9727b8415080cbef",
"size": "18030",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build-support/mini-cluster/relocate_binaries_for_mini_cluster.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423003"
},
{
"name": "C++",
"bytes": "15896567"
},
{
"name": "CMake",
"bytes": "217437"
},
{
"name": "CSS",
"bytes": "1364"
},
{
"name": "Clojure",
"bytes": "54903"
},
{
"name": "Dockerfile",
"bytes": "8783"
},
{
"name": "HTML",
"bytes": "29992"
},
{
"name": "Java",
"bytes": "2284107"
},
{
"name": "JavaScript",
"bytes": "8018"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Perl",
"bytes": "36186"
},
{
"name": "Python",
"bytes": "560917"
},
{
"name": "R",
"bytes": "11537"
},
{
"name": "Scala",
"bytes": "313182"
},
{
"name": "Shell",
"bytes": "147843"
},
{
"name": "Smarty",
"bytes": "2702"
},
{
"name": "Thrift",
"bytes": "81270"
}
],
"symlink_target": ""
} |
import pytest
from unittest.mock import patch
from cartoframes.exceptions import CatalogError
from cartoframes.data.observatory.catalog.entity import CatalogList
from cartoframes.data.observatory.catalog.country import Country
from cartoframes.data.observatory.catalog.repository.country_repo import CountryRepository
from cartoframes.data.observatory.catalog.repository.repo_client import RepoClient
from cartoframes.data.observatory.catalog.repository.constants import (
CATEGORY_FILTER, DATASET_FILTER, GEOGRAPHY_FILTER, PROVIDER_FILTER, VARIABLE_FILTER,
VARIABLE_GROUP_FILTER
)
from ..examples import test_countries, test_country1, db_country1, db_country2
class TestCountryRepo(object):
    """Unit tests for CountryRepository with RepoClient.get_countries mocked."""

    @patch.object(RepoClient, 'get_countries')
    def test_get_all(self, mock_get_countries):
        mock_get_countries.return_value = [db_country1, db_country2]

        countries = CountryRepository().get_all()

        mock_get_countries.assert_called_once_with(None)
        assert isinstance(countries, CatalogList)
        assert countries == test_countries

    @patch.object(RepoClient, 'get_countries')
    def test_get_all_when_empty(self, mock_get_countries):
        mock_get_countries.return_value = []

        assert CountryRepository().get_all() == []

    @patch.object(RepoClient, 'get_countries')
    def test_get_all_only_uses_allowed_filters(self, mock_get_countries):
        mock_get_countries.return_value = [db_country1, db_country2]
        requested_filters = {
            DATASET_FILTER: 'carto-do.project.census2011',
            CATEGORY_FILTER: 'demographics',
            VARIABLE_FILTER: 'population',
            GEOGRAPHY_FILTER: 'census-geo',
            VARIABLE_GROUP_FILTER: 'var-group',
            PROVIDER_FILTER: 'open_data',
            'fake_field_id': 'fake_value'
        }

        countries = CountryRepository().get_all(requested_filters)

        # Only the category and provider filters survive the whitelist.
        mock_get_countries.assert_called_once_with({
            CATEGORY_FILTER: 'demographics',
            PROVIDER_FILTER: 'open_data'
        })
        assert countries == test_countries

    @patch.object(RepoClient, 'get_countries')
    def test_get_by_id(self, mock_get_countries):
        mock_get_countries.return_value = [db_country1, db_country2]
        iso_code = db_country1['id']

        country = CountryRepository().get_by_id(iso_code)

        mock_get_countries.assert_called_once_with({'id': [iso_code]})
        assert isinstance(country, Country)
        assert country == test_country1

    @patch.object(RepoClient, 'get_countries')
    def test_get_by_id_unknown_fails(self, mock_get_countries):
        mock_get_countries.return_value = []

        with pytest.raises(CatalogError):
            CountryRepository().get_by_id('fra')

    @patch.object(RepoClient, 'get_countries')
    def test_get_by_id_list(self, mock_get_countries):
        mock_get_countries.return_value = [db_country1, db_country2]
        requested_ids = [db_country1['id'], db_country2['id']]

        countries = CountryRepository().get_by_id_list(requested_ids)

        mock_get_countries.assert_called_once_with({'id': requested_ids})
        assert isinstance(countries, CatalogList)
        assert countries == test_countries

    @patch.object(RepoClient, 'get_countries')
    def test_missing_fields_are_mapped_as_None(self, mock_get_countries):
        mock_get_countries.return_value = [{}]

        countries = CountryRepository().get_all()

        assert countries == CatalogList([Country({'id': None, 'name': None})])
| {
"content_hash": "b4122f94f6e96d91b19231772d63bc8a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 91,
"avg_line_length": 32.104,
"alnum_prop": 0.6287067032145527,
"repo_name": "CartoDB/cartoframes",
"id": "df8477a8ebd4ff9a56814eedfc6393cacf05b6b0",
"size": "4013",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/data/observatory/catalog/repository/test_country_repo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "51696"
},
{
"name": "Jinja",
"bytes": "18917"
},
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "773606"
}
],
"symlink_target": ""
} |
from OpenGLCffi.GLX import params
@params(api='glx', prms=['dpy', 'drawable', 'group'])
def glXJoinSwapGroupNV(dpy, drawable, group):
    """GLX_NV_swap_group stub: join `drawable` to swap group `group`.
    NOTE(review): dispatch presumably installed by @params — confirm."""
    pass
@params(api='glx', prms=['dpy', 'group', 'barrier'])
def glXBindSwapBarrierNV(dpy, group, barrier):
    """GLX_NV_swap_group stub: bind swap group `group` to barrier `barrier`.
    NOTE(review): dispatch presumably installed by @params — confirm."""
    pass
@params(api='glx', prms=['dpy', 'drawable', 'group', 'barrier'])
def glXQuerySwapGroupNV(dpy, drawable, group, barrier):
    """GLX_NV_swap_group stub: query the group/barrier of `drawable`.
    NOTE(review): dispatch presumably installed by @params — confirm."""
    pass
@params(api='glx', prms=['dpy', 'screen', 'maxGroups', 'maxBarriers'])
def glXQueryMaxSwapGroupsNV(dpy, screen, maxGroups, maxBarriers):
    """GLX_NV_swap_group stub: query max swap groups/barriers for `screen`.
    NOTE(review): dispatch presumably installed by @params — confirm."""
    pass
@params(api='glx', prms=['dpy', 'screen', 'count'])
def glXQueryFrameCountNV(dpy, screen, count):
    """GLX_NV_swap_group stub: query the frame counter for `screen`.
    NOTE(review): dispatch presumably installed by @params — confirm."""
    pass
@params(api='glx', prms=['dpy', 'screen'])
def glXResetFrameCountNV(dpy, screen):
    """GLX_NV_swap_group stub: reset the frame counter for `screen`.
    NOTE(review): dispatch presumably installed by @params — confirm."""
    pass
| {
"content_hash": "3c65eabe0c61a2001894889a5838fb59",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 23.225806451612904,
"alnum_prop": 0.6930555555555555,
"repo_name": "cydenix/OpenGLCffi",
"id": "96e8adbb59fce3122be3bb6b3c2b3f932c32b3f1",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GLX/EXT/NV/swap_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import dkmodelfields.monthfield
import dkmodelfields.statusfield
import dkmodelfields.yearfield
class Migration(migrations.Migration):
    """Initial schema for the dkmodelfields test app: one tiny model per
    custom field type under test (MonthField, StatusField, YearField)."""

    initial = True

    # First migration for the app; no prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        # Model 'M' exercises MonthField.
        migrations.CreateModel(
            name='M',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('month', dkmodelfields.monthfield.MonthField()),
            ],
        ),
        # Model 'S' exercises StatusField; the long first argument is the
        # status-table definition the field parses for its choices.
        migrations.CreateModel(
            name='S',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', dkmodelfields.statusfield.StatusField('\n =============== =========================================== ============\n status verbose explanation category\n =============== =========================================== ============\n first First status # [init]\n second Second status # [ok]\n third Third status # [post]\n =============== =========================================== ============\n @end-progress-status\n ', choices=[('first', 'First status'), ('second', 'Second status'), ('third', 'Third status')], db_index=True, default='first', max_length=15, verbose_name='Status')),
            ],
        ),
        # Model 'Y' exercises YearField.
        migrations.CreateModel(
            name='Y',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('yr', dkmodelfields.yearfield.YearField()),
            ],
        ),
    ]
| {
"content_hash": "59149b0b9757faf0fd408e0c776110f0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 825,
"avg_line_length": 49.8421052631579,
"alnum_prop": 0.45987328405491024,
"repo_name": "datakortet/dkmodelfields",
"id": "2208b766ebc80635958d8da57f4d09156f172e21",
"size": "1967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp_dkmodelfields/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66973"
}
],
"symlink_target": ""
} |
"""Main function a la Guido:
http://www.artima.com/weblogs/viewpost.jsp?thread=4829
"""
import getopt
import sys
from assertEquals.cli.reporters import detail, summarize
WINDOWS = sys.platform.find('win') == 0
class Usage(Exception):
    """Signals a command-line usage error; `msg` is shown to the user."""
    def __init__(self, msg):
        self.msg = msg
def main(argv=None):
    """Command-line entry point (Guido-style main).

    Parses options, then either writes a scripted summary/detail report to
    stdout or launches the curses interface. Returns a process exit code:
    0 on success, 2 on usage errors or when any test failed/errored.

    BUG FIX: the original used Python-2-only syntax (`except X, e` and
    `print >> sys.stderr`); the forms below behave identically and are
    valid on Python 2.6+ and Python 3.
    """
    if argv is None:
        argv = sys.argv
    try:
        try:
            short = "fst:x:"
            long_ = [ "find-only"
                    , "scripted"
                    , "testcase=","TestCase="
                    , "stopwords="
                     ]
            opts, args = getopt.getopt(argv[1:], short, long_)
        except getopt.error as msg:
            raise Usage(msg)

        find_only = False   # -f
        scripted = False    # -s
        stopwords = []      # -x
        testcase = None     # -t

        for opt, value in opts:
            if opt in ('-f', '--find-only'):
                find_only = True
            elif opt in ('-s', '--scripted'):
                scripted = True
            elif opt in ('-x', '--stopwords'):
                stopwords = value.split(',')
            elif opt in ('-t', '--testcase', '--TestCase'):
                testcase = value

        if len(args) == 1:
            module = args[0]
        else:
            raise Usage("Please specify a module.")

        if WINDOWS or scripted:
            if testcase is None:
                report = summarize(module, find_only, stopwords)
            else:
                report = detail(module, testcase)
            sys.stdout.write(report)
            # Name-mangled access to the summarizer's running totals.
            tfail, terr, tall = summarize._Summarize__totals
            if tfail > 0 or terr > 0:
                return 2  # non-zero exit-code on errors
            else:
                return 0
        else:
            from assertEquals.interactive import CursesInterface
            CursesInterface(module, stopwords)
    except Usage as err:
        sys.stderr.write(str(err.msg) + "\n")
        sys.stderr.write("'man 1 assertEquals' for instructions.\n")
        return 2
| {
"content_hash": "bbfa83bbf5e08c41ff73a4483879c725",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.4970472440944882,
"repo_name": "whit537/assertEquals",
"id": "624ba7fd5afb5e8a152d2ad53a048718053ed061",
"size": "2032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assertEquals/cli/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "134311"
}
],
"symlink_target": ""
} |
import os
import sys
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
from ravens.models import ResNet43_8s
from ravens import utils
class TransportGoal:
"""Daniel: Transporter for the placing module, with goal images.
Built on top of the normal Transporters class, with three FCNs. We assume
by nature that we have a goal image. We also crop after the query, and
will not use per-pixel losses, so ignore those vs normal transporters.
"""
    def __init__(self, input_shape, num_rotations, crop_size, preprocess):
        """
        Args:
            input_shape: (H, W, C) shape of the unpadded input image.
            num_rotations: number of discrete rotation angles for the kernel.
            crop_size: side length of the square crop; must be N*16 (e.g. 96).
            preprocess: callable applied to images before the FCNs.
        """
        self.num_rotations = num_rotations
        self.crop_size = crop_size  # crop size must be N*16 (e.g. 96)
        self.preprocess = preprocess

        # Pad both spatial dims by half the crop size on each side so crops
        # near the border stay in-bounds; the channel dim is left unpadded.
        self.pad_size = int(self.crop_size / 2)
        self.padding = np.zeros((3, 2), dtype=int)
        self.padding[:2, :] = self.pad_size

        # Network input shape accounts for the padding.
        input_shape = np.array(input_shape)
        input_shape[0:2] += self.pad_size * 2
        input_shape = tuple(input_shape)
        self.odim = output_dim = 3

        # 3 fully convolutional ResNets. Third one is for the goal.
        in0, out0 = ResNet43_8s(input_shape, output_dim, prefix='s0_')
        in1, out1 = ResNet43_8s(input_shape, output_dim, prefix='s1_')
        in2, out2 = ResNet43_8s(input_shape, output_dim, prefix='s2_')
        self.model = tf.keras.Model(inputs=[in0, in1, in2], outputs=[out0, out1, out2])
        self.optim = tf.keras.optimizers.Adam(learning_rate=1e-4)
        self.metric = tf.keras.metrics.Mean(name='transport_loss')
    def forward(self, in_img, goal_img, p, apply_softmax=True):
        """Forward pass of our goal-conditioned Transporter.

        Relevant shapes and info:
          in_img and goal_img: (320,160,6)
          p: integer pixels on in_img, e.g., [158, 30]
          self.padding: [[32,32],[32,32],[0,0]], with shape (3,2)

        Run input through all three networks, to get output of the same
        shape, except that the last channel is 3 (output_dim). Then, the
        output for one stream has the convolutional kernels for another. Call
        tf.nn.convolution. That's it, and the operation is be differentiable,
        so that gradients apply to all the FCNs.

        I actually think cropping after the query network is easier, because
        otherwise we have to do a forward pass, then call tf.multiply, then
        do another forward pass, which splits up the computation.

        Returns the cross-correlation map over (H, W, num_rotations); a
        softmax-normalized np.float32 array when apply_softmax is True,
        otherwise raw logits as a TF tensor.
        """
        assert in_img.shape == goal_img.shape, f'{in_img.shape}, {goal_img.shape}'

        # input image --> TF tensor
        input_unproc = np.pad(in_img, self.padding, mode='constant')    # (384,224,6)
        input_data = self.preprocess(input_unproc.copy())               # (384,224,6)
        input_shape = (1,) + input_data.shape
        input_data = input_data.reshape(input_shape)                    # (1,384,224,6)
        in_tensor = tf.convert_to_tensor(input_data, dtype=tf.float32)  # (1,384,224,6)

        # goal image --> TF tensor
        goal_unproc = np.pad(goal_img, self.padding, mode='constant')   # (384,224,6)
        goal_data = self.preprocess(goal_unproc.copy())                 # (384,224,6)
        goal_shape = (1,) + goal_data.shape
        goal_data = goal_data.reshape(goal_shape)                       # (1,384,224,6)
        goal_tensor = tf.convert_to_tensor(goal_data, dtype=tf.float32) # (1,384,224,6)

        # Get SE2 rotation vectors for cropping. Pivot is (x, y) of p in the
        # padded image.
        pivot = np.array([p[1], p[0]]) + self.pad_size
        rvecs = self.get_se2(self.num_rotations, pivot)

        # Forward pass through three separate FCNs. All logits will be: (1,384,224,3).
        in_logits, kernel_nocrop_logits, goal_logits = \
            self.model([in_tensor, in_tensor, goal_tensor])

        # Use features from goal logits and combine with input and kernel.
        goal_x_in_logits = tf.multiply(goal_logits, in_logits)
        goal_x_kernel_logits = tf.multiply(goal_logits, kernel_nocrop_logits)

        # Crop the kernel_logits about the picking point and get rotations.
        crop = tf.identity(goal_x_kernel_logits)                        # (1,384,224,3)
        crop = tf.repeat(crop, repeats=self.num_rotations, axis=0)      # (24,384,224,3)
        crop = tfa.image.transform(crop, rvecs, interpolation='NEAREST') # (24,384,224,3)
        kernel = crop[:,
                      p[0]:(p[0] + self.crop_size),
                      p[1]:(p[1] + self.crop_size),
                      :]
        assert kernel.shape == (self.num_rotations, self.crop_size, self.crop_size, self.odim)

        # Cross-convolve `in_x_goal_logits`. Padding kernel: (24,64,64,3) --> (65,65,3,24).
        kernel_paddings = tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]])
        kernel = tf.pad(kernel, kernel_paddings, mode='CONSTANT')
        kernel = tf.transpose(kernel, [1, 2, 3, 0])
        output = tf.nn.convolution(goal_x_in_logits, kernel, data_format="NHWC")
        # Scale by crop area; presumably to keep logits numerically tame —
        # TODO confirm intent.
        output = (1 / (self.crop_size**2)) * output

        if apply_softmax:
            # Softmax over all (pixel, rotation) entries jointly.
            output_shape = output.shape
            output = tf.reshape(output, (1, np.prod(output.shape)))
            output = tf.nn.softmax(output)
            output = np.float32(output).reshape(output_shape[1:])

        # Daniel: visualize crops and kernels, for Transporter-Goal figure.
        #self.visualize_images(p, in_img, input_data, crop)
        #self.visualize_transport(p, in_img, input_data, crop, kernel)
        #self.visualize_logits(in_logits,            name='input')
        #self.visualize_logits(goal_logits,          name='goal')
        #self.visualize_logits(kernel_nocrop_logits, name='kernel')
        #self.visualize_logits(goal_x_in_logits,     name='goal_x_in')
        #self.visualize_logits(goal_x_kernel_logits, name='goal_x_kernel')
        return output
def train(self, in_img, goal_img, p, q, theta):
    """Run one Transport-Goal training step.

    Much is similar to the attention model: (a) forward pass, (b) angle
    discretization, (c) a label that is hot at exactly one single
    (pixel, rotation) cell. `theta` is binned by dividing out
    2*pi / num_rotations, i.e. it is treated as radians.
    """
    self.metric.reset_states()

    # Discretize the rotation into one of `num_rotations` bins.
    bin_width = 2 * np.pi / self.num_rotations
    itheta = np.int32(np.round(theta / bin_width)) % self.num_rotations

    # One-hot label over (row, col, rotation); pure NumPy, so it does not
    # need to live inside the gradient tape.
    label = np.zeros(in_img.shape[:2] + (self.num_rotations,))
    label[q[0], q[1], itheta] = 1
    label = tf.convert_to_tensor(label.reshape(1, -1), dtype=tf.float32)

    with tf.GradientTape() as tape:
        logits = self.forward(in_img, goal_img, p, apply_softmax=False)
        logits = tf.reshape(logits, (1, np.prod(logits.shape)))
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(label, logits))

    grads = tape.gradient(loss, self.model.trainable_variables)
    self.optim.apply_gradients(zip(grads, self.model.trainable_variables))
    self.metric(loss)
    return np.float32(loss)
def get_se2(self, num_rotations, pivot):
    """Get SE2 rotations discretized into num_rotations angles
    counter-clockwise, flattened for tfa.image.transform.

    Each vector is the first 8 entries of the flattened 3x3 image
    transform about `pivot` (the last entry is dropped).
    """
    def _flat_transform(k):
        angle = k * 2 * np.pi / num_rotations
        return utils.get_image_transform(angle, (0, 0), pivot).reshape(-1)[:-1]

    return np.array([_flat_transform(k) for k in range(num_rotations)],
                    dtype=np.float32)
def save(self, fname):
    """Save the underlying Keras model to `fname` via `Model.save`."""
    self.model.save(fname)
def load(self, fname):
    """Restore model weights from `fname` (counterpart to `save`)."""
    self.model.load_weights(fname)
#-------------------------------------------------------------------------
# Visualization.
#-------------------------------------------------------------------------
def visualize_images(self, p, in_img, input_data, crop):
    """Debugging figure: perturbed input, processed/padded input, and two
    rotated crops stacked with a white separator (Transporter-Goal figure).
    """
    def _angle_to_bin(angle):
        bin_width = 2 * np.pi / self.num_rotations
        return np.int32(np.round(angle / bin_width)) % self.num_rotations

    plt.subplot(1, 3, 1)
    plt.title('Perturbed', fontsize=15)
    plt.imshow(np.array(in_img[:, :, :3]).astype(np.uint8))

    plt.subplot(1, 3, 2)
    plt.title('Process/Pad', fontsize=15)
    plt.imshow(input_data[0, :, :, :3])

    # Third panel: stack two crops vertically with a 4-pixel white barrier.
    theta1, theta2 = 0.0, 90.0
    crop_a = crop[_angle_to_bin(theta1), :, :, :3]
    crop_b = crop[_angle_to_bin(theta2), :, :, :3]
    barrier = np.ones_like(crop_a)[:4, :, :]
    plt.subplot(1, 3, 3)
    plt.imshow(np.concatenate((crop_a, barrier, crop_b), axis=0))
    plt.title(f'{theta1}, {theta2}', fontsize=15)

    plt.suptitle(f'pick: {p}', fontsize=15)
    plt.tight_layout()
    plt.show()
    #plt.savefig('viz.png')
def visualize_transport(self, p, in_img, input_data, crop, kernel):
    """Like the attention map, let's visualize the transport data from a
    trained model.

    https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html

    In my normal usage, the attention is already softmax-ed but just be
    aware in case it's not. Also be aware of RGB vs BGR mode. We should
    ensure we're in BGR mode before saving. Also with RAINBOW mode,
    red=hottest (highest attention values), green=medium, blue=lowest.

    See also:
    https://matplotlib.org/3.3.0/api/_as_gen/matplotlib.pyplot.subplot.html

    crop.shape: (24,64,64,6)
    kernel.shape = (65,65,3,24)
    """
    def colorize(img):
        # Min-max normalize to [0, 255], then apply the rainbow colormap.
        # NOTE(review): divides by np.max(img) after min-subtraction; a
        # constant-valued map would divide by zero — confirm inputs vary.
        # I don't think we have to convert to BGR here...
        img = img - np.min(img)
        img = 255 * img / np.max(img)
        img = cv2.applyColorMap(np.uint8(img), cv2.COLORMAP_RAINBOW)
        #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return img

    # Move the rotation axis first and drop to NumPy: (65,65,3,24) -> (24,65,65,3).
    kernel = (tf.transpose(kernel, [3, 0, 1, 2])).numpy()

    # Top two rows: crops from processed RGBD. Bottom two: output from FCN.
    nrows = 4
    ncols = 12
    # Half the grid cells show crops, half show colorized kernels.
    assert self.num_rotations == nrows * (ncols / 2)
    idx = 0
    fig, ax = plt.subplots(nrows, ncols, figsize=(12,6))
    for _ in range(nrows):
        for _ in range(ncols):
            plt.subplot(nrows, ncols, idx+1)
            plt.axis('off')  # Ah, you need to put this here ...
            if idx < self.num_rotations:
                # First half of the grid: one RGB crop per rotation.
                plt.imshow(crop[idx, :, :, :3])
            else:
                # Offset because idx goes from 0 to (rotations * 2) - 1.
                _idx = idx - self.num_rotations
                processed = colorize(img=kernel[_idx, :, :, :])
                plt.imshow(processed)
            idx += 1
    plt.tight_layout()
    plt.show()
def visualize_logits(self, logits, name):
    """Given logits (BEFORE tf.nn.convolution), render a rainbow heatmap,
    save it under tmp/, and display it.

    No softmax is applied; the raw logits are min-max normalized for
    display only (the learned convolution also runs on raw logits).
    """
    shape = logits.shape
    # Flatten and restore; this mirrors the (disabled) softmax path.
    flat = tf.reshape(logits, (1, np.prod(shape)))
    # logits = tf.nn.softmax(logits) # Is this necessary?
    heat = np.float32(flat).reshape(shape)[0]

    # Normalize to [0, 255] before colorizing.
    heat = heat - np.min(heat)
    heat = 255 * heat / np.max(heat)
    heat = cv2.applyColorMap(np.uint8(heat), cv2.COLORMAP_RAINBOW)
    # Only if we're saving with cv2.imwrite()
    heat = cv2.cvtColor(heat, cv2.COLOR_RGB2BGR)
    cv2.imwrite(f'tmp/logits_{name}.png', heat)

    plt.subplot(1, 1, 1)
    plt.title(f'Logits: {name}', fontsize=15)
    plt.imshow(heat)
    plt.tight_layout()
    plt.show()
def get_transport_heatmap(self, transport):
    """Given transport output, get a human-readable heatmap.

    https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html

    In my normal usage, the attention is already softmax-ed but just be
    aware in case it's not. Also be aware of RGB vs BGR mode. We should
    ensure we're in BGR mode before saving. Also with RAINBOW mode, red =
    hottest (highest attention values), green=medium, blue=lowest.

    Returns a list of `num_rotations` BGR uint8 images, one per rotation.
    """
    # Options: cv2.COLORMAP_PLASMA, cv2.COLORMAP_JET, etc.
    #transport = tf.reshape(transport, (1, np.prod(transport.shape)))
    #transport = tf.nn.softmax(transport)
    assert transport.shape == (320, 160, self.num_rotations), transport.shape
    vis_images = []
    for idx in range(self.num_rotations):
        vis_transport = np.float32(transport[:, :, idx])
        vis_transport = vis_transport - np.min(vis_transport)
        # FIX: guard the normalization. A constant-valued rotation slice
        # used to produce a 0/0 division (NaNs), which then wrapped
        # unpredictably in the np.uint8 cast below.
        vmax = np.max(vis_transport)
        if vmax > 0:
            vis_transport = 255 * vis_transport / vmax
        vis_transport = cv2.applyColorMap(np.uint8(vis_transport), cv2.COLORMAP_RAINBOW)
        # Convert to BGR so cv2.imwrite saves correct colors downstream.
        vis_transport = cv2.cvtColor(vis_transport, cv2.COLOR_RGB2BGR)
        vis_images.append(vis_transport)
    return vis_images
"content_hash": "62d4bf78163e1a5fb07e792c73063ea1",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 94,
"avg_line_length": 44.72875816993464,
"alnum_prop": 0.5896105793818952,
"repo_name": "DanielTakeshi/deformable-ravens",
"id": "0aafbd728ded3c727fb9518d2ce6a2c0e234ddaa",
"size": "13710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ravens/models/transport_goal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "594601"
},
{
"name": "Shell",
"bytes": "60383"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
# Create your views here.
def angular(request):
    """Serve the Angular single-page-app shell template (base.html)."""
    return render(request, 'base.html')
"content_hash": "588783bb0272dde35bf7d3cc5168c2b1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 24.8,
"alnum_prop": 0.7580645161290323,
"repo_name": "chackett87/GameGap",
"id": "bf3a1bdaa3972f0d990cbe3b0ea4c9901fe5988c",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gameGap/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3198"
},
{
"name": "HTML",
"bytes": "10966"
},
{
"name": "JavaScript",
"bytes": "8901"
},
{
"name": "Python",
"bytes": "10264"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Auto-generated validator for the `carpet.baxis.color` property."""

    def __init__(self, plotly_name="color", parent_name="carpet.baxis", **kwargs):
        # Defaults: edits trigger a recalculation ('calc'); role is 'style'.
        # Both can be overridden by the caller through kwargs.
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
| {
"content_hash": "55e5ab98c16b67e7ac1b95e231b2c30c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 82,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6045454545454545,
"repo_name": "plotly/python-api",
"id": "03a04003c94ab44ad69fce7f3c2b384678faab79",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/baxis/_color.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
import os.path
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.source import InlineTemplate, StaticFile
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.stack_features import check_stack_feature
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from ambari_commons import OSConst
from ambari_commons.constants import SERVICE
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def webhcat():
  """Configure WebHCat on Windows.

  Writes webhcat-site.xml into the HCat config dir and switches the
  WebHCat Windows service logon to the configured webhcat user.
  """
  import params

  XmlConfig("webhcat-site.xml",
            conf_dir=params.hcat_config_dir,
            configurations=params.config['configurations']['webhcat-site']
  )
  # Manually overriding service logon user & password set by the installation package
  ServiceConfig(params.webhcat_server_win_service_name,
                action="change_user",
                username = params.webhcat_user,
                password = Script.get_password(params.webhcat_user))
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def webhcat():
import params
Directory(params.templeton_pid_dir,
owner=params.webhcat_user,
mode=0755,
group=params.user_group,
create_parents = True)
Directory(params.templeton_log_dir,
owner=params.webhcat_user,
mode=0755,
group=params.user_group,
create_parents = True)
Directory(params.config_dir,
create_parents = True,
owner=params.webhcat_user,
group=params.user_group,
cd_access="a")
# Replace _HOST with hostname in relevant principal-related properties
webhcat_site = params.config['configurations']['webhcat-site'].copy()
for prop_name in ['templeton.hive.properties', 'templeton.kerberos.principal']:
if prop_name in webhcat_site:
webhcat_site[prop_name] = webhcat_site[prop_name].replace("_HOST", params.hostname)
XmlConfig("webhcat-site.xml",
conf_dir=params.config_dir,
configurations=webhcat_site,
configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
owner=params.webhcat_user,
group=params.user_group,
)
# if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
if params.stack_version_formatted_major and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.stack_version_formatted_major) and \
params.version and params.stack_root:
XmlConfig("hive-site.xml",
conf_dir = format("{stack_root}/{version}/hive/conf"),
configurations = params.config['configurations']['hive-site'],
configuration_attributes = params.config['configuration_attributes']['hive-site'],
owner = params.hive_user,
group = params.user_group,
)
XmlConfig("yarn-site.xml",
conf_dir = format("{stack_root}/{version}/hadoop/conf"),
configurations = params.config['configurations']['yarn-site'],
configuration_attributes = params.config['configuration_attributes']['yarn-site'],
owner = params.yarn_user,
group = params.user_group,
)
File(format("{config_dir}/webhcat-env.sh"),
owner=params.webhcat_user,
group=params.user_group,
content=InlineTemplate(params.webhcat_env_sh_template)
)
Directory(params.webhcat_conf_dir,
cd_access='a',
create_parents = True
)
log4j_webhcat_filename = 'webhcat-log4j.properties'
if (params.log4j_webhcat_props != None):
File(format("{config_dir}/{log4j_webhcat_filename}"),
mode=0644,
group=params.user_group,
owner=params.webhcat_user,
content=InlineTemplate(params.log4j_webhcat_props)
)
elif (os.path.exists("{config_dir}/{log4j_webhcat_filename}.template")):
File(format("{config_dir}/{log4j_webhcat_filename}"),
mode=0644,
group=params.user_group,
owner=params.webhcat_user,
content=StaticFile(format("{config_dir}/{log4j_webhcat_filename}.template"))
)
# Generate atlas-application.properties.xml file
if params.enable_atlas_hook:
# WebHCat uses a different config dir than the rest of the daemons in Hive.
atlas_hook_filepath = os.path.join(params.config_dir, params.atlas_hook_filename)
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
| {
"content_hash": "ba6282df11f5c04090d512b761c28d7c",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 142,
"avg_line_length": 40.70503597122302,
"alnum_prop": 0.7143867090844821,
"repo_name": "radicalbit/ambari",
"id": "66731f80ef5a6e1be3b4e341dbc629eb76c7ce14",
"size": "5658",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the `labelset` foreign key on images.Source,
    pointing at annotations.LabelSet."""

    # The annotations app's initial migration must run first so the
    # LabelSet table exists before the FK column is added.
    depends_on = (
        ("annotations", "0001_initial"),
    )

    def forwards(self, orm):
        # Adding field 'Source.labelset'
        # NOTE(review): default=1 assumes a LabelSet row with pk=1 exists
        # when the Source table already has rows — confirm on deployed DBs.
        db.add_column('images_source', 'labelset', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['annotations.LabelSet']), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Source.labelset'
        db.delete_column('images_source', 'labelset_id')

    # Frozen ORM snapshot used by South at migration time.
    # Auto-generated — do not edit by hand.
    models = {
        'annotations.label': {
            'Meta': {'object_name': 'Label'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
        },
        'annotations.labelgroup': {
            'Meta': {'object_name': 'LabelGroup'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
        },
        'annotations.labelset': {
            'Meta': {'object_name': 'LabelSet'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'labels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['annotations.Label']", 'symmetrical': 'False'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'images.image': {
            'Meta': {'object_name': 'Image'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Metadata']"}),
            'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'original_height': ('django.db.models.fields.IntegerField', [], {}),
            'original_width': ('django.db.models.fields.IntegerField', [], {}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'total_points': ('django.db.models.fields.IntegerField', [], {}),
            'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'images.metadata': {
            'Meta': {'object_name': 'Metadata'},
            'camera': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
            'group1_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'group2_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'group3_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'group4_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'group5_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'group6_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'group7_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'photo_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
            'pixel_cm_ratio': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
            'strobes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'value1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value1']", 'null': 'True', 'blank': 'True'}),
            'value2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value2']", 'null': 'True', 'blank': 'True'}),
            'value3': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value3']", 'null': 'True', 'blank': 'True'}),
            'value4': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value4']", 'null': 'True', 'blank': 'True'}),
            'value5': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value5']", 'null': 'True', 'blank': 'True'}),
            'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
        },
        'images.point': {
            'Meta': {'object_name': 'Point'},
            'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'column': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
            'point_number': ('django.db.models.fields.IntegerField', [], {}),
            'row': ('django.db.models.fields.IntegerField', [], {})
        },
        'images.source': {
            'Meta': {'object_name': 'Source'},
            'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'default_total_points': ('django.db.models.fields.IntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'labelset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelSet']"}),
            'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'visibility': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
        },
        'images.value1': {
            'Meta': {'object_name': 'Value1'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
        },
        'images.value2': {
            'Meta': {'object_name': 'Value2'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
        },
        'images.value3': {
            'Meta': {'object_name': 'Value3'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
        },
        'images.value4': {
            'Meta': {'object_name': 'Value4'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
        },
        'images.value5': {
            'Meta': {'object_name': 'Value5'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
        }
    }

    complete_apps = ['images']
| {
"content_hash": "2074d91d51fc5173c3ea9607f7fa93c5",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 182,
"avg_line_length": 71.07954545454545,
"alnum_prop": 0.538449240607514,
"repo_name": "DevangS/CoralNet",
"id": "f8655442d1c513305f032bd45b9b780c2ddfc38a",
"size": "12528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images/migrations/0005_auto__add_field_source_labelset.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "30089"
},
{
"name": "C++",
"bytes": "37023"
},
{
"name": "CSS",
"bytes": "85725"
},
{
"name": "HTML",
"bytes": "875721"
},
{
"name": "JavaScript",
"bytes": "406565"
},
{
"name": "Jupyter Notebook",
"bytes": "495187"
},
{
"name": "M",
"bytes": "1647"
},
{
"name": "Matlab",
"bytes": "774560"
},
{
"name": "Nginx",
"bytes": "749"
},
{
"name": "Objective-C",
"bytes": "702"
},
{
"name": "Python",
"bytes": "1293581"
},
{
"name": "Shell",
"bytes": "4490"
}
],
"symlink_target": ""
} |
"""A simple network to use in tests and examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import step_fn
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import core
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def single_loss_example(optimizer_fn, distribution, use_bias=False):
    """Build a very simple network to use in tests and examples."""
    layer = core.Dense(1, use_bias=use_bias)

    def loss_fn(x):
        # Squared error of the single dense unit against a constant target of 1.
        delta = array_ops.reshape(layer(x), []) - constant_op.constant(1.)
        return delta * delta

    dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
    step = step_fn.StandardSingleLossStep(
        dataset, loss_fn, optimizer_fn(), distribution)
    # Layer is returned for inspecting the kernels in tests.
    return step, layer
def minimize_loss_example(optimizer_fn,
                          use_bias=False,
                          use_callable_loss=True,
                          create_optimizer_inside_model_fn=False):
    """Example of non-distribution-aware legacy code."""
    dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()

    # An Optimizer instance is created either outside or inside model_fn.
    outer_optimizer = (None if create_optimizer_inside_model_fn
                       else optimizer_fn())
    layer = core.Dense(1, use_bias=use_bias)

    def model_fn(x):
        """A very simple model written by the user."""
        def loss_fn():
            delta = array_ops.reshape(layer(x), []) - constant_op.constant(1.)
            return delta * delta

        optimizer = (outer_optimizer if outer_optimizer is not None
                     else optimizer_fn())
        # Pass either the callable or an eagerly-computed loss value.
        return optimizer.minimize(loss_fn if use_callable_loss else loss_fn())

    return model_fn, dataset, layer
def batchnorm_example(optimizer_fn,
                      batch_per_epoch=1,
                      momentum=0.9,
                      renorm=False):
    """Example of non-distribution-aware legacy code with batch normalization."""
    # Input shape is [16, 8]; values increase in both dimensions, with an
    # extra offset of 100 per batch index z.
    batches = []
    for z in range(batch_per_epoch):
        batches.append([[float(x * 8 + y + z * 100) for y in range(8)]
                        for x in range(16)])
    dataset = dataset_ops.Dataset.from_tensor_slices(batches).repeat()

    optimizer = optimizer_fn()
    batchnorm = normalization.BatchNormalization(
        renorm=renorm, momentum=momentum, fused=False)

    def model_fn(x):
        def loss_fn():
            normed = math_ops.reduce_sum(batchnorm(x, training=True), axis=1)
            return math_ops.reduce_mean(normed - constant_op.constant(1.))

        # Callable loss.
        return optimizer.minimize(loss_fn)

    return model_fn, dataset, batchnorm
| {
"content_hash": "8a669b42460bd719666cfe551ecff748",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 33.69318181818182,
"alnum_prop": 0.66070826306914,
"repo_name": "allenlavoie/tensorflow",
"id": "cef5fd2f8943d348a0721cd72032bf6cb2199ad9",
"size": "3654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/single_loss_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340645"
},
{
"name": "C++",
"bytes": "40746519"
},
{
"name": "CMake",
"bytes": "198073"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "735737"
},
{
"name": "Jupyter Notebook",
"bytes": "2117270"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "34933340"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426884"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
import configparser
import yaml
import time
from PyQt4 import QtSql
class DataBase:
    """MySQL access layer (via Qt's QSqlDatabase) for the NFC key system."""

    def __init__(self):
        # Connection settings are read from the [SQL] section of kagisys.conf.
        # get url from kagisys.conf
        self.config = configparser.SafeConfigParser()
        self.config.read('/home/pi/project/kagisys.conf')
        # NOTE(review): `url` is never used anywhere in this class —
        # presumably leftover from Slack-notification code; confirm before
        # removing (the get() call also raises if [Slack] is missing).
        url = self.config.get('Slack', 'url')
        self.db = QtSql.QSqlDatabase.addDatabase('QMYSQL')
        self.db.setHostName(self.config.get('SQL', 'host_name'))
        self.db.setUserName(self.config.get('SQL', 'user_name'))
        self.db.setPassword(self.config.get('SQL', 'user_password'))
        self.db.setDatabaseName(self.config.get('SQL', 'database_name'))

    def __open(self):
        # Open (or re-open) the connection; terminates the process on failure.
        if not self.db.open():
            print('***error*** database can\'t open!')
            exit()

    def checkIDm(self, IDm):
        """Return True if the NFC tag IDm is already registered in `nfctag`."""
        self.__open()
        query = QtSql.QSqlQuery()
        query.prepare('select COUNT(*) from nfctag where IDm=:IDm')
        query.bindValue(':IDm', IDm)
        query.exec_()
        query.next()
        # NOTE(review): .toInt() returning a (value, ok) pair is PyQt4
        # QVariant v1 API behavior — confirm the sip API version in use.
        if query.value(0).toInt()[0] == 0:
            return False
        else:
            return True

    def addNewIDm(self, IDm, account_id):
        """Insert a new (IDm, account_id) row into `nfctag`."""
        self.__open()
        query = QtSql.QSqlQuery()
        query.prepare('insert into nfctag VALUES (:IDm, :account_id)')
        query.bindValue(':IDm', IDm)
        query.bindValue(':account_id', account_id)
        query.exec_()

    def addTouchedLog(self, IDm):
        """Insert a touch event for IDm into `touchedlog` with the current
        Unix timestamp."""
        now = time.time()
        self.__open()
        query = QtSql.QSqlQuery()
        query.prepare('insert into touchedlog VALUES (:IDm, :timestamp)')
        query.bindValue(':IDm', IDm)
        query.bindValue(':timestamp', now)
        query.exec_()
| {
"content_hash": "1adebb4c2613359e414374eaa716447e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 67,
"avg_line_length": 27.18867924528302,
"alnum_prop": 0.6766134628730048,
"repo_name": "tnct-spc/kagisys_logic",
"id": "092c28f49f4e41140380ac3c1ec1fad597a4a1d7",
"size": "1485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nfc_lock/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9040"
}
],
"symlink_target": ""
} |
class StorageError(Exception):
    """Base class for every storage-related error raised by this package."""
class NoSuchFile(StorageError):
    """Raised when a requested file does not exist in storage."""
class FileExists(StorageError):
    """Raised when a file unexpectedly already exists in storage."""
class WaitForLockTimout(StorageError):
    """Raised when waiting to acquire a lock times out.

    NOTE: the class name misspells "Timeout"; it is kept for backward
    compatibility with existing callers. New code should use the
    correctly spelled alias defined right below.
    """


# Correctly spelled alias; catching/raising either name is equivalent.
WaitForLockTimeout = WaitForLockTimout
class WaitForUnlockTimout(StorageError):
    """Raised when waiting for a lock to be released times out.

    NOTE: the class name misspells "Timeout"; it is kept for backward
    compatibility with existing callers. New code should use the
    correctly spelled alias defined right below.
    """


# Correctly spelled alias; catching/raising either name is equivalent.
WaitForUnlockTimeout = WaitForUnlockTimout
| {
"content_hash": "1fd755b0e0394522b0337f1b982b11f0",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 40,
"avg_line_length": 16,
"alnum_prop": 0.7589285714285714,
"repo_name": "epegzz/StorageAlchemy",
"id": "51760b2abee95cd109be8ccb53c14bdeab2f8ae1",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storagealchemy/exception.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22088"
},
{
"name": "Shell",
"bytes": "50"
}
],
"symlink_target": ""
} |
import csv
import json
import zipfile
from pprint import pformat
from cStringIO import StringIO
import grequests
from crowdflower import logger
def read_zip_csv(zf):
    """Yield each data row, as a dict, from every CSV member of a zip archive.

    zf: a zipfile.ZipFile (or duck-typed equivalent) whose members are CSV
        files with a header row; the header supplies the dict keys.
    """
    for member in zf.filelist:
        member_fp = zf.open(member)
        for record in csv.DictReader(member_fp):
            yield record
def to_params(props):
    """Flatten a (possibly nested) dict into Rails-style parameter pairs.

    Yields (key, value) tuples where each key is wrapped in brackets,
    nested dict keys are concatenated ('[a][b]'), and list values repeat
    the key with a trailing '[]' — the form the CrowdFlower API expects.
    """
    for key, value in props.items():
        bracketed = '[' + key + ']'
        if isinstance(value, list):
            # Rails array convention: repeat the key with '[]' appended.
            for item in value:
                yield bracketed + '[]', item
        elif isinstance(value, dict):
            # Recurse, prefixing every nested key with this one.
            for nested_key, nested_value in to_params(value):
                yield bracketed + nested_key, nested_value
        else:
            yield bracketed, value
class Job(object):
    '''
    A single CrowdFlower job, addressed by id, with properties and units
    fetched lazily through the supplied connection and cached locally.

    Read / Write attributes
        auto_order
        auto_order_threshold
        auto_order_timeout
        cml
        cml_fields
        confidence_fields
        css
        custom_key
        excluded_countries
        gold_per_assignment
        included_countries
        instructions
        js
        judgments_per_unit
        language
        max_judgments_per_unit
        max_judgments_per_contributor
        min_unit_confidence
        options
        pages_per_assignment
        problem
        send_judgments_webhook
        state
        title
        units_per_assignment
        webhook_uri

    Read-only attributes
        completed
        completed_at
        created_at
        gold
        golds_count
        id
        judgments_count
        units_count
        updated_at

    Not sure about:
        payment_cents
    '''
    READ_WRITE_FIELDS = ['auto_order', 'auto_order_threshold', 'auto_order_timeout', 'cml', 'cml_fields', 'confidence_fields', 'css', 'custom_key', 'excluded_countries', 'gold_per_assignment', 'included_countries', 'instructions', 'js', 'judgments_per_unit', 'language', 'max_judgments_per_unit', 'max_judgments_per_contributor', 'min_unit_confidence', 'options', 'pages_per_assignment', 'problem', 'send_judgments_webhook', 'state', 'title', 'units_per_assignment', 'webhook_uri']

    def __init__(self, job_id, connection):
        '''
        job_id: the CrowdFlower job id.
        connection: the connection object used for all API requests
            (it exposes request/grequest/create_request/send_request).
        '''
        self.id = job_id
        self._connection = connection
        # cacheable:
        self._properties = {}
        self._units = {}

    def __json__(self):
        # Serialize as the (lazily fetched) properties dict.
        return self.properties

    def __repr__(self):
        return pformat(self.properties)

    @property
    def properties(self):
        # Lazily fetched and cached; emptied by update()/gold_* to force a refetch.
        if len(self._properties) == 0:
            self._properties = self._connection.request('/jobs/%s' % self.id)
        return self._properties

    @property
    def units(self):
        # Lazily fetched and cached; emptied by upload()/gold_* to force a refetch.
        if len(self._units) == 0:
            self._units = self._connection.request('/jobs/%s/units' % self.id)
        return self._units

    def clear_units(self, parallel=20):
        '''
        DELETE every unit in this job, issuing up to `parallel` concurrent
        requests via grequests; yields each response as it completes.
        '''
        reqs = (self._connection.grequest('/jobs/%s/units/%s' % (self.id, unit_id), method='DELETE')
                for unit_id in self.units.keys())
        for response in grequests.imap(reqs, size=parallel):
            yield response

    def upload(self, units):
        '''
        Upload an iterable of unit dicts as newline-delimited JSON.
        '''
        headers = {'Content-Type': 'application/json'}
        data = '\n'.join(json.dumps(unit) for unit in units)
        res = self._connection.request('/jobs/%s/upload' % self.id, method='POST', headers=headers, data=data)
        # reset cached units
        self._units = {}
        return res

    def update(self, props):
        '''
        PUT the given properties dict to the job, using Rails-style
        'job[key]' parameter names (see to_params).
        '''
        params = [('job' + key, value) for key, value in to_params(props)]
        logger.debug('Updating Job#%d: %r', self.id, params)
        res = self._connection.request('/jobs/%s' % self.id, method='PUT', params=params)
        # reset cached properties
        self._properties = {}
        return res

    def channels(self):
        '''
        Manual channel control is deprecated.

        The API documentation includes a PUT call at this endpoint, but I'm
        not sure if it actually does anything.
        '''
        return self._connection.request('/jobs/%s/channels' % self.id)

    def legend(self):
        '''
        From the CrowdFlower documentation:

        > The legend will show you the generated keys that will end up being
        > submitted with your form.
        '''
        return self._connection.request('/jobs/%s/legend' % self.id)

    def gold_reset(self):
        '''
        Mark all of this job's test questions (gold data) as NOT gold.

        Splitting the /jobs/:job_id/gold API call into gold_reset() and
        gold_add() is not faithful to the API, but resetting gold marks
        and adding them should not have the same API endpoint in the first place.
        '''
        params = dict(reset='true')
        res = self._connection.request('/jobs/%s/gold' % self.id, method='PUT', params=params)
        # reset cache
        self._properties = {}
        self._units = {}
        return res

    def gold_add(self, check, check_with=None):
        '''
        Configure the gold labels for a task.

        * check: the name of the field being checked against
            - Can call /jobs/{job_id}/legend to see options
            - And as far as I can tell, the job.properties['gold'] field is a
              hash with keys that are "check" names, and values that are "with" names.
        * check_with: the name of the field containing the gold label for check
            - Crowdflower calls this field "with", which is a Python keyword
            - defaults to check + '_gold'

        I'm not sure why convert_units would be anything but true.
        '''
        params = dict(check=check, convert_units='true')
        if check_with is not None:
            params['with'] = check_with
        res = self._connection.request('/jobs/%s/gold' % self.id, method='PUT', params=params)
        # reset cache
        self._properties = {}
        self._units = {}
        return res

    def delete(self):
        '''DELETE this job on the server. Local caches are not touched.'''
        return self._connection.request('/jobs/%s' % self.id, method='DELETE')

    def download(self, full=True):
        '''The resulting CSV will have headers like:

        _unit_id
            Integer
            Unique ID per unit
        _created_at
            Date: m/d/yyyy hh:mm:ss
        _golden
            Enum: "true" | "false"
        _canary
            Always empty, ???
        _id
            Integer
            Unique ID per judgment
        _missed
            ???
        _started_at
            Date: m/d/yyyy hh:mm:ss
            Can use
        _tainted
            Always false, ???
        _channel
            Enum: "neodev" | "clixsense" | [etc.]
        _trust
            Always 1, ???
        _worker_id
            Integer
            Unique ID per worker
        _country
            3-letter ISO code
        _region
            String
            A number for all countries except UK, USA, Canada (others?)
        _city
            String
            City name
        _ip
            String
            IPv4 address

        And then the rest just copies over whatever fields were originally used, e.g.:
        id
        text
        sentiment
        sentiment_gold
        '''
        # pulls down the csv endpoint, unzips it, and returns a list of all the rows
        params = dict(full='true' if full else 'false')
        # use .csv, not headers=dict(Accept='text/csv'), which Crowdflower rejects
        req = self._connection.create_request('/jobs/%s.csv' % self.id, method='GET', params=params)
        res = self._connection.send_request(req)
        # because ZipFile insists on seeking, we can't simply pass over the res.raw stream
        fp = StringIO()
        fp.write(res.content)
        # fp.seek(0)
        zf = zipfile.ZipFile(fp)
        # yield each row?
        return list(read_zip_csv(zf))
| {
"content_hash": "21663fc3e52756f95bb2274d5a6ed1f3",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 481,
"avg_line_length": 32.20161290322581,
"alnum_prop": 0.5593538692712247,
"repo_name": "jfrazee/crowdflower",
"id": "c91d505bd37e9171bca9d128bd0acb870cfb94e4",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowdflower/job.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class RetailPartnersConfig(AppConfig):
    """Django application configuration for the retail_partners app."""

    name = 'retail_partners'
| {
"content_hash": "64642605d6e916f9e66b60d35ec68204",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 20.714285714285715,
"alnum_prop": 0.7724137931034483,
"repo_name": "darshanbagul/EntityManagement",
"id": "d88a40b5aae92f455a014b1c92183a729442beed",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RetailPartners/retail_partners/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1540"
},
{
"name": "HTML",
"bytes": "84127"
},
{
"name": "JavaScript",
"bytes": "81428"
},
{
"name": "Python",
"bytes": "31153"
}
],
"symlink_target": ""
} |
"""Resource state representation handlers for GGRC models. Builder modules will
produce specific resource state representations for GGRC models as well as
update/create GGRC model instances from resource state representations.
"""
from .json import * # noqa
class simple_property(property):
    """A marker subclass of the builtin ``property``.

    Behaves exactly like ``property``; the distinct type lets builder code
    tell these apart from ordinary properties via isinstance checks.
    """
| {
"content_hash": "7ae2c599515a56733811f6db7ebe0e08",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 33.44444444444444,
"alnum_prop": 0.8006644518272426,
"repo_name": "selahssea/ggrc-core",
"id": "d8e45246dcf91e0529aaaa0e6d73ca5bf4ea5689",
"size": "415",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "src/ggrc/builder/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "211857"
},
{
"name": "HTML",
"bytes": "1056523"
},
{
"name": "JavaScript",
"bytes": "1852333"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2613417"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
} |
"""Python wrappers for the Google Storage RESTful API."""
__all__ = ['ReadBuffer',
'StreamingBuffer',
]
import collections
import os
import urlparse
from . import api_utils
from . import common
from . import errors
from . import rest_api
try:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.api import app_identity
def _get_storage_api(retry_params, account_id=None):
  """Returns storage_api instance for API methods.

  Args:
    retry_params: An instance of api_utils.RetryParams. If none,
     thread's default will be used.
    account_id: Internal-use only.

  Returns:
    A storage_api instance to handle urlfetch work to GCS.
    On dev appserver, this instance will talk to a local stub by default.
    However, if you pass the arguments --appidentity_email_address and
    --appidentity_private_key_path to dev_appserver.py it will attempt to use
    the real GCS with these credentials. Alternatively, you can set a specific
    access token with common.set_access_token. You can also pass
    --default_gcs_bucket_name to set the default bucket.
  """
  api = _StorageApi(_StorageApi.full_control_scope,
                    service_account_id=account_id,
                    retry_params=retry_params)
  # when running local unit tests, the service account is test@localhost
  # from google.appengine.api.app_identity.app_identity_stub.APP_SERVICE_ACCOUNT_NAME
  service_account = app_identity.get_service_account_name()
  if (common.local_run() and not common.get_access_token()
      and (not service_account or service_account.endswith('@localhost'))):
    # No real credentials available locally: point at the local GCS stub.
    api.api_url = common.local_api_url()
  if common.get_access_token():
    # An explicitly configured token always wins.
    api.token = common.get_access_token()
  return api
class _StorageApi(rest_api._RestApi):
  """A simple wrapper for the Google Storage RESTful API.

  WARNING: Do NOT directly use this api. It's an implementation detail
  and is subject to change at any release.

  All async methods have similar args and returns.

  Args:
    path: The path to the Google Storage object or bucket, e.g.
      '/mybucket/myfile' or '/mybucket'.
    **kwd: Options for urlfetch. e.g.
      headers={'content-type': 'text/plain'}, payload='blah'.

  Returns:
    A ndb Future. When fulfilled, future.get_result() should return
    a tuple of (status, headers, content) that represents a HTTP response
    of Google Cloud Storage XML API.
  """

  api_url = 'https://storage.googleapis.com'
  # OAuth scopes for the different access levels to GCS.
  read_only_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
  read_write_scope = 'https://www.googleapis.com/auth/devstorage.read_write'
  full_control_scope = 'https://www.googleapis.com/auth/devstorage.full_control'

  def __getstate__(self):
    """Store state as part of serialization/pickling.

    Returns:
      A tuple (of dictionaries) with the state of this object
    """
    return (super(_StorageApi, self).__getstate__(), {'api_url': self.api_url})

  def __setstate__(self, state):
    """Restore state as part of deserialization/unpickling.

    Args:
      state: the tuple from a __getstate__ call
    """
    superstate, localstate = state
    super(_StorageApi, self).__setstate__(superstate)
    self.api_url = localstate['api_url']

  @api_utils._eager_tasklet
  @ndb.tasklet
  def do_request_async(self, url, method='GET', headers=None, payload=None,
                       deadline=None, callback=None):
    """Inherit docs.

    This method translates urlfetch exceptions to more service specific ones.
    """
    if headers is None:
      headers = {}
    if 'x-goog-api-version' not in headers:
      headers['x-goog-api-version'] = '2'
    headers['accept-encoding'] = 'gzip, *'
    try:
      resp_tuple = yield super(_StorageApi, self).do_request_async(
          url, method=method, headers=headers, payload=payload,
          deadline=deadline, callback=callback)
    except urlfetch.DownloadError as e:
      # Surface urlfetch timeouts as the package's own TimeoutError.
      raise errors.TimeoutError(
          'Request to Google Cloud Storage timed out.', e)

    raise ndb.Return(resp_tuple)

  def post_object_async(self, path, **kwds):
    """POST to an object."""
    return self.do_request_async(self.api_url + path, 'POST', **kwds)

  def put_object_async(self, path, **kwds):
    """PUT an object."""
    return self.do_request_async(self.api_url + path, 'PUT', **kwds)

  def get_object_async(self, path, **kwds):
    """GET an object.

    Note: No payload argument is supported.
    """
    return self.do_request_async(self.api_url + path, 'GET', **kwds)

  def delete_object_async(self, path, **kwds):
    """DELETE an object.

    Note: No payload argument is supported.
    """
    return self.do_request_async(self.api_url + path, 'DELETE', **kwds)

  def head_object_async(self, path, **kwds):
    """HEAD an object.

    Depending on request headers, HEAD returns various object properties,
    e.g. Content-Length, Last-Modified, and ETag.

    Note: No payload argument is supported.
    """
    return self.do_request_async(self.api_url + path, 'HEAD', **kwds)

  def get_bucket_async(self, path, **kwds):
    """GET a bucket."""
    return self.do_request_async(self.api_url + path, 'GET', **kwds)

  def compose_object(self, file_list, destination_file, content_type):
    """COMPOSE multiple objects together.

    Using the given list of files, calls the put object with the compose flag.
    This call merges all the files into the destination file.

    Args:
      file_list: list of dicts with the file name.
      destination_file: Path to the destination file.
      content_type: Content type for the destination file.
    """
    # Build the XML ComposeRequest body expected by the GCS XML API.
    xml_setting_list = ['<ComposeRequest>']
    for meta_data in file_list:
      xml_setting_list.append('<Component>')
      for key, val in meta_data.iteritems():
        xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
      xml_setting_list.append('</Component>')
    xml_setting_list.append('</ComposeRequest>')
    xml = ''.join(xml_setting_list)

    if content_type is not None:
      headers = {'Content-Type': content_type}
    else:
      headers = None
    status, resp_headers, content = self.put_object(
        api_utils._quote_filename(destination_file) + '?compose',
        payload=xml,
        headers=headers)
    errors.check_status(status, [200], destination_file, resp_headers,
                        body=content)
_StorageApi = rest_api.add_sync_methods(_StorageApi)
class ReadBuffer(object):
  """A class for reading Google storage files."""

  DEFAULT_BUFFER_SIZE = 1024 * 1024
  MAX_REQUEST_SIZE = 30 * DEFAULT_BUFFER_SIZE

  def __init__(self,
               api,
               path,
               buffer_size=DEFAULT_BUFFER_SIZE,
               max_request_size=MAX_REQUEST_SIZE,
               offset=0):
    """Constructor.

    Args:
      api: A StorageApi instance.
      path: Quoted/escaped path to the object, e.g. /mybucket/myfile
      buffer_size: buffer size. The ReadBuffer keeps
        one buffer. But there may be a pending future that contains
        a second buffer. This size must be less than max_request_size.
      max_request_size: Max bytes to request in one urlfetch.
      offset: Number of bytes to skip at the start of the file. If None, 0 is
        used.
    """
    self._api = api
    self._path = path
    self.name = api_utils._unquote_filename(path)
    self.closed = False

    assert buffer_size <= max_request_size
    self._buffer_size = buffer_size
    self._max_request_size = max_request_size
    self._offset = offset

    self._buffer = _Buffer()
    self._etag = None

    # Fire the first segment GET concurrently with the HEAD below; its
    # response is validated via the returned closure once HEAD has succeeded.
    get_future = self._get_segment(offset, self._buffer_size, check_response=False)

    status, headers, content = self._api.head_object(path)
    errors.check_status(status, [200], path, resp_headers=headers, body=content)
    self._file_size = long(common.get_stored_content_length(headers))
    self._check_etag(headers.get('etag'))

    self._buffer_future = None

    if self._file_size != 0:
      content, check_response_closure = get_future.get_result()
      check_response_closure()
      self._buffer.reset(content)
      self._request_next_buffer()

  def __getstate__(self):
    """Store state as part of serialization/pickling.

    The contents of the read buffer are not stored, only the current offset for
    data read by the client. A new read buffer is established at unpickling.
    The head information for the object (file size and etag) are stored to
    reduce startup and ensure the file has not changed.

    Returns:
      A dictionary with the state of this object
    """
    return {'api': self._api,
            'path': self._path,
            'buffer_size': self._buffer_size,
            'request_size': self._max_request_size,
            'etag': self._etag,
            'size': self._file_size,
            'offset': self._offset,
            'closed': self.closed}

  def __setstate__(self, state):
    """Restore state as part of deserialization/unpickling.

    Args:
      state: the dictionary from a __getstate__ call

    Along with restoring the state, pre-fetch the next read buffer.
    """
    self._api = state['api']
    self._path = state['path']
    self.name = api_utils._unquote_filename(self._path)
    self._buffer_size = state['buffer_size']
    self._max_request_size = state['request_size']
    self._etag = state['etag']
    self._file_size = state['size']
    self._offset = state['offset']
    self._buffer = _Buffer()
    self.closed = state['closed']
    self._buffer_future = None
    if self._remaining() and not self.closed:
      self._request_next_buffer()

  def __iter__(self):
    """Iterator interface.

    Note the ReadBuffer container itself is the iterator. It's
    (quote PEP0234)
    'destructive: they consumes all the values and a second iterator
    cannot easily be created that iterates independently over the same values.
    You could open the file for the second time, or seek() to the beginning.'

    Returns:
      Self.
    """
    return self

  def next(self):
    # Python 2 iterator protocol: yield one line per step, stop at EOF.
    line = self.readline()
    if not line:
      raise StopIteration()
    return line

  def readline(self, size=-1):
    """Read one line delimited by '\n' from the file.

    A trailing newline character is kept in the string. It may be absent when a
    file ends with an incomplete line. If the size argument is non-negative,
    it specifies the maximum string size (counting the newline) to return.
    A negative size is the same as unspecified. Empty string is returned
    only when EOF is encountered immediately.

    Args:
      size: Maximum number of bytes to read. If not specified, readline stops
        only on '\n' or EOF.

    Returns:
      The data read as a string.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    if size == 0 or not self._remaining():
      return ''

    data_list = []
    newline_offset = self._buffer.find_newline(size)
    # Keep draining buffers until a newline is found or size/EOF is hit.
    while newline_offset < 0:
      data = self._buffer.read(size)
      size -= len(data)
      self._offset += len(data)
      data_list.append(data)
      if size == 0 or not self._remaining():
        return ''.join(data_list)
      self._buffer.reset(self._buffer_future.get_result())
      self._request_next_buffer()
      newline_offset = self._buffer.find_newline(size)

    data = self._buffer.read_to_offset(newline_offset + 1)
    self._offset += len(data)
    data_list.append(data)

    return ''.join(data_list)

  def read(self, size=-1):
    """Read data from RAW file.

    Args:
      size: Number of bytes to read as integer. Actual number of bytes
        read is always equal to size unless EOF is reached. If size is
        negative or unspecified, read the entire file.

    Returns:
      data read as str.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    if not self._remaining():
      return ''

    data_list = []
    while True:
      remaining = self._buffer.remaining()
      if size >= 0 and size < remaining:
        # The current buffer can satisfy the request entirely.
        data_list.append(self._buffer.read(size))
        self._offset += size
        break
      else:
        # Consume the whole buffer, then fetch more if still needed.
        size -= remaining
        self._offset += remaining
        data_list.append(self._buffer.read())

        if self._buffer_future is None:
          if size < 0 or size >= self._remaining():
            needs = self._remaining()
          else:
            needs = size
          data_list.extend(self._get_segments(self._offset, needs))
          self._offset += needs
          break

        if self._buffer_future:
          self._buffer.reset(self._buffer_future.get_result())
          self._buffer_future = None

    if self._buffer_future is None:
      self._request_next_buffer()

    return ''.join(data_list)

  def _remaining(self):
    # Bytes between the current read position and EOF.
    return self._file_size - self._offset

  def _request_next_buffer(self):
    """Request next buffer.

    Requires self._offset and self._buffer are in consistent state.
    """
    self._buffer_future = None
    next_offset = self._offset + self._buffer.remaining()
    if next_offset != self._file_size:
      self._buffer_future = self._get_segment(next_offset,
                                              self._buffer_size)

  def _get_segments(self, start, request_size):
    """Get segments of the file from Google Storage as a list.

    A large request is broken into segments to avoid hitting urlfetch
    response size limit. Each segment is returned from a separate urlfetch.

    Args:
      start: start offset to request. Inclusive. Have to be within the
        range of the file.
      request_size: number of bytes to request.

    Returns:
      A list of file segments in order
    """
    if not request_size:
      return []

    end = start + request_size
    futures = []

    # Issue all segment requests concurrently, then collect in order.
    while request_size > self._max_request_size:
      futures.append(self._get_segment(start, self._max_request_size))
      request_size -= self._max_request_size
      start += self._max_request_size
    if start < end:
      futures.append(self._get_segment(start, end - start))
    return [fut.get_result() for fut in futures]

  @ndb.tasklet
  def _get_segment(self, start, request_size, check_response=True):
    """Get a segment of the file from Google Storage.

    Args:
      start: start offset of the segment. Inclusive. Have to be within the
        range of the file.
      request_size: number of bytes to request. Have to be small enough
        for a single urlfetch request. May go over the logical range of the
        file.
      check_response: True to check the validity of GCS response automatically
        before the future returns. False otherwise. See Yields section.

    Yields:
      If check_response is True, the segment [start, start + request_size)
      of the file.
      Otherwise, a tuple. The first element is the unverified file segment.
      The second element is a closure that checks response. Caller should
      first invoke the closure before consuing the file segment.

    Raises:
      ValueError: if the file has changed while reading.
    """
    end = start + request_size - 1
    content_range = '%d-%d' % (start, end)
    headers = {'Range': 'bytes=' + content_range}

    status, resp_headers, content = yield self._api.get_object_async(
        self._path, headers=headers)
    def _checker():
      errors.check_status(status, [200, 206], self._path, headers,
                          resp_headers, body=content)
      self._check_etag(resp_headers.get('etag'))
    if check_response:
      _checker()
      raise ndb.Return(content)
    raise ndb.Return(content, _checker)

  def _check_etag(self, etag):
    """Check if etag is the same across requests to GCS.

    If self._etag is None, set it. If etag is set, check that the new
    etag equals the old one.

    In the __init__ method, we fire one HEAD and one GET request using
    ndb tasklet. One of them would return first and set the first value.

    Args:
      etag: etag from a GCS HTTP response. None if etag is not part of the
        response header. It could be None for example in the case of GCS
        composite file.

    Raises:
      ValueError: if two etags are not equal.
    """
    if etag is None:
      return
    elif self._etag is None:
      self._etag = etag
    elif self._etag != etag:
      raise ValueError('File on GCS has changed while reading.')

  def close(self):
    # Idempotent: drop buffers; subsequent reads raise IOError.
    self.closed = True
    self._buffer = None
    self._buffer_future = None

  def __enter__(self):
    return self

  def __exit__(self, atype, value, traceback):
    self.close()
    return False

  def seek(self, offset, whence=os.SEEK_SET):
    """Set the file's current offset.

    Note if the new offset is out of bound, it is adjusted to either 0 or EOF.

    Args:
      offset: seek offset as number.
      whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
        os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
        (seek relative to the end, offset should be negative).

    Raises:
      IOError: When this buffer is closed.
      ValueError: When whence is invalid.
    """
    self._check_open()

    # Any buffered or in-flight data is for the old position; discard it.
    self._buffer.reset()
    self._buffer_future = None

    if whence == os.SEEK_SET:
      self._offset = offset
    elif whence == os.SEEK_CUR:
      self._offset += offset
    elif whence == os.SEEK_END:
      self._offset = self._file_size + offset
    else:
      raise ValueError('Whence mode %s is invalid.' % str(whence))

    # Clamp the new offset into [0, file_size].
    self._offset = min(self._offset, self._file_size)
    self._offset = max(self._offset, 0)
    if self._remaining():
      self._request_next_buffer()

  def tell(self):
    """Tell the file's current offset.

    Returns:
      current offset in reading this file.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    return self._offset

  def _check_open(self):
    if self.closed:
      raise IOError('Buffer is closed.')

  def seekable(self):
    return True

  def readable(self):
    return True

  def writable(self):
    return False
class _Buffer(object):
"""In memory buffer."""
def __init__(self):
self.reset()
def reset(self, content='', offset=0):
self._buffer = content
self._offset = offset
def read(self, size=-1):
"""Returns bytes from self._buffer and update related offsets.
Args:
size: number of bytes to read starting from current offset.
Read the entire buffer if negative.
Returns:
Requested bytes from buffer.
"""
if size < 0:
offset = len(self._buffer)
else:
offset = self._offset + size
return self.read_to_offset(offset)
def read_to_offset(self, offset):
"""Returns bytes from self._buffer and update related offsets.
Args:
offset: read from current offset to this offset, exclusive.
Returns:
Requested bytes from buffer.
"""
assert offset >= self._offset
result = self._buffer[self._offset: offset]
self._offset += len(result)
return result
def remaining(self):
return len(self._buffer) - self._offset
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size)
class StreamingBuffer(object):
"""A class for creating large objects using the 'resumable' API.
The API is a subset of the Python writable stream API sufficient to
support writing zip files using the zipfile module.
The exact sequence of calls and use of headers is documented at
https://developers.google.com/storage/docs/developer-guide#unknownresumables
"""
_blocksize = 256 * 1024
_flushsize = 8 * _blocksize
_maxrequestsize = 9 * 4 * _blocksize
def __init__(self,
api,
path,
content_type=None,
gcs_headers=None):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
content_type: Optional content-type; Default value is
delegate to Google Cloud Storage.
gcs_headers: additional gs headers as a str->str dict, e.g
{'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Raises:
IOError: When this location can not be found.
"""
assert self._maxrequestsize > self._blocksize
assert self._maxrequestsize % self._blocksize == 0
assert self._maxrequestsize >= self._flushsize
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
self._buffer = collections.deque()
self._buffered = 0
self._written = 0
self._offset = 0
headers = {'x-goog-resumable': 'start'}
if content_type:
headers['content-type'] = content_type
if gcs_headers:
headers.update(gcs_headers)
status, resp_headers, content = self._api.post_object(path, headers=headers)
errors.check_status(status, [201], path, headers, resp_headers,
body=content)
loc = resp_headers.get('location')
if not loc:
raise IOError('No location header found in 201 response')
parsed = urlparse.urlparse(loc)
self._path_with_token = '%s?%s' % (self._path, parsed.query)
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the write buffer are stored. Writes to the underlying
storage are required to be on block boundaries (_blocksize) except for the
last write. In the worst case the pickled version of this object may be
slightly larger than the blocksize.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'path_token': self._path_with_token,
'buffer': self._buffer,
'buffered': self._buffered,
'written': self._written,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
"""
self._api = state['api']
self._path_with_token = state['path_token']
self._buffer = state['buffer']
self._buffered = state['buffered']
self._written = state['written']
self._offset = state['offset']
self.closed = state['closed']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
def write(self, data):
"""Write some bytes.
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str.
"""
self._check_open()
if not isinstance(data, str):
raise TypeError('Expected str but got %s.' % type(data))
if not data:
return
self._buffer.append(data)
self._buffered += len(data)
self._offset += len(data)
if self._buffered >= self._flushsize:
self._flush()
def flush(self):
"""Flush as much as possible to GCS.
GCS *requires* that all writes except for the final one align on
256KB boundaries. So the internal buffer may still have < 256KB bytes left
after flush.
"""
self._check_open()
self._flush(finish=False)
def tell(self):
"""Return the total number of bytes passed to write() so far.
(There is no seek() method.)
"""
return self._offset
def close(self):
"""Flush the buffer and finalize the file.
When this returns the new file is available for reading.
"""
if not self.closed:
self.closed = True
self._flush(finish=True)
self._buffer = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def _flush(self, finish=False):
"""Internal API to flush.
Buffer is flushed to GCS only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True.
"""
while ((finish and self._buffered >= 0) or
(not finish and self._buffered >= self._blocksize)):
tmp_buffer = []
tmp_buffer_len = 0
excess = 0
while self._buffer:
buf = self._buffer.popleft()
size = len(buf)
self._buffered -= size
tmp_buffer.append(buf)
tmp_buffer_len += size
if tmp_buffer_len >= self._maxrequestsize:
excess = tmp_buffer_len - self._maxrequestsize
break
if not finish and (
tmp_buffer_len % self._blocksize + self._buffered <
self._blocksize):
excess = tmp_buffer_len % self._blocksize
break
if excess:
over = tmp_buffer.pop()
size = len(over)
assert size >= excess
tmp_buffer_len -= size
head, tail = over[:-excess], over[-excess:]
self._buffer.appendleft(tail)
self._buffered += len(tail)
if head:
tmp_buffer.append(head)
tmp_buffer_len += len(head)
data = ''.join(tmp_buffer)
file_len = '*'
if finish and not self._buffered:
file_len = self._written + len(data)
self._send_data(data, self._written, file_len)
self._written += len(data)
if file_len != '*':
break
def _send_data(self, data, start_offset, file_len):
    """Send one chunk to the storage service.

    This is a utility method that does not modify self.

    Args:
      data: str payload to send.
      start_offset: offset of this chunk within the whole file.
      file_len: an int (total file length) when this is the last chunk to
        append to the file, otherwise the string '*'.
    """
    last_offset = start_offset + len(data) - 1
    if data:
        content_range = 'bytes %d-%d/%s' % (start_offset, last_offset, file_len)
    else:
        content_range = 'bytes */%s' % file_len
    headers = {'content-range': content_range}

    status, response_headers, content = self._api.put_object(
        self._path_with_token, payload=data, headers=headers)

    # 308 acknowledges an intermediate chunk; 200 means finalized.
    expected = 308 if file_len == '*' else 200
    errors.check_status(status, [expected], self._path, headers,
                        response_headers, content,
                        {'upload_path': self._path_with_token})
def _get_offset_from_gcs(self):
    """Query GCS for the last offset already written by this upload.

    This is a utility method that does not modify self.

    Returns:
      The last byte offset written to GCS (inclusive) as an int, or -1
      when nothing has been written yet.
    """
    headers = {'content-range': 'bytes */*'}
    status, response_headers, content = self._api.put_object(
        self._path_with_token, headers=headers)
    errors.check_status(status, [308], self._path, headers,
                        response_headers, content,
                        {'upload_path': self._path_with_token})
    range_header = response_headers.get('range')
    if range_header is None:
        return -1
    # The header ends with the last persisted offset; keep only that part.
    return int(range_header.rsplit('-', 1)[-1])
def _force_close(self, file_length=None):
    """Finalize this upload immediately at file_length.

    Contents that are still in memory will not be uploaded.
    This is a utility method that does not modify self.

    Args:
      file_length: total file length. Must match what has already been
        uploaded. When None it is queried from GCS.
    """
    final_length = file_length
    if final_length is None:
        final_length = self._get_offset_from_gcs() + 1
    # An empty payload with a concrete length finalizes the object.
    self._send_data('', 0, final_length)
def _check_open(self):
    # Guard used by the public methods: writing after close() is an error.
    if self.closed:
        raise IOError('Buffer is closed.')
def seekable(self):
    # Write-only, append-only stream: random access is not supported.
    return False
def readable(self):
    # This object only uploads; reading back is done via a separate reader.
    return False
def writable(self):
    # The buffer exists solely to accept writes.
    return True
| {
"content_hash": "f4d840ed6c8031ac2211c3f61480e347",
"timestamp": "",
"source": "github",
"line_count": 919,
"max_line_length": 85,
"avg_line_length": 30.652883569096844,
"alnum_prop": 0.6339013134540291,
"repo_name": "bentilly/heroes",
"id": "26254fdade028c68649df683fb31c171d4954ffa",
"size": "28766",
"binary": false,
"copies": "6",
"ref": "refs/heads/development",
"path": "cloudstorage/storage_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "36675"
},
{
"name": "HTML",
"bytes": "206994"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "3830570"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from radmin.utils import *
from radmin.console import *
class TestUtils(TestCase):
    """Tests for radmin's dynamic import helper."""

    def test_radmin_import(self):
        """A dotted path resolves to a callable; a bad path yields None."""
        loaded = radmin_import('radmin.views.sample')
        self.assertEqual(loaded(), 'Hi there!')
        self.assertEqual(None, radmin_import('radmin.views.idontexist'))
| {
"content_hash": "873c0fc083fd8815e6efcffbfb6b4ecc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 37.2,
"alnum_prop": 0.6908602150537635,
"repo_name": "mick-t/django-radmin-console",
"id": "b4ccf0743f3f7b34db64b7c3813ca858f8726694",
"size": "372",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "radmin/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1122"
},
{
"name": "HTML",
"bytes": "567"
},
{
"name": "JavaScript",
"bytes": "3922"
},
{
"name": "Python",
"bytes": "8409"
}
],
"symlink_target": ""
} |
import unittest
import pytest
from selenium.webdriver.common.by import By
class RenderedWebElementTests(unittest.TestCase):
    """Tests for CSS/geometry properties of rendered web elements.

    NOTE(review): relies on self.driver and self.webserver being attached
    by the surrounding test runner's setup — not visible in this file.
    """

    @pytest.mark.ignore_chrome
    def testShouldPickUpStyleOfAnElement(self):
        self._loadPage("javascriptPage")

        element = self.driver.find_element(by=By.ID, value="green-parent")
        backgroundColour = element.value_of_css_property("background-color")
        self.assertEqual("rgba(0, 128, 0, 1)", backgroundColour)

        element = self.driver.find_element(by=By.ID, value="red-item")
        backgroundColour = element.value_of_css_property("background-color")
        self.assertEqual("rgba(255, 0, 0, 1)", backgroundColour)

    @pytest.mark.ignore_chrome
    def testShouldAllowInheritedStylesToBeUsed(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs has an issue with getting the right value for background-color")
        self._loadPage("javascriptPage")
        element = self.driver.find_element(by=By.ID, value="green-item")
        backgroundColour = element.value_of_css_property("background-color")
        self.assertEqual("transparent", backgroundColour)

    def testShouldCorrectlyIdentifyThatAnElementHasWidth(self):
        self._loadPage("xhtmlTest")
        shrinko = self.driver.find_element(by=By.ID, value="linkId")
        size = shrinko.size
        self.assertTrue(size["width"] > 0, "Width expected to be greater than 0")
        self.assertTrue(size["height"] > 0, "Height expected to be greater than 0")

    def testShouldBeAbleToDetermineTheRectOfAnElement(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver does not support rect command")
        self._loadPage("xhtmlTest")
        element = self.driver.find_element(By.ID, "username")
        rect = element.rect
        self.assertTrue(rect["x"] > 0, "Element should not be in the top left")
        self.assertTrue(rect["y"] > 0, "Element should not be in the top left")
        self.assertTrue(rect["width"] > 0, "Width expected to be greater than 0")
        self.assertTrue(rect["height"] > 0, "Height expected to be greater than 0")

    # --- helpers -----------------------------------------------------------

    def _pageURL(self, name):
        # Map a page name to its URL on the local test webserver.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| {
"content_hash": "ab4d8c86aed4aab882460b4dd134a147",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 100,
"avg_line_length": 39.38709677419355,
"alnum_prop": 0.6781326781326781,
"repo_name": "sag-enorman/selenium",
"id": "c1fed12640b3f16f5fdf6914fd77363b8a0e2780",
"size": "3230",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/common/rendered_webelement_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C",
"bytes": "62267"
},
{
"name": "C#",
"bytes": "2830849"
},
{
"name": "C++",
"bytes": "1914688"
},
{
"name": "CSS",
"bytes": "25162"
},
{
"name": "HTML",
"bytes": "1839154"
},
{
"name": "Java",
"bytes": "4318082"
},
{
"name": "JavaScript",
"bytes": "4933013"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "730677"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "787735"
},
{
"name": "Shell",
"bytes": "1305"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
import os
import sys
import shutil
import subprocess
def check_sudo_status():
    """Return True when the 'sudo' command is available, else False.

    Runs bare 'sudo' (which prints its usage banner) and looks for that
    banner in the combined stdout/stderr output.
    """
    try:
        sudo_status = subprocess.Popen('sudo', stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
    except OSError:
        # The binary is not installed / not on PATH: Popen raises instead
        # of producing output, so handle it the same way as a bad banner.
        print('\nThe command "sudo" does not exist')
        return False
    output = sudo_status.stdout.read().decode('utf-8')
    sudo_status.wait()  # reap the child to avoid leaving a zombie
    if "usage: sudo" not in output:
        # Fixed message wording (was: 'dose not exists').
        print('\nThe command "sudo" does not exist')
        return False
    return True
def get_test_module_repo(url, module):
    """Fetch the sources of *module* into /tmp and install its node deps.

    iotivity-node is downloaded as a zip archive via wget; every other
    module is cloned with git. On any failure the process exits with 1.

    Args:
      url: download / clone URL.
      module: module name, also the target directory name under /tmp.
    """
    repo_target_path = '/tmp'
    repo_path = os.path.join('/tmp', module)
    if os.path.exists(repo_path):
        status = check_sudo_status()
        if status is False:
            # Fixed message: the original concatenation lacked spaces around
            # the module name ("...repository ofMODULEat first...").
            print('\nPlease remove the existing repository of ' + module +
                  ' at first using root')
            sys.exit(1)
        else:
            shutil.rmtree(repo_path)
    ori_path = os.getcwd()
    os.chdir(repo_target_path)
    if module == 'iotivity-node':
        # iotivity-node is distributed as a zip archive, not a git repo.
        git_cmd = ''.join([
            'wget ',
            url
        ])
    else:
        git_cmd = ''.join([
            'git ',
            'clone ',
            url
        ])
    git_status = subprocess.Popen(git_cmd, shell=True, stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
    git_status.wait()
    if git_status.returncode != 0:
        print('\nClone into ' + module + ' failed!\n' +
              git_status.stdout.read().decode('utf-8'))
        sys.exit(1)
    inst_node_modules(url, module)
    os.chdir(ori_path)
def inst_node_modules(url, module):
    """Unpack (for iotivity-node) and 'npm install' the module under /tmp.

    Args:
      url: the URL the sources came from; used to derive the zip file name
        for iotivity-node.
      module: module name / directory name under /tmp.
    """
    if module == 'iotivity-node':
        sub_str = url.split('/')
        for child_str in sub_str:
            if child_str.endswith('zip'):
                os.system('unzip %s >/dev/null 2>&1' % child_str)
                # BUG FIX: str.strip('.zip') removes any of the characters
                # '.', 'z', 'i', 'p' from *both ends*, which can eat parts
                # of a version string; slice off the '.zip' suffix instead.
                os.system('mv iotivity-node-%s iotivity-node' %
                          child_str[:-len('.zip')])
                os.system('rm %s*' % child_str)
    os.chdir('/tmp/%s' % module)
    inst_node_cmd = 'npm install'
    inst_node_status = subprocess.Popen(inst_node_cmd, shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
    inst_node_status.wait()
    if inst_node_status.returncode != 0:
        # First attempt failed; retry once with root privileges.
        print('\nRe-install node modules using root!')
        inst_node = subprocess.Popen('sudo npm install', shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
        inst_node.wait()
        if inst_node.returncode != 0:
            print('\nInstall node modules failed! Please check it!\n' +
                  inst_node.stdout.read().decode('utf-8'))
            sys.exit(1)
    if module == "iotivity-node":
        # iotivity-node additionally needs a local grunt-cli install.
        inst_grunt_cli_cmd = 'npm install grunt-cli'
        inst_grunt_cli_status = subprocess.Popen(inst_grunt_cli_cmd,
                                                 shell=True,
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.STDOUT)
        inst_grunt_cli_status.wait()
        if inst_grunt_cli_status.returncode != 0:
            print('\nInstall grunt-cli failed!\n' +
                  inst_grunt_cli_status.stdout.read().decode('utf-8'))
            sys.exit(1)
        else:
            print('\nInstall grunt-cli done!')
| {
"content_hash": "be25e7851a771bcb8a2f414c7f582d61",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 130,
"avg_line_length": 39.2972972972973,
"alnum_prop": 0.5842503438789546,
"repo_name": "wanghongjuan/meta-iotqa-1",
"id": "42cfe2fff9171cde7be2de3df7f74319a3599b51",
"size": "2908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/oeqa/runtime/nodejs/get_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "BitBake",
"bytes": "6776"
},
{
"name": "C",
"bytes": "5633"
},
{
"name": "Java",
"bytes": "504"
},
{
"name": "JavaScript",
"bytes": "24097"
},
{
"name": "M4",
"bytes": "5945"
},
{
"name": "Makefile",
"bytes": "392"
},
{
"name": "Python",
"bytes": "540824"
},
{
"name": "Shell",
"bytes": "6714"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from createsend.createsend import CreateSendBase
from createsend.utils import json_to_py
class JourneyEmail(CreateSendBase):
    """Represents a single email within a journey and its statistics."""

    def __init__(self, auth=None, journey_email_id=None):
        self.journey_email_id = journey_email_id
        super(JourneyEmail, self).__init__(auth)

    def bounces(self, date=None, page=None, page_size=None, order_direction=None):
        """Retrieves the bounces for this journey email."""
        return self.get_journey_email_response(date, page, page_size, order_direction, "bounces")

    def clicks(self, date=None, page=None, page_size=None, order_direction=None):
        """Retrieves the clicks for this journey email."""
        return self.get_journey_email_response(date, page, page_size, order_direction, "clicks")

    def opens(self, date=None, page=None, page_size=None, order_direction=None):
        """Retrieves the opens for this journey email."""
        return self.get_journey_email_response(date, page, page_size, order_direction, "opens")

    def recipients(self, date=None, page=None, page_size=None, order_direction=None):
        """Retrieves the recipients for this journey email."""
        return self.get_journey_email_response(date, page, page_size, order_direction, "recipients")

    def unsubscribes(self, date=None, page=None, page_size=None, order_direction=None):
        """Retrieves the unsubscribes for this journey email."""
        return self.get_journey_email_response(date, page, page_size, order_direction, "unsubscribes")

    def get_journey_email_response(self, date, page, page_size, order_direction, uri):
        """Fetch one statistics endpoint for this journey email (named by uri)."""
        candidates = (("date", date), ("page", page),
                      ("pagesize", page_size), ("orderdirection", order_direction))
        # Only explicitly supplied paging/filter options are sent.
        params = {name: value for name, value in candidates if value is not None}
        response = self._get(self.uri_for(uri), params=params)
        return json_to_py(response)

    def uri_for(self, action):
        """Build the API path for the given statistics action."""
        return "/journeys/email/%s/%s.json" % (self.journey_email_id, action)
| {
"content_hash": "710caada98cb9db255ed9e43036ea8ba",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 102,
"avg_line_length": 47.02,
"alnum_prop": 0.6563164610803913,
"repo_name": "campaignmonitor/createsend-python",
"id": "fd0139481ca1f8ec4889357549b2b81cd187cc63",
"size": "2351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/createsend/journey_email.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181030"
},
{
"name": "Ruby",
"bytes": "421"
}
],
"symlink_target": ""
} |
"""Test modules for nested-dict."""
| {
"content_hash": "8e0a2f95ef91e462291410c7fa21dfdf",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.6666666666666666,
"repo_name": "bunbun/nested-dict",
"id": "4dd636addbf3c71a6f6475f4c296a1c8d8755f06",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35758"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
import os.path
from mock import Mock, patch, call
from tempfile import mkdtemp
from shutil import rmtree
from test._common import unittest
from test.helper import TestHelper
from beets.util import bytestring_path
from beetsplug.thumbnails import (ThumbnailsPlugin, NORMAL_DIR, LARGE_DIR,
write_metadata_im, write_metadata_pil,
PathlibURI, GioURI)
class ThumbnailsTest(unittest.TestCase, TestHelper):
    """Unit tests for the beets thumbnails plugin (heavily mock-based)."""

    def setUp(self):
        self.setup_beets()

    def tearDown(self):
        self.teardown_beets()

    @patch('beetsplug.thumbnails.util')
    def test_write_metadata_im(self, mock_util):
        metadata = {"a": u"A", "b": u"B"}
        write_metadata_im("foo", metadata)
        # Dict iteration order is not guaranteed here, so accept either
        # ordering of the -set options.
        try:
            command = u"convert foo -set a A -set b B foo".split(' ')
            mock_util.command_output.assert_called_once_with(command)
        except AssertionError:
            command = u"convert foo -set b B -set a A foo".split(' ')
            mock_util.command_output.assert_called_once_with(command)

    @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
    @patch('beetsplug.thumbnails.os.stat')
    def test_add_tags(self, mock_stat, _):
        plugin = ThumbnailsPlugin()
        plugin.write_metadata = Mock()
        plugin.get_uri = Mock(side_effect={b"/path/to/cover":
                                           "COVER_URI"}.__getitem__)
        album = Mock(artpath=b"/path/to/cover")
        mock_stat.return_value.st_mtime = 12345

        plugin.add_tags(album, b"/path/to/thumbnail")

        metadata = {"Thumb::URI": "COVER_URI",
                    "Thumb::MTime": u"12345"}
        plugin.write_metadata.assert_called_once_with(b"/path/to/thumbnail",
                                                      metadata)
        mock_stat.assert_called_once_with(album.artpath)

    @patch('beetsplug.thumbnails.os')
    @patch('beetsplug.thumbnails.ArtResizer')
    @patch('beetsplug.thumbnails.get_im_version')
    @patch('beetsplug.thumbnails.get_pil_version')
    @patch('beetsplug.thumbnails.GioURI')
    def test_check_local_ok(self, mock_giouri, mock_pil, mock_im,
                            mock_artresizer, mock_os):
        # test local resizing capability
        mock_artresizer.shared.local = False
        plugin = ThumbnailsPlugin()
        self.assertFalse(plugin._check_local_ok())

        # test dirs creation
        mock_artresizer.shared.local = True

        def exists(path):
            if path == NORMAL_DIR:
                return False
            if path == LARGE_DIR:
                return True
            raise ValueError(u"unexpected path {0!r}".format(path))
        mock_os.path.exists = exists
        plugin = ThumbnailsPlugin()
        mock_os.makedirs.assert_called_once_with(NORMAL_DIR)
        self.assertTrue(plugin._check_local_ok())

        # test metadata writer function
        mock_os.path.exists = lambda _: True
        mock_pil.return_value = False
        mock_im.return_value = False
        # Neither PIL nor ImageMagick available -> plugin refuses to init.
        with self.assertRaises(AssertionError):
            ThumbnailsPlugin()

        mock_pil.return_value = True
        self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_pil)

        mock_im.return_value = True
        # ImageMagick is preferred when both are available.
        self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im)

        mock_pil.return_value = False
        self.assertEqual(ThumbnailsPlugin().write_metadata, write_metadata_im)

        self.assertTrue(ThumbnailsPlugin()._check_local_ok())

        # test URI getter function
        giouri_inst = mock_giouri.return_value
        giouri_inst.available = True
        self.assertEqual(ThumbnailsPlugin().get_uri, giouri_inst.uri)

        giouri_inst.available = False
        self.assertEqual(ThumbnailsPlugin().get_uri.__self__.__class__,
                         PathlibURI)

    @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
    @patch('beetsplug.thumbnails.ArtResizer')
    @patch('beetsplug.thumbnails.util')
    @patch('beetsplug.thumbnails.os')
    @patch('beetsplug.thumbnails.shutil')
    def test_make_cover_thumbnail(self, mock_shutils, mock_os, mock_util,
                                  mock_artresizer, _):
        thumbnail_dir = os.path.normpath(b"/thumbnail/dir")
        md5_file = os.path.join(thumbnail_dir, b"md5")
        path_to_art = os.path.normpath(b"/path/to/art")

        mock_os.path.join = os.path.join  # don't mock that function

        plugin = ThumbnailsPlugin()
        plugin.add_tags = Mock()

        album = Mock(artpath=path_to_art)
        mock_util.syspath.side_effect = lambda x: x
        plugin.thumbnail_file_name = Mock(return_value=b'md5')
        mock_os.path.exists.return_value = False

        def os_stat(target):
            if target == md5_file:
                return Mock(st_mtime=1)
            elif target == path_to_art:
                return Mock(st_mtime=2)
            else:
                raise ValueError(u"invalid target {0}".format(target))
        mock_os.stat.side_effect = os_stat

        plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)

        mock_os.path.exists.assert_called_once_with(md5_file)
        # NOTE(review): Mock has no `has_calls`; this auto-created attribute
        # silently no-ops — probably `assert_has_calls` was intended.
        mock_os.stat.has_calls([call(md5_file), call(path_to_art)],
                               any_order=True)

        resize = mock_artresizer.shared.resize
        resize.assert_called_once_with(12345, path_to_art, md5_file)
        plugin.add_tags.assert_called_once_with(album, resize.return_value)
        mock_shutils.move.assert_called_once_with(resize.return_value,
                                                  md5_file)

        # now test with recent thumbnail & with force
        mock_os.path.exists.return_value = True
        plugin.force = False
        resize.reset_mock()

        def os_stat(target):
            if target == md5_file:
                return Mock(st_mtime=3)
            elif target == path_to_art:
                return Mock(st_mtime=2)
            else:
                raise ValueError(u"invalid target {0}".format(target))
        mock_os.stat.side_effect = os_stat
        plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
        self.assertEqual(resize.call_count, 0)

        # and with force
        plugin.config['force'] = True
        plugin.make_cover_thumbnail(album, 12345, thumbnail_dir)
        resize.assert_called_once_with(12345, path_to_art, md5_file)

    @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
    def test_make_dolphin_cover_thumbnail(self, _):
        plugin = ThumbnailsPlugin()
        tmp = bytestring_path(mkdtemp())
        album = Mock(path=tmp,
                     artpath=os.path.join(tmp, b"cover.jpg"))
        plugin.make_dolphin_cover_thumbnail(album)
        with open(os.path.join(tmp, b".directory"), "rb") as f:
            self.assertEqual(
                f.read().splitlines(),
                [b"[Desktop Entry]", b"Icon=./cover.jpg"]
            )

        # not rewritten when it already exists (yup that's a big limitation)
        album.artpath = b"/my/awesome/art.tiff"
        plugin.make_dolphin_cover_thumbnail(album)
        with open(os.path.join(tmp, b".directory"), "rb") as f:
            self.assertEqual(
                f.read().splitlines(),
                [b"[Desktop Entry]", b"Icon=./cover.jpg"]
            )

        rmtree(tmp)

    @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
    @patch('beetsplug.thumbnails.ArtResizer')
    def test_process_album(self, mock_artresizer, _):
        get_size = mock_artresizer.shared.get_size

        plugin = ThumbnailsPlugin()
        make_cover = plugin.make_cover_thumbnail = Mock(return_value=True)
        make_dolphin = plugin.make_dolphin_cover_thumbnail = Mock()

        # no art
        album = Mock(artpath=None)
        plugin.process_album(album)
        self.assertEqual(get_size.call_count, 0)
        self.assertEqual(make_dolphin.call_count, 0)

        # cannot get art size
        album.artpath = b"/path/to/art"
        get_size.return_value = None
        plugin.process_album(album)
        get_size.assert_called_once_with(b"/path/to/art")
        self.assertEqual(make_cover.call_count, 0)

        # dolphin tests
        plugin.config['dolphin'] = False
        plugin.process_album(album)
        self.assertEqual(make_dolphin.call_count, 0)

        plugin.config['dolphin'] = True
        plugin.process_album(album)
        make_dolphin.assert_called_once_with(album)

        # small art
        get_size.return_value = 200, 200
        plugin.process_album(album)
        make_cover.assert_called_once_with(album, 128, NORMAL_DIR)

        # big art
        make_cover.reset_mock()
        get_size.return_value = 500, 500
        plugin.process_album(album)
        # NOTE(review): `has_calls` is a no-op Mock auto-attribute (see above).
        make_cover.has_calls([call(album, 128, NORMAL_DIR),
                              call(album, 256, LARGE_DIR)], any_order=True)

    @patch('beetsplug.thumbnails.ThumbnailsPlugin._check_local_ok')
    @patch('beetsplug.thumbnails.decargs')
    def test_invokations(self, mock_decargs, _):
        plugin = ThumbnailsPlugin()
        plugin.process_album = Mock()
        album = Mock()

        plugin.process_album.reset_mock()
        lib = Mock()
        album2 = Mock()
        lib.albums.return_value = [album, album2]
        plugin.process_query(lib, Mock(), None)
        lib.albums.assert_called_once_with(mock_decargs.return_value)
        # NOTE(review): `has_calls` is a no-op Mock auto-attribute (see above).
        plugin.process_album.has_calls([call(album), call(album2)],
                                       any_order=True)

    @patch('beetsplug.thumbnails.BaseDirectory')
    def test_thumbnail_file_name(self, mock_basedir):
        plug = ThumbnailsPlugin()
        plug.get_uri = Mock(return_value=u"file:///my/uri")
        # MD5 of the URI, per the freedesktop thumbnail naming scheme.
        self.assertEqual(plug.thumbnail_file_name(b'idontcare'),
                         b"9488f5797fbe12ffb316d607dfd93d04.png")

    def test_uri(self):
        gio = GioURI()
        if not gio.available:
            self.skipTest(u"GIO library not found")

        self.assertEqual(gio.uri(u"/foo"), b"file:///")  # silent fail
        self.assertEqual(gio.uri(b"/foo"), b"file:///foo")
        self.assertEqual(gio.uri(b"/foo!"), b"file:///foo!")
        self.assertEqual(
            gio.uri(b'/music/\xec\x8b\xb8\xec\x9d\xb4'),
            b'file:///music/%EC%8B%B8%EC%9D%B4')
def suite():
    """Build the default unittest suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly; delegates to suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| {
"content_hash": "d3a32a8e825700442a1a4ebaa7976eb9",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 79,
"avg_line_length": 38.268115942028984,
"alnum_prop": 0.6014012497633024,
"repo_name": "jcoady9/beets",
"id": "fe1c80f4a860630fe1ff86b37bcc39d829aa35b9",
"size": "11229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_thumbnails.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1767900"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
from rpython.translator.c.test.test_genc import compile
from rpython.rlib.longlong2float import longlong2float, float2longlong
from rpython.rlib.longlong2float import uint2singlefloat, singlefloat2uint
from rpython.rlib.rarithmetic import r_singlefloat, r_longlong
from rpython.rtyper.test.test_llinterp import interpret
def fn(f1):
    """Round-trip a float through its longlong bit pattern and back."""
    return longlong2float(float2longlong(f1))
def enum_floats():
    """Yield a representative set of doubles: zeros, ordinary values,
    both infinities and a NaN."""
    infinity = 1e200 * 1e200
    for value in (0.0, -0.0, 1.0, -2.34567, 2.134891117e22,
                  infinity, -infinity, infinity / infinity):
        yield value
def test_float2longlong():
    expected = r_longlong(0)
    assert float2longlong(0.0) == expected
def test_longlong_as_float():
    # repr-compare so that NaN and -0.0 are handled correctly.
    for value in enum_floats():
        assert repr(fn(value)) == repr(value)
def test_compiled():
    compiled = compile(fn, [float])
    for value in enum_floats():
        assert repr(compiled(value)) == repr(value)
def test_interpreted():
    def roundtrip(f1):
        try:
            ll = float2longlong(f1)
            return longlong2float(ll)
        except Exception:
            return 500
    for value in enum_floats():
        result = interpret(roundtrip, [value])
        assert repr(result) == repr(value)
# ____________________________________________________________
def fnsingle(f1):
    """Round-trip a float through a single-precision uint and back."""
    as_single = r_singlefloat(f1)
    bits = singlefloat2uint(as_single)
    return float(uint2singlefloat(bits))
def test_int_as_singlefloat():
    for value in enum_floats():
        expected = float(r_singlefloat(value))
        assert repr(fnsingle(value)) == repr(expected)
def test_compiled_single():
    compiled = compile(fnsingle, [float])
    for value in enum_floats():
        expected = float(r_singlefloat(value))
        assert repr(compiled(value)) == repr(expected)
# ____________________________________________________________
def fn_encode_nan(f1, i2):
    """Check the NaN-boxing round-trip for a (float, int32) pair.

    Returns 42 so the compiled variant has a checkable result.
    """
    from rpython.rlib.longlong2float import can_encode_float, can_encode_int32
    from rpython.rlib.longlong2float import encode_int32_into_longlong_nan
    from rpython.rlib.longlong2float import decode_int32_from_longlong_nan
    from rpython.rlib.longlong2float import is_int32_from_longlong_nan
    from rpython.rlib.rfloat import isnan
    assert can_encode_float(f1)
    assert can_encode_int32(i2)
    # A real float's bit pattern must not be mistaken for a boxed int,
    # while an encoded int must be recognized as one.
    l1 = float2longlong(f1)
    l2 = encode_int32_into_longlong_nan(i2)
    assert not is_int32_from_longlong_nan(l1)
    assert is_int32_from_longlong_nan(l2)
    f1b = longlong2float(l1)
    # NaN never compares equal to itself, hence the isnan special case.
    assert f1b == f1 or (isnan(f1b) and isnan(f1))
    assert decode_int32_from_longlong_nan(l2) == i2
    return 42
def test_compiled_encode_nan():
    compiled = compile(fn_encode_nan, [float, int])
    ints = [int(-2**31), int(2**31-1), 42]
    for value in enum_floats():
        # rotate so each float pairs with a different int
        ints.insert(0, ints.pop())
        pick = ints[0]
        fn_encode_nan(value, pick)
        assert compiled(value, pick) == 42
| {
"content_hash": "19e760b8ed6c37f63378a734b62a725d",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 28.88659793814433,
"alnum_prop": 0.6142041399000714,
"repo_name": "oblique-labs/pyVM",
"id": "1b39085c3bf6a0900f0334c497ac11c9bc949a6d",
"size": "2802",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/rlib/test/test_longlong2float.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
} |
from app import db
# Define a base model for other database tables to inherit
class Base(db.Model):
    """Abstract base model: integer PK plus created/modified timestamps."""

    __abstract__ = True

    # Surrogate primary key shared by all tables.
    id = db.Column(db.Integer, primary_key=True)
    # Set once, at insert time.
    date_created = db.Column(
        db.DateTime, default=db.func.current_timestamp())
    # Refreshed by the database on every update.
    date_modified = db.Column(
        db.DateTime, default=db.func.current_timestamp(),
        onupdate=db.func.current_timestamp())
# Define a User model
class User(Base):
__tablename__ = 'auth_user'
# User Name
name = db.Column(db.String(128), nullable=False)
# Identification Data: email & password
email = db.Column(db.String(128), nullable=False,
unique=True)
password = db.Column(db.String(192), nullable=False)
# Authorisation Data: role & status
role = db.Column(db.SmallInteger, nullable=False)
status = db.Column(db.SmallInteger, nullable=False)
# New instance instantiation procedure
def __init__(self, name, email, password):
self.name = name
self.email = email
self.password = password
def __repr__(self):
return '<User %r>' % (self.name)
| {
"content_hash": "14791e1ac48027ef6b4fae2d409aaba0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 58,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.6378091872791519,
"repo_name": "rosaldo/large-flask-app",
"id": "47a325f3cbf2841c14678071c9f270dfe122cf1d",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/mod_auth/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4768"
}
],
"symlink_target": ""
} |
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
# Plugin description shown to the user, and the vulnerability categories for
# which URL vector files exist (looked up as SELENIUM_URL_VECTORS_<CATEGORY>).
DESCRIPTION = "Sends a bunch of URLs through selenium"
CATEGORIES = ['RCE', 'SQLI', 'XSS', 'CHARSET']
def run(PluginInfo):
    """Run the selenium URL launcher once per accepted argument set."""
    content = []
    config = ServiceLocator.get_component("config")
    OWTFLogger.log("WARNING: This plugin requires a small selenium installation, please run '%s' if you have issues" %
                   config.FrameworkConfigGet('INSTALL_SCRIPT'))
    plugin_params = ServiceLocator.get_component("plugin_params")
    category_descrip = 'Category to use (i.e. ' + ', '.join(sorted(CATEGORIES)) + ')'
    args = {
        'Description': DESCRIPTION,
        'Mandatory': {
            'BASE_URL': 'The URL to be pre-pended to the tests',
            'CATEGORY': category_descrip
        },
        'Optional': {'REPEAT_DELIM': config.FrameworkConfigGet('REPEAT_DELIM_DESCRIP')}
    }
    for arg_set in plugin_params.GetArgs(args, PluginInfo):
        plugin_params.SetConfig(arg_set)
        # Each category has its own file of URL vectors.
        vectors_file = config.FrameworkConfigGet("SELENIUM_URL_VECTORS_" + arg_set['CATEGORY'])
        launcher = ServiceLocator.get_component("selenium_handler").CreateURLLauncher({
            'BASE_URL': arg_set['BASE_URL'],
            'INPUT_FILE': vectors_file
        })
        launcher.Run()
    return content
| {
"content_hash": "78d962ce7dbab69f080ce62017b41172",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 118,
"avg_line_length": 39.515151515151516,
"alnum_prop": 0.6510736196319018,
"repo_name": "DarKnight24/owtf",
"id": "9be7f93e4a259b853acf65b5567d34706c036022",
"size": "1304",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "plugins/auxiliary/selenium/Selenium_URL_Launcher@OWTF-ASEL-001.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "165961"
},
{
"name": "JavaScript",
"bytes": "20557"
},
{
"name": "Python",
"bytes": "704531"
},
{
"name": "Shell",
"bytes": "58019"
}
],
"symlink_target": ""
} |
"""
Implementation of handler class and corresponding descriptor.
"""
import weakref
import inspect
from ._dict import Dict
from ._loop import loop
from . import logger
# Placeholders for the browser globals; None on the Python side.
# NOTE(review): presumably replaced when this module runs as transpiled
# JavaScript — confirm against the PyScript/JS layer.
console = window = None
def this_is_js():
    """Return whether the code is running as JavaScript (always False here).

    NOTE(review): likely overridden/replaced in the transpiled JS version
    of this module — confirm.
    """
    return False
def looks_like_method(func):
    """Return True if *func* looks like an unbound method.

    That is: a plain function whose first positional argument is named
    'self' or 'this'. Bound methods (which have ``__func__``) and
    callables that inspect cannot analyse return False.
    """
    if hasattr(func, '__func__'):
        return False  # this is a bound method
    try:
        # BUG FIX: inspect.getargspec() was removed in Python 3.11;
        # getfullargspec() has the same [0] (args) layout and also
        # accepts functions with annotations/keyword-only args.
        return inspect.getfullargspec(func)[0][0] in ('self', 'this')
    except (TypeError, IndexError):
        # TypeError: unsupported callable (e.g. builtins);
        # IndexError: function takes no positional arguments.
        return False
# Decorator to wrap a function in a Handler object
def connect(*connection_strings):
    """ Decorator to turn a method of HasEvents into an event
    :class:`Handler <flexx.event.Handler>`.

    Each connection string names an event type to connect to, so one
    method can be connected to several event types at once. See the docs
    on dynamism and labels for the full connection-string syntax.

    To connect functions or methods to an event from another HasEvents
    object, use that object's
    :func:`HasEvents.connect()<flexx.event.HasEvents.connect>` method.

    .. code-block:: py

        class MyObject(event.HasEvents):

            @event.connect('first_name', 'last_name')
            def greet(self, *events):
                print('hello %s %s' % (self.first_name, self.last_name))
    """
    # Used without arguments (or as a plain call on a function) is an error.
    misused = (not connection_strings) or (
        len(connection_strings) == 1 and callable(connection_strings[0]))
    if misused:
        raise RuntimeError('Connect decorator needs one or more event strings.')

    func = None
    if callable(connection_strings[0]):
        func, connection_strings = connection_strings[0], connection_strings[1:]

    for conn in connection_strings:
        if not (isinstance(conn, str) and len(conn) > 0):
            raise ValueError('Connection string must be nonempty strings.')

    def _connect(func):
        if not callable(func):
            raise TypeError('connect() decorator requires a callable.')
        if not looks_like_method(func):
            raise TypeError('connect() decorator requires a method '
                            '(first arg must be self).')
        return HandlerDescriptor(func, connection_strings)

    return _connect(func) if func is not None else _connect
class HandlerDescriptor:
    """ Class descriptor for handlers.

    Arguments:
        func (callable): function that handles the events.
        connection_strings (list): the strings that represent the connections.
        ob (HasEvents, optional): the HasEvents object to use as a basis for
            the connection. A weak reference to this object is stored.
    """

    def __init__(self, func, connection_strings, ob=None):
        assert callable(func)  # HandlerDescriptor is not instantiated directly
        self._func = func
        self._name = func.__name__  # updated by HasEvents meta class
        # Weakref so the descriptor never keeps the HasEvents object alive.
        self._ob = None if ob is None else weakref.ref(ob)
        self._connection_strings = connection_strings
        self.__doc__ = '*%s*: %s' % ('event handler', func.__doc__ or self._name)

    def __repr__(self):
        t = '<%s %r(this should be a class attribute) at 0x%x>'
        return t % (self.__class__.__name__, self._name, id(self))

    def __set__(self, obj, value):
        # Handlers are read-only attributes.
        raise AttributeError('Cannot overwrite handler %r.' % self._name)

    def __delete__(self, obj):
        raise AttributeError('Cannot delete handler %r.' % self._name)

    def __get__(self, instance, owner):
        if instance is None:
            return self

        # Lazily create one Handler per instance and cache it on a
        # private attribute, so repeated access returns the same object.
        private_name = '_' + self._name + '_handler'
        try:
            handler = getattr(instance, private_name)
        except AttributeError:
            handler = Handler((self._func, instance), self._connection_strings,
                              instance if self._ob is None else self._ob())
            setattr(instance, private_name, handler)

        # Make the handler use *our* func one time. In most situations
        # this is the same function that the handler has, but not when
        # using super(); i.e. this allows a handler to call the same
        # handler of its super class.
        handler._use_once(self._func)
        return handler

    @property
    def local_connection_strings(self):
        """ List of connection strings that are local to the object
        (i.e. contain no dots, so they do not reach into sub-objects).
        """
        return [s for s in self._connection_strings if '.' not in s]
class Handler:
    """ Wrapper around a function object to connect it to one or more events.
    This class should not be instantiated directly; use ``event.connect`` or
    ``HasEvents.connect`` instead.
    Arguments:
        func (callable): function that handles the events.
        connection_strings (list): the strings that represent the connections.
        ob (HasEvents): the HasEvents object to use as a basis for the
            connection. A weak reference to this object is stored.
    """
    # Class-level counter; gives every handler a unique, ordered id.
    _count = 0
    def __init__(self, func, connection_strings, ob):
        Handler._count += 1
        self._id = 'h%i' % Handler._count  # to ensure a consistent event order
        # Store objects using a weakref.
        # - ob1 is the HasEvents object of which the connect() method was called
        #   to create the handler. Connection strings are relative to this object.
        # - ob2 is the object to be passed to func (if it is a method). Is often
        #   the same as ob1, but not per se. Can be None.
        self._ob1 = weakref.ref(ob)
        # Get unbounded version of bound methods.
        self._ob2 = None  # if None, its regarded a regular function
        if isinstance(func, tuple):
            # func given as a (function, instance) pair.
            self._ob2 = weakref.ref(func[1])
            func = func[0]
        if getattr(func, '__self__', None) is not None:  # builtin funcs have __self__
            if getattr(func, '__func__', None) is not None:
                # Bound method: keep the instance weakly, store the plain function.
                self._ob2 = weakref.ref(func.__self__)
                func = func.__func__
        # Store func, name, and docstring (e.g. for sphinx docs)
        assert callable(func)
        self._func = func
        self._func_once = func  # may be temporarily overridden; see _use_once()
        self._name = func.__name__
        self.__doc__ = '*%s*: %s' % ('event handler', func.__doc__ or self._name)
        self._init(connection_strings)
    def _init(self, connection_strings):
        """ Init of this handler that is compatible with PyScript.
        """
        # Init connections
        self._connections = []
        for s in connection_strings:
            d = Dict()  # don't do Dict(foo=x) bc PyScript only supports that for dict
            self._connections.append(d)
            d.fullname = s
            d.type = s.split('.')[-1]  # event type is the last path element
            d.objects = []
        # Pending events for this handler
        self._scheduled_update = False
        self._pending = []  # pending events
        # Connect
        for index in range(len(self._connections)):
            self._connect_to_event(index)
    def __repr__(self):
        c = '+'.join([str(len(c.objects)) for c in self._connections])
        cname = self.__class__.__name__
        return '<%s %r with %s connections at 0x%x>' % (cname, self._name, c, id(self))
    def get_name(self):
        """ Get the name of this handler, usually corresponding to the name
        of the function that this handler wraps.
        """
        return self._name
    def get_connection_info(self):
        """ Get a list of tuples (name, connection_names), where
        connection_names is a list of type names (including label) for
        the made connections.
        """
        return [(c.fullname, [u[1] for u in c.objects])
                for c in self._connections]
    ## Calling / handling
    def _use_once(self, func):
        # Make the *next* call use func instead of the stored function.
        # Set by HandlerDescriptor.__get__ to support super() handler calls.
        self._func_once = func
    def __call__(self, *events):
        """ Call the handler function.
        """
        func = self._func_once
        if self._ob2 is not None:
            if self._ob2() is not None:
                res = func(self._ob2(), *events)
            else:
                # We detected that the object that wants the events no longer exists
                self.dispose()
                return
        else:
            res = func(*events)
        self._func_once = self._func  # reset the one-shot override
        return res
    def _add_pending_event(self, label, ev):
        """ Add an event object to be handled at the next event loop
        iteration. Called from HasEvents.emit().
        """
        if not self._scheduled_update:
            # register only once
            self._scheduled_update = True
            if this_is_js():
                #setTimeout(self._handle_now_callback.bind(self), 0)
                loop.call_later(self._handle_now_callback.bind(self))
            else:
                loop.call_later(self._handle_now_callback)
        self._pending.append((label, ev))
    def _handle_now_callback(self):
        # Clear the flag before handling, so handlers can reschedule.
        self._scheduled_update = False
        self.handle_now()
    def handle_now(self):
        """ Invoke a call to the handler function with all pending
        events. This is normally called in a next event loop iteration
        when an event is scheduled for this handler, but it can also
        be called manually to force the handler to process pending
        events *now*.
        """
        # Collect pending events and clear current list
        events, reconnect = self._collect()
        self._pending = []
        # Reconnect (dynamism)
        for index in reconnect:
            self._connect_to_event(index)
        # Collect newly created events (corresponding to props)
        events2, reconnect2 = self._collect()
        if not len(reconnect2):
            # Only merge new events when they trigger no further
            # reconnections; otherwise they stay pending for the next pass.
            events = events + events2
            self._pending = []
        # Handle events
        if len(events):
            if not this_is_js():
                logger.debug('Handler %s is processing %i events' %
                             (self._name, len(events)))
            try:
                self(*events)
            except Exception as err:
                if this_is_js():
                    console.error(err)
                else:
                    err.skip_tb = 2  # hide internal frames from the logged traceback
                    logger.exception(err)
    def _collect(self):
        """ Get list of events and reconnect-events from list of pending events.
        """
        events = []
        reconnect = []
        for label, ev in self._pending:
            if label.startswith('reconnect_'):
                # The label encodes which connection index must reconnect.
                index = int(label.split('_')[-1])
                reconnect.append(index)
            else:
                events.append(ev)
        return events, reconnect
    ## Connecting
    def dispose(self):
        """ Cleanup any references.
        Disconnects all connections, and cancels all pending events.
        """
        if not this_is_js():
            logger.debug('Disposing Handler %r ' % self)
        for connection in self._connections:
            while len(connection.objects):
                ob, name = connection.objects.pop(0)
                ob.disconnect(name, self)
        while len(self._pending):
            self._pending.pop()  # no list.clear on legacy py
    def _clear_hasevents_refs(self, ob):
        """ Clear all references to the given HasEvents instance. This is
        called from a HasEvents' dispose() method. This handler remains
        working, but wont receive events from that object anymore.
        """
        for connection in self._connections:
            # Iterate backwards so pop(i) does not shift unvisited items.
            for i in range(len(connection.objects)-1, -1, -1):
                if connection.objects[i][0] is ob:
                    connection.objects.pop(i)
        # Do not clear pending events. This handler is assumed to continue
        # working, and should thus handle its pending events at some point,
        # at which point it cannot hold any references to ob anymore.
    def _connect_to_event(self, index):
        """ Connect one connection.
        """
        connection = self._connections[index]
        # Disconnect
        while len(connection.objects):
            ob, name = connection.objects.pop(0)
            ob.disconnect(name, self)
        path = connection.fullname.replace('.*', '*').split('.')[:-1]
        # Obtain root object and setup connections
        ob = self._ob1()
        if ob is not None:
            self._seek_event_object(index, path, ob)
        # Verify
        if not connection.objects:
            raise RuntimeError('Could not connect to %r' % connection.fullname)
        # Connect
        for ob, type in connection.objects:
            ob._register_handler(type, self)
    def _seek_event_object(self, index, path, ob):
        """ Seek an event object based on the name (PyScript compatible).
        """
        connection = self._connections[index]
        # Done traversing name: add to list or fail
        if ob is None or not len(path):
            if ob is None or not hasattr(ob, '_IS_HASEVENTS'):
                return  # we cannot seek further
            connection.objects.append((ob, connection.type))
            return  # found it
        # Resolve name
        obname_full, path = path[0], path[1:]
        obname = obname_full.rstrip('*')
        selector = obname_full[len(obname):]
        # Internally, 3-star notation is used for optional selectors
        if selector == '***':
            # Deliberately no return: an optional selector is tried both
            # with and without descending into the named attribute.
            self._seek_event_object(index, path, ob)
        # Select object
        if hasattr(ob, '_IS_HASEVENTS') and obname in ob.__properties__:
            # Property on a HasEvents: register a reconnect label so the
            # connection is re-established when the property changes.
            name_label = obname + ':reconnect_' + str(index)
            connection.objects.append((ob, name_label))
            ob = getattr(ob, obname, None)
        else:
            ob = getattr(ob, obname, None)
        # Look inside?
        # NOTE(review): `selector in '***'` also matches the empty string,
        # so plain names whose value is a tuple/list recurse into the
        # elements as well — confirm this is intended before changing.
        if selector in '***' and isinstance(ob, (tuple, list)):
            if len(selector) > 1:
                path = [obname + '***'] + path  # recurse (avoid insert for space)
            for sub_ob in ob:
                self._seek_event_object(index, path, sub_ob)
            return
        elif selector == '*':  # "**" is recursive, so allow more
            t = "Invalid connection {name_full} because {name} is not a tuple/list."
            raise RuntimeError(t.replace("{name_full}", obname_full)
                               .replace("{name}", obname))
        else:
            return self._seek_event_object(index, path, ob)
| {
"content_hash": "5234ce2be4aa2bcec1ff9c3d1f30f0c0",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 87,
"avg_line_length": 36.566326530612244,
"alnum_prop": 0.5790428352169666,
"repo_name": "JohnLunzer/flexx",
"id": "577e6a5d9247b75c2e3eb6a4575ac42a42c246ce",
"size": "14334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexx/event/_handler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3085"
},
{
"name": "JavaScript",
"bytes": "2932"
},
{
"name": "Python",
"bytes": "1193274"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, column_property, foreign, joinedload, contains_eager
from sqlalchemy.sql import select, func, exists
from sqlalchemy.dialects.postgresql import UUID, JSONB
from geoalchemy2 import Geometry
from geoalchemy2.shape import to_shape
from geojson import Feature
from flask import g
from flask_sqlalchemy import BaseQuery
from werkzeug.exceptions import NotFound
from pypnnomenclature.models import TNomenclatures
from pypnusershub.db.models import User
from utils_flask_sqla.serializers import serializable, SERIALIZERS
from utils_flask_sqla_geo.serializers import geoserializable, shapeserializable
from utils_flask_sqla_geo.mixins import GeoFeatureCollectionMixin
from pypn_habref_api.models import Habref
from apptax.taxonomie.models import Taxref
from geonature.core.gn_meta.models import TDatasets, TAcquisitionFramework
from geonature.core.ref_geo.models import LAreas
from geonature.core.gn_commons.models import THistoryActions, TValidations, last_validation, \
TMedias, TModules
from geonature.utils.env import DB, db
from geonature.utils.config import config
@serializable
class TSources(DB.Model):
    """ORM model for ``gn_synthese.t_sources``: the origin (source
    application or import) of synthese records."""
    __tablename__ = "t_sources"
    __table_args__ = {"schema": "gn_synthese"}
    id_source = DB.Column(DB.Integer, primary_key=True)
    name_source = DB.Column(DB.Unicode)
    desc_source = DB.Column(DB.Unicode)
    entity_source_pk_field = DB.Column(DB.Unicode)
    url_source = DB.Column(DB.Unicode)
    meta_create_date = DB.Column(DB.DateTime)
    meta_update_date = DB.Column(DB.DateTime)
# Many-to-many association table linking synthese records to observer users.
cor_observer_synthese = DB.Table("cor_observer_synthese",
DB.Column("id_synthese", DB.Integer, ForeignKey("gn_synthese.synthese.id_synthese"), primary_key=True
),
DB.Column("id_role", DB.Integer, ForeignKey(User.id_role), primary_key=True),
schema='gn_synthese',
)
@serializable
class CorObserverSynthese(DB.Model):
    """Mapped class over ``cor_observer_synthese`` (``extend_existing``
    reuses the Table object declared just above)."""
    __tablename__ = "cor_observer_synthese"
    __table_args__ = {"schema": "gn_synthese", "extend_existing": True}
    id_synthese = DB.Column(
        DB.Integer, ForeignKey("gn_synthese.synthese.id_synthese"), primary_key=True
    )
    id_role = DB.Column(DB.Integer, ForeignKey(User.id_role), primary_key=True)
# Many-to-many association table linking synthese records to geographic areas.
corAreaSynthese = DB.Table("cor_area_synthese",
DB.Column("id_synthese", DB.Integer, ForeignKey("gn_synthese.synthese.id_synthese"), primary_key=True),
DB.Column("id_area", DB.Integer, ForeignKey("ref_geo.l_areas.id_area"), primary_key=True),
schema='gn_synthese',
)
@serializable
class VSyntheseDecodeNomenclatures(DB.Model):
    """Read-only mapping of the ``v_synthese_decode_nomenclatures`` view,
    which exposes the nomenclature labels (as text) for each synthese row."""
    __tablename__ = "v_synthese_decode_nomenclatures"
    __table_args__ = {"schema": "gn_synthese"}
    id_synthese = DB.Column(DB.Integer, primary_key=True)
    nat_obj_geo = DB.Column(DB.Unicode)
    grp_typ = DB.Column(DB.Unicode)
    obs_technique = DB.Column(DB.Unicode)
    bio_status = DB.Column(DB.Unicode)
    bio_condition = DB.Column(DB.Unicode)
    naturalness = DB.Column(DB.Unicode)
    exist_proof = DB.Column(DB.Unicode)
    valid_status = DB.Column(DB.Unicode)
    diffusion_level = DB.Column(DB.Unicode)
    life_stage = DB.Column(DB.Unicode)
    sex = DB.Column(DB.Unicode)
    obj_count = DB.Column(DB.Unicode)
    type_count = DB.Column(DB.Unicode)
    sensitivity = DB.Column(DB.Unicode)
    observation_status = DB.Column(DB.Unicode)
    blurring = DB.Column(DB.Unicode)
    source_status = DB.Column(DB.Unicode)
    occ_behaviour = DB.Column(DB.Unicode)
    occ_stat_biogeo = DB.Column(DB.Unicode)
class SyntheseQuery(GeoFeatureCollectionMixin, BaseQuery):
    """Custom query class for the Synthese model.
    Provides eager-loading helpers for nomenclatures / last validation and
    a scope-based permission filter.
    """
    def join_nomenclatures(self):
        """Eager-load every nomenclature relationship declared on Synthese."""
        return self.options(*[joinedload(n) for n in Synthese.nomenclature_fields])
    def lateraljoin_last_validation(self):
        """Outer-join each row to its most recent validation via a lateral subquery."""
        subquery = (
            TValidations.query
            .filter(TValidations.uuid_attached_row == Synthese.unique_id_sinp)
            .limit(1)
            .subquery()
            .lateral('last_validation')
        )
        return self.outerjoin(subquery, sa.true()) \
            .options(contains_eager(Synthese.last_validation, alias=subquery))
    def filter_by_scope(self, scope, user=None):
        """Restrict the query to rows *user* may access for the given scope.
        Scope 0 filters everything out; scopes 1 and 2 keep rows the user
        digitised, observed, or whose dataset is readable by the user; any
        other scope (e.g. 3) leaves the query unrestricted.
        """
        if user is None:
            user = g.current_user  # requires an active request context
        if scope == 0:
            self = self.filter(sa.false())
        elif scope in (1, 2):
            datasets = (
                TDatasets.query
                .filter_by_readable(user)
                .with_entities(TDatasets.id_dataset)
                .all()
            )
            # Fixes: `or_` was never imported (NameError at runtime) -> sa.or_;
            # `id_digitizer` mis-spelled the model column `id_digitiser`.
            self = self.filter(sa.or_(
                Synthese.id_digitiser == user.id_role,
                Synthese.cor_observers.any(id_role=user.id_role),
                Synthese.id_dataset.in_([ds.id_dataset for ds in datasets]),
            ))
        return self
@serializable
@geoserializable(geoCol="the_geom_4326", idCol="id_synthese")
@shapeserializable
class Synthese(DB.Model):
    """ORM model for the central ``gn_synthese.synthese`` table.
    Each row is one observation, linked to its source, module, dataset,
    taxon, habitat and a large set of nomenclature items.
    """
    __tablename__ = "synthese"
    __table_args__ = {"schema": "gn_synthese"}
    query_class = SyntheseQuery
    # Names of the nomenclature relationships below; used e.g. by
    # SyntheseQuery.join_nomenclatures() to eager-load them all.
    nomenclature_fields = [
        'nomenclature_geo_object_nature',
        'nomenclature_grp_typ',
        'nomenclature_obs_technique',
        'nomenclature_bio_status',
        'nomenclature_bio_condition',
        'nomenclature_naturalness',
        'nomenclature_exist_proof',
        'nomenclature_valid_status',
        'nomenclature_diffusion_level',
        'nomenclature_life_stage',
        'nomenclature_sex',
        'nomenclature_obj_count',
        'nomenclature_type_count',
        'nomenclature_sensitivity',
        'nomenclature_observation_status',
        'nomenclature_blurring',
        'nomenclature_source_status',
        'nomenclature_info_geo_type',
        'nomenclature_behaviour',
        'nomenclature_biogeo_status',
        'nomenclature_determination_method',
    ]
    id_synthese = DB.Column(DB.Integer, primary_key=True)
    unique_id_sinp = DB.Column(UUID(as_uuid=True))
    unique_id_sinp_grp = DB.Column(UUID(as_uuid=True))
    id_source = DB.Column(DB.Integer, ForeignKey(TSources.id_source))
    source = relationship(TSources)
    id_module = DB.Column(DB.Integer, ForeignKey(TModules.id_module))
    module = DB.relationship(TModules)
    entity_source_pk_value = DB.Column(DB.Integer)  # FIXME varchar in db!
    id_dataset = DB.Column(DB.Integer, ForeignKey(TDatasets.id_dataset))
    dataset = DB.relationship(TDatasets, backref=DB.backref('synthese_records', lazy='dynamic'))
    grp_method = DB.Column(DB.Unicode(length=255))
    id_nomenclature_geo_object_nature = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_geo_object_nature = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_geo_object_nature])
    id_nomenclature_grp_typ = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_grp_typ = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_grp_typ])
    id_nomenclature_obs_technique = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_obs_technique = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_obs_technique])
    id_nomenclature_bio_status = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_bio_status = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_bio_status])
    id_nomenclature_bio_condition = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_bio_condition = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_bio_condition])
    id_nomenclature_naturalness = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_naturalness = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_naturalness])
    # NOTE: exist_proof was declared twice in the original; the duplicate
    # (which shadowed this definition) has been removed.
    id_nomenclature_exist_proof = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_exist_proof = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_exist_proof])
    id_nomenclature_valid_status = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_valid_status = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_valid_status])
    id_nomenclature_diffusion_level = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_diffusion_level = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_diffusion_level])
    id_nomenclature_life_stage = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_life_stage = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_life_stage])
    id_nomenclature_sex = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_sex = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_sex])
    id_nomenclature_obj_count = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_obj_count = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_obj_count])
    id_nomenclature_type_count = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_type_count = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_type_count])
    id_nomenclature_sensitivity = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_sensitivity = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_sensitivity])
    id_nomenclature_observation_status = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_observation_status = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_observation_status])
    id_nomenclature_blurring = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_blurring = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_blurring])
    id_nomenclature_source_status = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_source_status = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_source_status])
    id_nomenclature_info_geo_type = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_info_geo_type = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_info_geo_type])
    id_nomenclature_behaviour = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_behaviour = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_behaviour])
    id_nomenclature_biogeo_status = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_biogeo_status = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_biogeo_status])
    id_nomenclature_determination_method = db.Column(db.Integer, ForeignKey(TNomenclatures.id_nomenclature))
    nomenclature_determination_method = db.relationship(TNomenclatures, foreign_keys=[id_nomenclature_determination_method])
    reference_biblio = DB.Column(DB.Unicode(length=5000))
    count_min = DB.Column(DB.Integer)
    count_max = DB.Column(DB.Integer)
    cd_nom = DB.Column(DB.Integer, ForeignKey(Taxref.cd_nom))
    taxref = relationship(Taxref)
    cd_hab = DB.Column(DB.Integer, ForeignKey(Habref.cd_hab))
    habitat = relationship(Habref)
    nom_cite = DB.Column(DB.Unicode(length=1000), nullable=False)
    meta_v_taxref = DB.Column(DB.Unicode(length=50))
    sample_number_proof = DB.Column(DB.UnicodeText)
    digital_proof = DB.Column(DB.UnicodeText)
    non_digital_proof = DB.Column(DB.UnicodeText)
    altitude_min = DB.Column(DB.Integer)
    altitude_max = DB.Column(DB.Integer)
    depth_min = DB.Column(DB.Integer)
    depth_max = DB.Column(DB.Integer)
    place_name = DB.Column(DB.Unicode(length=500))
    the_geom_4326 = DB.Column(Geometry("GEOMETRY", 4326))
    the_geom_point = DB.Column(Geometry("GEOMETRY", 4326))
    the_geom_local = DB.Column(Geometry("GEOMETRY", config["LOCAL_SRID"]))
    precision = DB.Column(DB.Integer)
    id_area_attachment = DB.Column(DB.Integer)
    date_min = DB.Column(DB.DateTime, nullable=False)
    date_max = DB.Column(DB.DateTime, nullable=False)
    validator = DB.Column(DB.Unicode(length=1000))
    validation_comment = DB.Column(DB.Unicode)
    observers = DB.Column(DB.Unicode(length=1000))
    determiner = DB.Column(DB.Unicode(length=1000))
    id_digitiser = DB.Column(DB.Integer, ForeignKey(User.id_role))
    digitiser = db.relationship(User, foreign_keys=[id_digitiser])
    comment_context = DB.Column(DB.UnicodeText)
    comment_description = DB.Column(DB.UnicodeText)
    additional_data = DB.Column(JSONB)
    meta_validation_date = DB.Column(DB.DateTime)
    meta_create_date = DB.Column(DB.DateTime)
    meta_update_date = DB.Column(DB.DateTime)
    last_action = DB.Column(DB.Unicode)
    areas = relationship('LAreas', secondary=corAreaSynthese)
    validations = relationship(TValidations, backref='attached_row')
    last_validation = relationship(last_validation,
                                   uselist=False,
                                   viewonly=True)
    medias = relationship(
        TMedias,
        primaryjoin=(TMedias.uuid_attached_row == foreign(unique_id_sinp)),
        uselist=True
    )
    cor_observers = DB.relationship(User, secondary=cor_observer_synthese)
    def get_geofeature(self, recursif=True, fields=None):
        """Return this record as a GeoJSON feature built on the_geom_4326."""
        return self.as_geofeature("the_geom_4326", "id_synthese", recursif, fields=fields)
    def has_instance_permission(self, scope):
        """Return whether the current user may access this record for *scope*.
        Scope 0 = nothing; 1/2 = own data (digitiser, observer, or via the
        dataset); 3 = everything. Unknown scopes fall through and return
        None (falsy), as in the original implementation.
        """
        if scope == 0:
            return False
        elif scope in (1, 2):
            if g.current_user == self.digitiser:
                return True
            if g.current_user in self.cor_observers:
                return True
            return self.dataset.has_instance_permission(scope)
        elif scope == 3:
            return True
@serializable
class CorAreaSynthese(DB.Model):
    """Mapped class over ``cor_area_synthese`` (``extend_existing`` reuses
    the corAreaSynthese Table declared earlier in this module)."""
    __tablename__ = "cor_area_synthese"
    __table_args__ = {"schema": "gn_synthese", "extend_existing": True}
    id_synthese = DB.Column(DB.Integer, ForeignKey("gn_synthese.synthese.id_synthese"), primary_key=True)
    id_area = DB.Column(DB.Integer, ForeignKey("ref_geo.l_areas.id_area"), primary_key=True)
@serializable
class CorSensitivitySynthese(DB.Model):
    """ORM model for ``gn_sensitivity.cor_sensitivity_synthese``: the
    sensitivity nomenclature attached to a synthese row (by UUID)."""
    __tablename__ = "cor_sensitivity_synthese"
    __table_args__ = {"schema": "gn_sensitivity"}
    uuid_attached_row = DB.Column(UUID(as_uuid=True), primary_key=True)
    id_nomenclature_sensitivity = DB.Column(DB.Integer, primary_key=True)
    sensitivity_comment = DB.Column(DB.Text)
    meta_create_date = DB.Column(DB.DateTime)
    meta_update_date = DB.Column(DB.DateTime)
@serializable
class DefaultsNomenclaturesValue(DB.Model):
    """ORM model for ``gn_synthese.defaults_nomenclatures_value``: default
    nomenclature per type / organism / taxonomic group."""
    __tablename__ = "defaults_nomenclatures_value"
    __table_args__ = {"schema": "gn_synthese"}
    mnemonique_type = DB.Column(DB.Integer, primary_key=True)
    id_organism = DB.Column(DB.Integer, primary_key=True)
    regne = DB.Column(DB.Unicode, primary_key=True)
    group2_inpn = DB.Column(DB.Unicode, primary_key=True)
    id_nomenclature = DB.Column(DB.Integer)
@serializable
@geoserializable
class VSyntheseForWebApp(DB.Model):
    """Read-only mapping of the ``v_synthese_for_web_app`` view: a
    denormalized, flat representation of synthese rows used by the
    web frontend."""
    __tablename__ = "v_synthese_for_web_app"
    __table_args__ = {"schema": "gn_synthese"}
    id_synthese = DB.Column(
        DB.Integer,
        ForeignKey("gn_synthese.v_synthese_decode_nomenclatures.id_synthese"),
        primary_key=True,
    )
    unique_id_sinp = DB.Column(UUID(as_uuid=True))
    unique_id_sinp_grp = DB.Column(UUID(as_uuid=True))
    id_source = DB.Column(DB.Integer)
    entity_source_pk_value = DB.Column(DB.Integer)
    id_dataset = DB.Column(DB.Integer)
    dataset_name = DB.Column(DB.Integer)
    id_acquisition_framework = DB.Column(DB.Integer)
    count_min = DB.Column(DB.Integer)
    count_max = DB.Column(DB.Integer)
    cd_nom = DB.Column(DB.Integer)
    cd_ref = DB.Column(DB.Unicode)
    nom_cite = DB.Column(DB.Unicode)
    nom_valide = DB.Column(DB.Unicode)
    nom_vern = DB.Column(DB.Unicode)
    lb_nom = DB.Column(DB.Unicode)
    meta_v_taxref = DB.Column(DB.Unicode)
    sample_number_proof = DB.Column(DB.Unicode)
    digital_proof = DB.Column(DB.Unicode)
    non_digital_proof = DB.Column(DB.Unicode)
    altitude_min = DB.Column(DB.Integer)
    altitude_max = DB.Column(DB.Integer)
    depth_min = DB.Column(DB.Integer)
    depth_max = DB.Column(DB.Integer)
    place_name = DB.Column(DB.Unicode)
    precision = DB.Column(DB.Integer)
    the_geom_4326 = DB.Column(Geometry("GEOMETRY", 4326))
    date_min = DB.Column(DB.DateTime)
    date_max = DB.Column(DB.DateTime)
    validator = DB.Column(DB.Unicode)
    validation_comment = DB.Column(DB.Unicode)
    observers = DB.Column(DB.Unicode)
    determiner = DB.Column(DB.Unicode)
    id_digitiser = DB.Column(DB.Integer)
    comment_context = DB.Column(DB.Unicode)
    comment_description = DB.Column(DB.Unicode)
    meta_validation_date = DB.Column(DB.DateTime)
    meta_create_date = DB.Column(DB.DateTime)
    meta_update_date = DB.Column(DB.DateTime)
    last_action = DB.Column(DB.Unicode)
    id_nomenclature_geo_object_nature = DB.Column(DB.Integer)
    id_nomenclature_info_geo_type = DB.Column(DB.Integer)
    id_nomenclature_grp_typ = DB.Column(DB.Integer)
    grp_method = DB.Column(DB.Unicode)
    id_nomenclature_obs_technique = DB.Column(DB.Integer)
    id_nomenclature_bio_status = DB.Column(DB.Integer)
    id_nomenclature_bio_condition = DB.Column(DB.Integer)
    id_nomenclature_naturalness = DB.Column(DB.Integer)
    id_nomenclature_exist_proof = DB.Column(DB.Integer)
    id_nomenclature_valid_status = DB.Column(DB.Integer)
    id_nomenclature_diffusion_level = DB.Column(DB.Integer)
    id_nomenclature_life_stage = DB.Column(DB.Integer)
    id_nomenclature_sex = DB.Column(DB.Integer)
    id_nomenclature_obj_count = DB.Column(DB.Integer)
    id_nomenclature_type_count = DB.Column(DB.Integer)
    id_nomenclature_sensitivity = DB.Column(DB.Integer)
    id_nomenclature_observation_status = DB.Column(DB.Integer)
    id_nomenclature_blurring = DB.Column(DB.Integer)
    id_nomenclature_source_status = DB.Column(DB.Integer)
    id_nomenclature_determination_method = DB.Column(DB.Integer)
    id_nomenclature_behaviour = DB.Column(DB.Integer)
    reference_biblio = DB.Column(DB.Unicode)
    name_source = DB.Column(DB.Unicode)
    url_source = DB.Column(DB.Unicode)
    st_asgeojson = DB.Column(DB.Unicode)
    # True when at least one media row is attached to this record's UUID.
    has_medias = column_property(
        exists([TMedias.id_media]).
        where(TMedias.uuid_attached_row == unique_id_sinp)
    )
    def get_geofeature(self, recursif=False, fields=None):
        """Return this record as a GeoJSON feature built on the_geom_4326.
        Fix: the original used a mutable default (``fields=[]``); a None
        sentinel is used instead, still passing ``[]`` when omitted.
        """
        if fields is None:
            fields = []
        return self.as_geofeature("the_geom_4326", "id_synthese", recursif, fields=fields)
# Not used - kept as an example of an ordered serialization.
def synthese_export_serialization(cls):
    """Class decorator defining a custom serialization for the
    v_synthese_for_export view.
    Adds an ``as_dict_ordered`` method which preserves the attribute order
    as defined in the model (used for exports) and renames the columns as
    configured in ``config["SYNTHESE"]["EXPORT_COLUMNS"]``, plus an
    ``as_geofeature_ordered`` method producing a GeoJSON Feature with the
    same ordered properties.
    """
    EXPORT_COLUMNS = config["SYNTHESE"]["EXPORT_COLUMNS"]
    # Model attribute names to export, in configuration order.
    formated_default_columns = list(EXPORT_COLUMNS)
    # List of tuples (attribute name, serializer function).
    cls_db_cols_and_serializer = []
    # Columns of the class that appear in the export config
    # (used to generate shapefiles).
    cls.db_cols = []
    for key in formated_default_columns:
        try:
            # Get the class attribute named in the export config.
            cls_attri = getattr(cls, key)
            # Geometry columns are handled separately, not serialized here.
            if not cls_attri.type.__class__.__name__ == "Geometry":
                cls_db_cols_and_serializer.append(
                    (
                        cls_attri.key,
                        SERIALIZERS.get(cls_attri.type.__class__.__name__.lower(), lambda x: x),
                    )
                )
            cls.db_cols.append(cls_attri)
        except AttributeError:
            # Silently skip config entries that do not exist on the model.
            pass
    def serialize_order_fn(self):
        """Serialize the instance as an OrderedDict keyed by export labels."""
        order_dict = OrderedDict()
        for item, _serializer in cls_db_cols_and_serializer:
            order_dict.update({EXPORT_COLUMNS.get(item): _serializer(getattr(self, item))})
        return order_dict
    def serialize_geofn(self, geoCol, idCol):
        """Serialize the instance as a GeoJSON Feature with ordered properties."""
        if not getattr(self, geoCol) is None:
            geometry = to_shape(getattr(self, geoCol))
        else:
            # Fallback geometry for rows without a geometry value.
            geometry = {"type": "Point", "coordinates": [0, 0]}
        feature = Feature(
            id=str(getattr(self, idCol)), geometry=geometry, properties=self.as_dict_ordered(),
        )
        return feature
    cls.as_dict_ordered = serialize_order_fn
    cls.as_geofeature_ordered = serialize_geofn
    return cls
@serializable
class VColorAreaTaxon(DB.Model):
    """Read-only mapping of the ``v_color_taxon_area`` view: per taxon and
    area, the observation count, last observation date and a display color."""
    __tablename__ = "v_color_taxon_area"
    __table_args__ = {"schema": "gn_synthese"}
    cd_nom = DB.Column(DB.Integer(), ForeignKey(Taxref.cd_nom), primary_key=True)
    id_area = DB.Column(DB.Integer(), ForeignKey(LAreas.id_area), primary_key=True)
    nb_obs = DB.Column(DB.Integer())
    last_date = DB.Column(DB.DateTime())
    color = DB.Column(DB.Unicode())
| {
"content_hash": "bd3f57c7aca85ed63f0fb116d028799e",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 124,
"avg_line_length": 46.0468085106383,
"alnum_prop": 0.6968394787912393,
"repo_name": "PnEcrins/GeoNature",
"id": "031f186dc43ee7ca1d2ecb6edf4c367ad3f7f4d5",
"size": "21647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/geonature/core/gn_synthese/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1931"
},
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "CSS",
"bytes": "763718"
},
{
"name": "HTML",
"bytes": "651"
},
{
"name": "JavaScript",
"bytes": "16182773"
},
{
"name": "PHP",
"bytes": "4058658"
},
{
"name": "PLpgSQL",
"bytes": "893372"
},
{
"name": "Shell",
"bytes": "33147"
}
],
"symlink_target": ""
} |
import os
import crypt
from django.db import models
from django.conf import settings
from django.forms import CharField, Form, PasswordInput
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import get_random_string
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.hashers import make_password
from filebrowser.fields import FileBrowseField
from datetime import datetime, timedelta
class FTPUser(AbstractUser):
    """Custom Django user model for FTP accounts.
    Extends AbstractUser with proftpd-oriented fields (home directory,
    quota, transfer limits, uid/gid) plus a separate crypt(3) hash
    (``ftppass``) that the FTP server authenticates against.
    """
    company = models.CharField(max_length=50, blank=True)
    homedir = FileBrowseField(max_length=256, blank=False)
    quota = models.IntegerField(_("Size Quota (MB)"), help_text=_("Size quota for the user (MB)"),default=0)
    upload_limit = models.IntegerField(_("Upload Limit (Kb/s)"), help_text=_("Upload limit (Kb/s)"),default=0)
    download_limit = models.IntegerField(_("Download Limit (Kb/s)"), help_text=_("Download limit (Kb/s)"),default=0)
    ip_address = models.CharField(_("IP Address"), max_length=15, help_text=_("IP Address"),default="*")
    expiry_date = models.DateTimeField(_("Expiry date"), null=True, blank=True)
    ftppass = models.CharField(max_length=50, blank=False, null=False, editable=False)
    # NOTE(review): the help_text on the two fields below looks copy-pasted
    # from `quota` ("Size quota for the user (MB)") — confirm and fix.
    ftpuid = models.IntegerField(_("User ID"), help_text=_("Size quota for the user (MB)"),default=0)
    ftpgid = models.IntegerField(_("Group ID"), help_text=_("Size quota for the user (MB)"),default=0)
    def set_password(self, raw_password):
        # Keep the Django hash and the FTP crypt hash in sync.
        self.password = make_password(raw_password)
        self.set_ftppass(raw_password)
    def set_ftppass(self, raw_password):
        # NOTE(review): a 2-character salt selects legacy DES crypt, which is
        # weak by modern standards, and the `crypt` module is deprecated
        # (removed in Python 3.13). Changing the scheme would invalidate
        # existing stored hashes, so this is only flagged here.
        self.ftppass = crypt.crypt(raw_password, get_random_string(2))
    def __unicode__(self):
        # Python 2 style; on Python 3 __str__ would be used instead.
        return self.username
    def __repr__(self):
        return repr(u'<FTPUser: %s>' % self.username)
    class Meta:
        verbose_name = 'FTP User'
        verbose_name_plural = 'FTP Users'
class FTPClient(FTPUser):
    """Proxy model for regular (non-staff) FTP client accounts."""
    def __init__(self, *args, **kwargs):
        # Client accounts default to the client uid/gid from settings.
        # NOTE(review): this mutates the shared field objects, affecting all
        # FTPUser subclasses instantiated afterwards — kept as designed.
        self._meta.get_field('ftpuid').default = settings.CLIENT_UID
        self._meta.get_field('ftpgid').default = settings.CLIENT_GID
        # Was super(FTPUser, self): skipping FTPUser in the MRO. FTPUser
        # defines no __init__, so starting from FTPClient is equivalent
        # and correct.
        super(FTPClient, self).__init__(*args, **kwargs)
    def __repr__(self):
        return repr(u'<FTPClient: %s>' % self.username)
    def save(self, *args, **kwargs):
        """Force non-staff status, then set expiry and homedir defaults."""
        self.is_staff = False
        self.is_superuser = False
        # Was `is not 0`, which compares identity rather than value and only
        # worked by accident of CPython's small-int caching; use != instead.
        if self.pk is None and settings.USER_EXPIRY_DAYS != 0:
            self.expiry_date = datetime.now()+timedelta(days=settings.USER_EXPIRY_DAYS)
        if not self.homedir:
            self.homedir = "{}/{}".format(settings.FTP_CHROOT, self.username)
        if not os.path.isdir(self.homedir.path):
            os.mkdir(self.homedir.path)
        super(FTPClient, self).save(*args, **kwargs)
    class Meta:
        proxy = True
        verbose_name = 'FTP Client'
        verbose_name_plural = 'FTP Clients'
class FTPStaff(FTPUser):
    """Proxy model for staff FTP accounts (rooted at the shared chroot)."""
    def __init__(self, *args, **kwargs):
        # Staff accounts default to the staff uid/gid from settings.
        self._meta.get_field('ftpuid').default = settings.STAFF_UID
        self._meta.get_field('ftpgid').default = settings.STAFF_GID
        # Was super(FTPUser, self): skipping FTPUser in the MRO. FTPUser
        # defines no __init__, so starting from FTPStaff is equivalent
        # and correct.
        super(FTPStaff, self).__init__(*args, **kwargs)
    def __repr__(self):
        return repr(u'<FTPStaff: %s>' % self.username)
    def save(self, *args, **kwargs):
        """Force staff status, default the homedir, add to 'FTP Staff' group."""
        self.is_staff = True
        if not self.homedir:
            self.homedir = settings.FTP_CHROOT
        super(FTPStaff, self).save(*args, **kwargs)
        try:
            g = Group.objects.get(name='FTP Staff')
        # Was bare `except DoesNotExist:` — an unresolved name that would
        # raise NameError instead of being caught; Django exposes the
        # exception on the model class.
        except Group.DoesNotExist:
            pass
        else:
            self.groups.add(g)
    class Meta:
        proxy = True
        verbose_name = 'FTP Staff'
        verbose_name_plural = 'FTP Staff'
| {
"content_hash": "4d5a194fdfd50f326b5a2776ac7e3afe",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 116,
"avg_line_length": 37.09,
"alnum_prop": 0.6443785386896738,
"repo_name": "fim/purefap",
"id": "3b82e775ff7c018180692b14df2e96f10ce30ada",
"size": "3709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "purefap/core/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28974"
}
],
"symlink_target": ""
} |
import os
import sys
import string
import random
from os import environ
from socket import getfqdn
from tempfile import NamedTemporaryFile
from lib.python.utils import detect_debug_mode, assert_command, validate_env
from lib.python.utils import mkdir_p
from lib.python.utils import log_and_stdout, apt_get_update
# Oh my oh my...this is so nasty. Better way/place to handle this?
# Import-time bootstrap: on a bare image, refresh apt, install pip3, and pin
# PyYAML so that the `import yaml` below succeeds before anything else runs.
apt_get_update()
assert_command("apt-get install -y python3-pip",
               "Failed to install Python3 pip!")
assert_command("pip3 install PyYAML==3.11", "Failed to install PyYAML!")
import yaml
def install_dependencies():
    """
    Install some tooling which may be needed to bootstrap Puppet.
    """
    # Make dpkg/debconf fully non-interactive for the apt operations below.
    environ['DEBIAN_FRONTEND'] = 'noninteractive'
    environ['DEBCONF_INTERACTIVE_SEEN'] = 'true'
    apt_get_update()

    required_debs = 'wget'
    unwanted_debs = 'puppet'  # distro puppet would conflict with ours
    assert_command('apt-get install -y ' + required_debs,
                   'Unable to install required .debs!')
    assert_command('apt-get remove --purge -y ' + unwanted_debs,
                   'Unable to uninstall blacklisted .debs!')
def configure_puppet_external_facts():
    """
    Create external Facts from misc RightInputs.

    PUPPET_CUSTOM_FACTS is a comma-separated list of key=value pairs; each
    pair becomes one external Fact written to facter's facts.d directory.
    Exits the process on I/O failure.
    """
    if 'PUPPET_CUSTOM_FACTS' in environ:
        # take the envvar apart and reconstitute as dict
        # BUG FIX: raw string -- '\w' in a plain string literal is an invalid
        # escape sequence (DeprecationWarning, a SyntaxError in the future).
        validate_env('PUPPET_CUSTOM_FACTS', r'^\w+=.+(,\w+=.+)*$')
        fact_dict = {}
        facts = environ['PUPPET_CUSTOM_FACTS'].split(',')
        for fact in facts:
            log_and_stdout("fact: {}".format(str(fact)))
            # split on the first '=' only, so values may themselves contain '='
            (key, value) = fact.split('=', 1)
            fact_dict[key] = value
        # construct some YAML and dump it into external fact file
        try:
            mkdir_p('/etc/facter/facts.d')
            with open('/etc/facter/facts.d/nextdoor_misc_rightscale_inputs.yaml', 'w') as outfile:
                outfile.write(
                    yaml.dump(fact_dict, explicit_start=True, default_flow_style=False))
        except IOError as e:
            sys.exit(" *** {} :: {} :: {} *** ".format(e.errno,
                                                       e.filename, e.strerror))
def bootstrap_puppet_agent_config():
    """
    Adjust various settings in puppet.conf and create an external Facts file
    for Puppet-specific stuff.

    Validates the PUPPET_* environment inputs, resolves the node-name
    identification scheme (see table below), writes the agent settings via
    `puppet config set`, and drops matching external facts. Exits the
    process on I/O failure while writing the facts file.
    """
    dmc = '^.+$'  # "don't much care": any non-empty value passes
    for key, regex in {
            'PUPPET_ENVIRONMENT_NAME': dmc,
            'PUPPET_SERVER_HOSTNAME': dmc,
            'PUPPET_CA_SERVER': dmc,
            'PUPPET_ENABLE_REPORTS': '^(true|false)$',
            'PUPPET_NODE_NAME': '^(cert|facter)$',
            'PUPPET_NODE_NAME_FACT': dmc
    }.items():
        validate_env(key, regex)
    #
    # The logic around how a node is identified based on:
    #
    # * puppet_node_name
    # * puppet_node_name_fact
    # * puppet_node_name_value
    #
    # is fairly ridiculous. I think the easiest way to document
    # the madness which follows is to draw a table.
    #
    # Those familiar with paleo-Puppet node classification techniques will
    # note that the above variables are just puppet.conf settings prefixed
    # with 'puppet_'.
    #
    # | puppet_node_name | puppet_node_name_fact | puppet_node_name_value |
    # |-------------------------------------------------------------------|
    # | cert             | $ facter hostname     | not referenced         |
    # |-------------------------------------------------------------------|
    # | facter           | if 'puppet_node'      | ^.+$ aka not nuthin'   |
    # |                  |------------------------------------------------|
    # |                  | if ! 'puppet_node'    | not referenced         |
    # ---------------------------------------------------------------------
    #
    puppet_node_name = environ['PUPPET_NODE_NAME']
    puppet_node_name_fact = environ['PUPPET_NODE_NAME_FACT']
    puppet_node_name_value = ''
    log_and_stdout(
        "Puppet node name resolution:: puppet_node_name: {}".format(puppet_node_name))
    if 'cert' == puppet_node_name:
        # cert-based identification always keys off the hostname fact.
        if 'hostname' != puppet_node_name_fact:
            log_and_stdout(
                "{} => {} forced to {} => {}".format(
                    'node_name_fact', puppet_node_name,
                    'node_name_fact', 'hostname'))
            puppet_node_name_fact = 'hostname'
    if 'facter' == puppet_node_name:
        if 'puppet_node' == puppet_node_name_fact:
            validate_env('PUPPET_NODE_NAME_VALUE', dmc)
            puppet_node_name_value = environ['PUPPET_NODE_NAME_VALUE']
            log_and_stdout(
                "puppet_node => {}".format(puppet_node_name_value))
        else:
            # BUG FIX: the original tested the local variable (always '' on
            # this branch) so the warning could never fire; test whether the
            # ignored input was actually supplied instead.
            if 'PUPPET_NODE_NAME_VALUE' in environ:
                log_and_stdout(
                    "Ignoring PUPPET_NODE_NAME_VALUE because PUPPET_NAME_FACT != 'puppet_node'")
    #
    # If all of the validations and fiddling about with puppet_node has
    # worked out then let's update the puppet.conf and some external facts.
    #
    # puppet.conf settings
    puppet_settings = {
        'environment': environ['PUPPET_ENVIRONMENT_NAME'],
        'server': environ['PUPPET_SERVER_HOSTNAME'],
        'ca_server': environ['PUPPET_CA_SERVER'],
        'report': environ['PUPPET_ENABLE_REPORTS'],
        'node_name': puppet_node_name,
        'node_name_fact': puppet_node_name_fact,
    }
    external_facts = {
        'puppet_environment': environ['PUPPET_ENVIRONMENT_NAME'],
        'puppet_server': environ['PUPPET_SERVER_HOSTNAME'],
        'puppet_ca_server': environ['PUPPET_CA_SERVER'],
    }
    if 'cert' == puppet_node_name:
        pass  # just here for completeness and transparency
    elif 'facter' == puppet_node_name:
        if 'puppet_node' == puppet_node_name_fact:
            # This could live in puppet.conf as node_name_value but this
            # makes it visible via 'facter puppet_node'.
            external_facts['puppet_node'] = "{}|{}".format(
                puppet_node_name_value, getfqdn())
    else:
        pass  # this here for completeness and transparency
    for setting, value in puppet_settings.items():
        assert_command(
            '/usr/bin/puppet config set {} {} --section agent'.format(
                setting, value),
            'Failed to set \'{}\' to \'{}\' in puppet.conf!'.format(setting, value))
    # Drop some external Facts for Puppet settings
    try:
        mkdir_p('/etc/facter/facts.d')
        with open('/etc/facter/facts.d/nextdoor_puppet.yaml', 'w') as outfile:
            outfile.write(
                yaml.dump(external_facts,
                          explicit_start=True,
                          default_flow_style=False))
    except IOError as e:
        # BUG FIX: was `e.strerr` -- the IOError attribute is `strerror`, so
        # the original raised AttributeError while formatting this message.
        sys.exit(" *** {} :: {} :: {} *** ".format(
            e.errno,
            e.filename,
            e.strerror))
def install_puppet_agent():
    """
    Install the Puppet agent repo and packages.

    Expects PUPPET_AGENT_VERSION in the environment: a deb version string or
    a Puppet Collection id such as 'PC1'.
    """
    # BUG FIX: raw string -- '\w', '\.', '\-' and '\d' are invalid escape
    # sequences in a plain string literal (DeprecationWarning, a future
    # SyntaxError).
    validate_env('PUPPET_AGENT_VERSION', r'^([\w\.\-]+|PC\d+)$')
    puppet_version = environ['PUPPET_AGENT_VERSION'].lower()
    puppet_repo_package = 'puppetlabs-release-trusty.deb'
    puppet_repo_package_url = 'https://apt.puppetlabs.com/' + puppet_repo_package
    assert_command("wget -c {}".format(puppet_repo_package_url),
                   'Failed to fetch Puppet repo package!', cwd='/tmp')
    assert_command("dpkg -i {}".format(puppet_repo_package),
                   'Failed to install Puppet repo package!', cwd='/tmp')
    assert_command('apt-get update', "Failed to refresh apt cache!")
    # Pin puppet-common and puppet to the same requested version.
    assert_command("apt-get install -y puppet-common={} puppet={}".format(
        puppet_version, puppet_version), 'Failed to install Puppet!')
def puppet_agent_bootstrapped():
    """
    Predicate to detect if Puppet has already been installed.

    Returns: boolean True or False
    """
    # The state file only gets dropped on a successful Puppet run.
    # NOTE(review): the original comment said "classes.txt" but the path
    # checked is catalog.txt -- confirm which file the run actually drops.
    classification_data = '/var/lib/puppet/state/catalog.txt'
    # Simplified from if/return True/else/return False.
    return os.path.exists(classification_data)
def create_rightscale_puppet_tags(secret):
    """
    Create the RightScale tags used for Puppet master auto-signing.
    """
    validate_env('RS_SELF_HREF', '^.+$')
    self_href = environ['RS_SELF_HREF']
    # One tag marks the signing state, the other carries the pre-shared key.
    tags = ('nd:puppet_state=waiting', "nd:puppet_secret={}".format(secret))
    for tag in tags:
        rsc_cmd = "rsc --rl10 cm15 multi_add /api/tags/multi_add resource_hrefs[]={} tags[]={}".format(
            self_href, tag)
        assert_command(
            rsc_cmd, "Failed to register RightScale tag \'{}\' for Puppet policy-base signing!".format(tag))
def create_puppet_agent_cert():
    """
    Embed Nextdoor information into the Puppet agent CSR/cert.
    """
    # 36-char alphanumeric pre-shared key embedded in the CSR.
    alphabet = string.ascii_letters + string.digits
    preshared_key = ''.join(random.choice(alphabet) for _ in range(36))

    challenge_password = False
    if "PUPPET_CHALLENGE_PASSWORD" in environ:
        validate_env('PUPPET_CHALLENGE_PASSWORD', '^.+$')
        challenge_password = environ['PUPPET_CHALLENGE_PASSWORD']

    csr_attrs = {'extension_requests': {'pp_preshared_key': preshared_key}}
    if challenge_password:
        # OID 1.2.840.113549.1.9.7 is the PKCS#9 challengePassword attribute.
        csr_attrs['custom_attributes'] = {
            '1.2.840.113549.1.9.7': challenge_password}

    try:
        with open('/etc/puppet/csr_attributes.yaml', 'wb') as outfile:
            outfile.write(
                yaml.dump(csr_attrs, explicit_start=True,
                          default_flow_style=False, encoding='utf-8'))
        os.chmod('/etc/puppet/csr_attributes.yaml', 0o644)
    except (IOError, OSError) as e:
        sys.exit(" *** {} :: {} :: {} *** ".format(e.errno,
                                                   e.filename, e.strerror))
    create_rightscale_puppet_tags(preshared_key)
def run_puppet_agent():
    """
    Kick off a Puppet agent run. With retries to cover eventual convergence.
    """
    # These are likely set in puppet.conf before the Puppet agent run however
    # its entirely possible that a run will change the contents of puppet.conf
    # but not represent a complete convergence. On follow-up runs we thus cannot
    # rely on the values specified in puppet.conf. FIXME: make sure all node profiles
    # converge on first run. ;)
    dmc = '^.+$'  # don't much care
    overrides = {
        'PUPPET_ENVIRONMENT_NAME': 'environment',
        'PUPPET_SERVER_HOSTNAME': 'server',
        'PUPPET_CA_SERVER': 'ca_server',
        'PUPPET_AGENT_USE_CACHED_CATALOG': 'use_cached_catalog',
    }
    extra_args = []
    for env_key, option in overrides.items():
        if env_key in environ:
            validate_env(env_key, dmc)
            extra_args.append(" --{} {}".format(option, environ[env_key]))
    cmd = "/usr/bin/puppet agent -t --detailed-exitcodes --waitforcert 15" + ''.join(extra_args)
    assert_command(cmd, 'Puppet run failed!', retries=5)
def configure_puppet_agent():
    """
    Encode various settings in puppet.conf and setup Nextdoor external Facts.
    """
    # External facts first, then the puppet.conf bootstrap -- same order as
    # the original implementation.
    for step in (configure_puppet_external_facts, bootstrap_puppet_agent_config):
        step()
def clean_rightscale_tags():
    """
    Upon succcesful Puppet convergence, remove the pre-shared key tag used
    for autosigning and flip Puppet state tag from 'waiting' to 'signed'.
    """
    cleanup_steps = (
        ("puppet resource rs_tag nd:puppet_state value='signed'",
         "Failed when flipping nd:puppet_state value to 'signed'."),
        ("puppet resource rs_tag nd:puppet_secret ensure=absent",
         "Failed when removing nd:puppet_secret!"),
    )
    for command, failure_message in cleanup_steps:
        assert_command(command, failure_message)
def adjust_hostname_and_domain():
    """
    We assume the FQDN has previously been set in /etc/hostname but that
    doesn't necessarily mean that required adjustments have been made to
    /etc/hosts to align with our needs for DNS domain name. Let's idempotently
    make those adjustments.

    Writes a tiny Puppet manifest to a temp file and `puppet apply`s it.
    """
    validate_env('DEFAULT_DOMAIN', '^.+$')
    mydomain = environ['DEFAULT_DOMAIN']
    log_and_stdout(
        "Adjusting domain name for Puppet's use: {}".format(mydomain))
    try:
        # The context manager closes (and unlinks) the temp file, so the
        # original's extra `finally: puppet_code.close()` was redundant --
        # and raised NameError when NamedTemporaryFile() itself failed.
        with NamedTemporaryFile() as puppet_code:
            code = """
            host {{ \"${{::hostname}}.{}\":
              ensure => present,
              ip => $::ipaddress,
              host_aliases => $::hostname,
            }}
            """.format(mydomain)
            puppet_code.write(bytes(code, 'UTF-8'))
            puppet_code.flush()
            assert_command("puppet apply {}".format(puppet_code.name),
                           'Failed to munge /etc/hosts entry for correct FQDN!')
    except IOError as e:
        # BUG FIX: IOError has no `.value` attribute; format the exception
        # itself instead.
        log_and_stdout("Puppet code tmpfile failed: {}".format(e))
def main():
    """
    The Fun Starts Here.
    """
    detect_debug_mode()
    # Guard clause: a previous successful run leaves its state file behind.
    if puppet_agent_bootstrapped():
        log_and_stdout(
            " *** Puppet probably bootstrapped previously. Exiting... *** ")
        return
    install_dependencies()
    install_puppet_agent()
    adjust_hostname_and_domain()
    configure_puppet_agent()
    create_puppet_agent_cert()
    run_puppet_agent()
    clean_rightscale_tags()
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "0c2f39018253dd499fa59c03977f0b77",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 104,
"avg_line_length": 35.63661202185792,
"alnum_prop": 0.5793145748677452,
"repo_name": "Nextdoor/rightscale_rightlink10_rightscripts",
"id": "ddbea3f8971f523f73daf0893161f505baf0b13d",
"size": "13496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nextdoor/puppet-install.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78894"
},
{
"name": "Ruby",
"bytes": "15367"
},
{
"name": "Shell",
"bytes": "19443"
}
],
"symlink_target": ""
} |
import unittest
from RetweetFilter import RetweetFilter
class TestRetweetFilter(unittest.TestCase):
    """Exercises RetweetFilter's duplicate-tweet cache."""

    def testTweetCacheFilter(self):
        tweet_filter = RetweetFilter()
        first = "I like to play with my cat, my cat is cute"
        overlapping = "my cat is cute, and will rule the world someday"
        # An unseen tweet is accepted (and cached).
        self.assertEqual(tweet_filter.checkAndCacheTweet(first), True)
        # The exact same text is rejected the second time around.
        self.assertEqual(tweet_filter.checkAndCacheTweet(first), False)
        # Overlapping-but-different text is still accepted.
        self.assertEqual(tweet_filter.checkAndCacheTweet(overlapping), True)
        # (A ~20k-iteration stress loop was left commented out upstream.)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "47914b939b610b12d152504f2e7cdde7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 112,
"avg_line_length": 34.04347826086956,
"alnum_prop": 0.7509578544061303,
"repo_name": "Lily418/CatHack",
"id": "3c92defc500ffbe240ceb64664a9d7287705e203",
"size": "783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/RetweetFilterTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "629"
},
{
"name": "JavaScript",
"bytes": "98672"
},
{
"name": "Python",
"bytes": "7882"
}
],
"symlink_target": ""
} |
import platform, json, os
from utilities import printing_chdir, printing_mkdir, do_cmake
def generate_linux():
    """Generate the matrix of Linux cmake (Ninja) build directories for cgc1.

    Reads settings.json for toolchain/dependency locations, then configures
    one build dir per configuration: gcc/clang x debug/release, clang
    sanitizer variants (UBSan, ASan, TSan), and a gcc gcov coverage build.
    Note: relies on the process working directory; each section chdirs
    relative to the previous one, and `ddict` entries set for one section
    intentionally carry over to the next unless overwritten.
    """
    with open("settings.json") as settings_file:
        settings_json = json.loads(settings_file.read())
    current_directory = os.getcwd()
    # Resolve all configured locations to absolute paths up front.
    gcc_install_location = settings_json["gcc_install_location"]
    gcc_install_location = os.path.abspath(gcc_install_location)
    clang_install_location = settings_json["clang_install_location"]
    clang_install_location = os.path.abspath(clang_install_location)
    bandit_include_path = settings_json["bandit_include_path"]
    bandit_include_path = os.path.abspath(bandit_include_path)
    build_location = settings_json["build_location"]
    build_location = os.path.abspath(build_location)
    install_path = settings_json["install_path"]
    install_path = os.path.abspath(install_path)
    boehm_path = settings_json["boehm_path"]
    boehm_path = os.path.abspath(boehm_path)
    boost_include_path = settings_json["boost_include_path"]
    # All build dirs live under <build_location>/cgc1/.
    printing_mkdir(build_location+'/cgc1/')
    printing_chdir(build_location+'/cgc1/')
    # ddict holds the -D cmake cache entries; mutated per configuration.
    ddict = {}
    ddict["BOEHM_LIB"]=boehm_path+'/lib'
    ddict["BOEHM_INCLUDE"]=boehm_path+'/include'
    ddict["CMAKE_C_COMPILER"] = gcc_install_location+'/gcc'
    ddict["CMAKE_CXX_COMPILER"] = gcc_install_location+'/g++'
    ddict["CMAKE_CXX_FLAGS"] = "-DMCPPALLOC_THREAD_SAFETY "
    ddict["BANDIT_INCLUDE_PATH"] = bandit_include_path
    ddict["BOOST_INCLUDEDIR"] = boost_include_path
    ddict["CMAKE_INSTALL_PREFIX"] = install_path
    # --- gcc release / debug ---
    ddict["CMAKE_BUILD_TYPE"]="RelWithDebInfo"
    printing_mkdir("unixmake_gcc_release")
    printing_chdir("unixmake_gcc_release")
    do_cmake(current_directory, "Ninja", ddict)
    printing_chdir("../")
    printing_mkdir("unixmake_gcc_debug")
    printing_chdir("unixmake_gcc_debug")
    ddict["CMAKE_BUILD_TYPE"]="Debug"
    do_cmake(current_directory, "Ninja", ddict)
    # --- switch toolchain to clang/libc++ for the next sections ---
    ddict["CMAKE_C_COMPILER"] = clang_install_location+'/clang'
    ddict["CMAKE_CXX_COMPILER"] = clang_install_location+'/clang++'
    ddict["CMAKE_CXX_FLAGS"] = "-DMCPPALLOC_THREAD_SAFETY -stdlib=libc++"
    printing_chdir("../")
    printing_mkdir("unixmake_clang_release")
    printing_chdir("unixmake_clang_release")
    ddict["CMAKE_BUILD_TYPE"]="RelWithDebInfo"
    do_cmake(current_directory, "Ninja", ddict)
    printing_chdir("../")
    printing_mkdir("unixmake_clang_debug")
    printing_chdir("unixmake_clang_debug")
    ddict["CMAKE_BUILD_TYPE"]="Debug"
    do_cmake(current_directory, "Ninja", ddict)
    # --- clang undefined-behavior sanitizer builds ---
    printing_chdir("../")
    printing_mkdir("unixmake_clang_debug_undef_san")
    printing_chdir("unixmake_clang_debug_undef_san")
    ddict["CMAKE_BUILD_TYPE"]="Debug"
    ddict["CMAKE_CXX_FLAGS"] = "-fsanitize=undefined -fsanitize=integer -fno-omit-frame-pointer -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES -stdlib=libc++"
    do_cmake(current_directory, "Ninja", ddict)
    printing_chdir("../")
    printing_mkdir("unixmake_clang_release_undef_san")
    printing_chdir("unixmake_clang_release_undef_san")
    ddict["CMAKE_BUILD_TYPE"]="RelWithDebInfo"
    ddict["CMAKE_CXX_FLAGS"] = "-fsanitize=undefined -fsanitize=integer -fno-omit-frame-pointer -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES -stdlib=libc++"
    do_cmake(current_directory, "Ninja", ddict)
    # --- clang address sanitizer builds ---
    printing_chdir("../")
    printing_mkdir("unixmake_clang_debug_address_san")
    printing_chdir("unixmake_clang_debug_address_san")
    ddict["CMAKE_BUILD_TYPE"]="Debug"
    ddict["CMAKE_CXX_FLAGS"] = "-fsanitize=address -fsanitize-address-use-after-scope -fno-omit-frame-pointer -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES -stdlib=libc++"
    do_cmake(current_directory, "Ninja", ddict)
    printing_chdir("../")
    printing_mkdir("unixmake_clang_release_address_san")
    printing_chdir("unixmake_clang_release_address_san")
    ddict["CMAKE_BUILD_TYPE"]="RelWithDebInfo"
    ddict["CMAKE_CXX_FLAGS"] = "-fsanitize=address -fsanitize-address-use-after-scope -fno-omit-frame-pointer -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES -stdlib=libc++"
    do_cmake(current_directory, "Ninja", ddict)
    # --- clang thread sanitizer builds ---
    printing_chdir("../")
    printing_mkdir("unixmake_clang_debug_thread_san")
    printing_chdir("unixmake_clang_debug_thread_san")
    ddict["CMAKE_BUILD_TYPE"]="Debug"
    ddict["CMAKE_CXX_FLAGS"] = "-fsanitize=thread -fno-omit-frame-pointer -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES -stdlib=libc++"
    do_cmake(current_directory, "Ninja", ddict)
    printing_chdir("../")
    printing_mkdir("unixmake_clang_release_thread_san")
    printing_chdir("unixmake_clang_release_thread_san")
    ddict["CMAKE_BUILD_TYPE"]="RelWithDebInfo"
    ddict["CMAKE_CXX_FLAGS"] = "-fsanitize=thread -fno-omit-frame-pointer -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES -stdlib=libc++"
    do_cmake(current_directory, "Ninja", ddict)
    # --- gcc gcov coverage build (note: CMAKE_BUILD_TYPE stays RelWithDebInfo
    # from the previous section) ---
    ddict["CMAKE_C_COMPILER"] = gcc_install_location+'/gcc'
    ddict["CMAKE_CXX_COMPILER"] = gcc_install_location+'/g++'
    ddict["CMAKE_CXX_FLAGS"] = "-O0 -fno-inline -fprofile-arcs -ftest-coverage -DMCPPALLOC_NO_INLINES"
    ddict["CMAKE_EXE_LINKER_FLAGS"] ="-fprofile-arcs -ftest-coverage"
    ddict["CMAKE_SHARED_LINKER_FLAGS"]="-fprofile-arcs -ftest-coverage"
    ddict["CMAKE_MODULE_LINKER_FLAGS"]="-fprofile-arcs -ftest-coverage"
    printing_chdir("../")
    printing_mkdir("unixmake_gcc_gcov")
    printing_chdir("unixmake_gcc_gcov")
    do_cmake(current_directory, "Ninja", ddict)
# Run at import time: this script is executed directly to (re)generate builds.
generate_linux()
| {
"content_hash": "e6337c49c1af5c0197c89dffbcaf1c4b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 191,
"avg_line_length": 49.8141592920354,
"alnum_prop": 0.6931959495469888,
"repo_name": "DanGrayson/cgc1",
"id": "1464e85395a1c6c0cc68258044ed8b73c31443ad",
"size": "5719",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "generate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5609"
},
{
"name": "C++",
"bytes": "235292"
},
{
"name": "CMake",
"bytes": "6231"
},
{
"name": "Python",
"bytes": "14580"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
"""Support for Joaoapps Join services."""
import logging
from pyjoin import (
get_devices,
ring_device,
send_file,
send_notification,
send_sms,
send_url,
set_wallpaper,
)
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Integration domain: services register as joaoapps_join.<name><service>.
DOMAIN = "joaoapps_join"

# Per-entry configuration keys for selecting target Join devices.
CONF_DEVICE_ID = "device_id"
CONF_DEVICE_IDS = "device_ids"
CONF_DEVICE_NAMES = "device_names"

# A list of Join accounts; each needs an API key. That at least one device
# selector (id / ids / names) is present is enforced in setup(), not here.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.ensure_list,
            [
                {
                    vol.Required(CONF_API_KEY): cv.string,
                    vol.Optional(CONF_DEVICE_ID): cv.string,
                    vol.Optional(CONF_DEVICE_IDS): cv.string,
                    vol.Optional(CONF_DEVICE_NAMES): cv.string,
                    vol.Optional(CONF_NAME): cv.string,
                }
            ],
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def register_device(hass, api_key, name, device_id, device_ids, device_names):
    """Register services for each join device listed."""
    # Target/auth kwargs shared by every pyjoin call below.
    join_kwargs = {
        "api_key": api_key,
        "device_id": device_id,
        "device_ids": device_ids,
        "device_names": device_names,
    }

    def ring_service(service):
        """Service to ring devices."""
        ring_device(**join_kwargs)

    def set_wallpaper_service(service):
        """Service to set wallpaper on devices."""
        set_wallpaper(url=service.data.get("url"), **join_kwargs)

    def send_file_service(service):
        """Service to send files to devices."""
        send_file(url=service.data.get("url"), **join_kwargs)

    def send_url_service(service):
        """Service to open url on devices."""
        send_url(url=service.data.get("url"), **join_kwargs)

    def send_tasker_service(service):
        """Service to push a Tasker command notification to devices."""
        send_notification(text=service.data.get("command"), **join_kwargs)

    def send_sms_service(service):
        """Service to send sms from devices."""
        send_sms(
            sms_number=service.data.get("number"),
            sms_text=service.data.get("message"),
            **join_kwargs,
        )

    # Registration order matches the original implementation.
    for suffix, handler in (
        ("ring", ring_service),
        ("set_wallpaper", set_wallpaper_service),
        ("send_sms", send_sms_service),
        ("send_file", send_file_service),
        ("send_url", send_url_service),
        ("send_tasker", send_tasker_service),
    ):
        hass.services.register(DOMAIN, name + suffix, handler)
def setup(hass, config):
    """Set up the Join services."""
    for device in config[DOMAIN]:
        api_key = device.get(CONF_API_KEY)
        device_id = device.get(CONF_DEVICE_ID)
        device_ids = device.get(CONF_DEVICE_IDS)
        device_names = device.get(CONF_DEVICE_NAMES)

        # Service prefix derived from the optional friendly name.
        raw_name = device.get(CONF_NAME)
        name = raw_name.lower().replace(" ", "_") + "_" if raw_name else ""

        if api_key:
            # Validate credentials and device selection against the Join API.
            if not get_devices(api_key):
                _LOGGER.error("Error connecting to Join, check API key")
                return False
            if all(v is None for v in (device_id, device_ids, device_names)):
                _LOGGER.error(
                    "No device was provided. Please specify device_id"
                    ", device_ids, or device_names"
                )
                return False

        register_device(hass, api_key, name, device_id, device_ids, device_names)
    return True
| {
"content_hash": "06e14e3a90ee09973249141143038e9b",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 81,
"avg_line_length": 30.905797101449274,
"alnum_prop": 0.5671746776084408,
"repo_name": "Teagan42/home-assistant",
"id": "10cbcf6b5c0684a66b889be9375c4a3cc8887319",
"size": "4265",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/joaoapps_join/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from tmc import db
# Association table for the Article <-> Tag many-to-many relationship.
article_tags = db.Table('tags_joiner',
                        db.Column('tag_id', db.Integer, db.ForeignKey('tags.id')),
                        db.Column('article_id', db.Integer, db.ForeignKey('articles.id')),
                        db.PrimaryKeyConstraint('tag_id', 'article_id'))
class Tag(db.Model):
    """A label that groups related articles (many-to-many via tags_joiner)."""
    __tablename__ = 'tags'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Note: `tag` is also flagged primary_key, making (id, tag) a composite key.
    tag = db.Column(db.String, primary_key=True)
    blurb = db.Column(db.String)
    # Articles carrying this tag, newest (highest id) first.
    articles = db.relationship('Article', secondary=article_tags, backref='articles', order_by="desc(Article.id)")

    def __repr__(self):
        return '<Tag: {}>'.format(self.tag)
class Article(db.Model):
    """A published piece of content: a blog post or a workshop page."""
    __tablename__ = 'articles'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String, unique=True)
    # URL slug; unique across all articles.
    url = db.Column(db.String, unique=True)
    type = db.Column(db.Enum('blog', 'workshop', name='type'), nullable=False, default='blog', server_default='blog')
    blurb = db.Column(db.String)
    text = db.Column(db.String, nullable=False)
    # Timestamps are set/updated server-side.
    creation_date = db.Column(db.DateTime, server_default=db.func.now())
    update_date = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
    # Self-referential parent/children hierarchy (e.g. workshop parts).
    parent_id = db.Column(db.Integer, db.ForeignKey('articles.id'))
    parent = db.relationship('Article', remote_side=[id], backref='children', order_by='Article.id')
    tags = db.relationship('Tag', secondary=article_tags, backref='tags')

    def __repr__(self):
        return '<Article {}: {}>'.format(self.id, self.title)

    def build_date_byline(self):
        """Return a display byline; mentions the update date only when the
        article was modified after publication."""
        if self.creation_date < self.update_date:
            return 'Published: {}; Updated {}' \
                .format(self.creation_date.strftime("%d %B %Y"), self.update_date.strftime("%d %B %Y"))
        else:
            return 'Published: {}' \
                .format(self.creation_date.strftime("%d %B %Y"))
| {
"content_hash": "9a0ec9f986f6d2e09df5c0e874980e2d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 117,
"avg_line_length": 45.23255813953488,
"alnum_prop": 0.6185089974293059,
"repo_name": "dougmiller/theMetaCity",
"id": "bf8da72d9a76e9315a4d4be09d89de646a5175f1",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmc/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28191"
},
{
"name": "HTML",
"bytes": "79220"
},
{
"name": "JavaScript",
"bytes": "26575"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "22241"
},
{
"name": "TypeScript",
"bytes": "21265"
}
],
"symlink_target": ""
} |
from warnings import warn
from rapidsms.conf import settings
from rapidsms.utils.modules import (find_python_files, get_class,
import_class, try_import)
from .exceptions import HandlerError
from .handlers.base import BaseHandler
def get_handlers():
    """
    Return a list of the handler classes to use in the current project.

    This is the classes whose names are listed in the RAPIDSMS_HANDLERS
    setting, but if that's not set, then we fall back to the deprecated
    behavior of returning all installed handlers, possibly modified by
    the INSTALLED_HANDLERS and/or EXCLUDED_HANDLERS settings.
    """
    if hasattr(settings, 'RAPIDSMS_HANDLERS'):
        return [import_class(name) for name in settings.RAPIDSMS_HANDLERS]

    warn("Please set RAPIDSMS_HANDLERS to the handlers that should "
         "be installed. The old behavior of installing all defined "
         "handlers, possibly modified by INSTALLED_HANDLERS and/or "
         "EXCLUDED_HANDLERS, is deprecated and will be removed",
         DeprecationWarning)
    handlers = _find_handlers(_apps())

    # if we're explicitly selecting handlers, keep only those matched by one
    # (or more) prefixes in INSTALLED_HANDLERS.
    installed = getattr(settings, 'INSTALLED_HANDLERS', None)
    if installed is not None:
        # NOTE: iterating in reverse mirrors the original pop-from-the-end
        # scan, which produced the surviving handlers in reversed order.
        handlers = [
            handler for handler in reversed(handlers)
            if any(handler.__module__.startswith(prefix)
                   for prefix in installed)
        ]

    # likewise, in reverse, for EXCLUDED_HANDLERS.
    excluded = getattr(settings, 'EXCLUDED_HANDLERS', None)
    if excluded is not None:
        for prefix in excluded:
            handlers = [
                handler for handler in handlers
                if not handler.__module__.startswith(prefix)]

    return handlers
def _find_handlers(app_names):
    """
    Return a list of all handlers defined in ``app_names``.
    """
    return [
        handler
        for module_name in app_names
        for handler in _handlers(module_name)]
def _apps():
    """
    Return a list of the apps which may contain handlers. This is not
    quite as simple as returning ``settings.INSTALLED_APPS``, since:

    1. This app (rapidsms.contrib.handlers) should be excluded, because
       although it contains handlers, they are intended to be abstract,
       not instantiated directly.

    2. Django contrib apps should be excluded, because the "auth" app
       has an unrelated "handlers" module.

    3. Any other app defining a "handlers" module can be listed in
       settings.RAPIDSMS_HANDLERS_EXCLUDE_APPS to not be loaded.
    """
    settings_exclusions = getattr(settings,
                                  "RAPIDSMS_HANDLERS_EXCLUDE_APPS", [])

    def _skip(module_name):
        return (module_name == "rapidsms.contrib.handlers"
                or module_name.startswith("django.contrib.")
                or module_name in settings_exclusions)

    return [name for name in settings.INSTALLED_APPS if not _skip(name)]
def _handlers(module_name):
    """
    Return a list of handlers (subclasses of app.handlers.HandlerBase)
    defined in the ``handlers`` directory of ``module_name``. Each
    Python file is expected to contain a single new-style class, which
    can be named arbitrarily.

    Return an empty list if no handlers are defined, or the directory
    can't be opened. All exceptions raised while importing handlers are
    allowed to propagate, to avoid masking errors.
    """
    handlers_module = try_import("%s.handlers" % module_name)
    if handlers_module is None:
        return []

    # A plain module (no __path__) cannot contain handler files.
    if not hasattr(handlers_module, "__path__"):
        raise HandlerError(
            "Module %s must be a directory." % (handlers_module.__name__))

    classes = []
    for file in find_python_files(handlers_module.__path__[0]):
        mod = try_import("%s.%s" % (handlers_module.__name__, file))
        if mod:
            classes.append(get_class(mod, BaseHandler))
    return classes
| {
"content_hash": "93dbfbb80338afb7d6767f5c4c0cab91",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 75,
"avg_line_length": 34.417910447761194,
"alnum_prop": 0.6461405030355594,
"repo_name": "peterayeni/rapidsms",
"id": "c12f667e314059d57abbd698be698f6afd366f04",
"size": "4664",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "rapidsms/contrib/handlers/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27100"
},
{
"name": "HTML",
"bytes": "39126"
},
{
"name": "JavaScript",
"bytes": "16887"
},
{
"name": "Python",
"bytes": "349490"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
} |
from cvxpy.atoms.affine.diag import diag
from cvxpy.reductions.dgp2dcp.atom_canonicalizers.add_canon import add_canon
from cvxpy.reductions.dgp2dcp.util import explicit_sum
def trace_canon(expr, args):
    """Canonicalize trace under DGP as an explicit sum of diagonal entries."""
    summed_diagonal = explicit_sum(diag(args[0]))
    return add_canon(summed_diagonal, summed_diagonal.args)
| {
"content_hash": "8511619c7516f575c5283dc62d76ef0b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 76,
"avg_line_length": 36.625,
"alnum_prop": 0.7747440273037542,
"repo_name": "merraksh/cvxpy",
"id": "eb9c0f48ff462e9dbc6e919ab555065be3832a3d",
"size": "293",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cvxpy/reductions/dgp2dcp/atom_canonicalizers/trace_canon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Populate the admin site with ModelAdmin registrations from installed apps.
admin.autodiscover()
# URL routing table.
# NOTE(review): ``patterns()`` and dotted-string view references were
# deprecated in Django 1.8 and removed in 1.10 -- fine for the Django
# version this project targets, but confirm before any framework upgrade.
urlpatterns = patterns('',
    url(r'^$', 'tvenc.views.index', name='index'),
    url(r'^recorded/$', 'tvenc.views.list_recorded', name='list_recorded'),
    url(r'^api/', include('tvenc.api.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "4ade3a456e4fc337dba0c38d9120086a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.6675977653631285,
"repo_name": "chibiegg/tvenc",
"id": "2835a788d9c8a39c23b95c3c50833122e153df51",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tvenc/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16504"
},
{
"name": "Shell",
"bytes": "1619"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
from setuptools import find_packages, setup
# Single-source the version: read it from the package being distributed.
version = __import__('logtailer').__version__
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    fname: path relative to the directory containing setup.py.

    Fix: the original called ``.read()`` on an anonymous ``open()`` and
    never closed the handle; a ``with`` block closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fobj:
        return fobj.read()
# Packaging metadata for the django-logtailer distribution.
setup(
    name="django-logtailer",
    version=version,
    url='https://github.com/fireantology/django-logtailer',
    license='BSD',
    platforms=['OS Independent'],
    description="Allows to read log files from disk with a tail like web "
                "console on Django admin interface. ",
    long_description=read('README.rst'),
    author='Mauro Rocco',
    author_email='fireantology@gmail.com',
    packages=find_packages(),
    # Oldest Django with supported classifiers below; newer versions are
    # declared via the Framework classifiers rather than pinned here.
    install_requires=(
        'Django>=1.8',
    ),
    # Ship templates/static files listed in MANIFEST.in alongside the code.
    include_package_data=True,
    zip_safe=False,
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.2',
        'Framework :: Django :: 3.0',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
| {
"content_hash": "e3ae5f30ea2d5c450d047469c3cb9642",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 74,
"avg_line_length": 33.074074074074076,
"alnum_prop": 0.5968645016797313,
"repo_name": "fireantology/django-logtailer",
"id": "8486a58f4c6af8bafcf14ae7218c13838617a5e4",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4600"
},
{
"name": "HTML",
"bytes": "3298"
},
{
"name": "JavaScript",
"bytes": "34513"
},
{
"name": "Python",
"bytes": "9285"
}
],
"symlink_target": ""
} |
"""
Builder for Nordic nRF51 series ARM microcontrollers.
"""
from os.path import join
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Default,
DefaultEnvironment, SConscript)
# Shared build environment; the common ARM toolchain setup is pulled in
# from PlatformIO's basearm.py helper script.
env = DefaultEnvironment()
SConscript(env.subst(join("$PIOBUILDER_DIR", "scripts", "basearm.py")))
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildProgram()
#
# Target: Build the .bin file
#
# For "uploadlazy" the firmware is not rebuilt: the previously produced
# .bin in the build directory is reused as the upload source.
if "uploadlazy" in COMMAND_LINE_TARGETS:
    target_firm = join("$BUILD_DIR", "firmware.bin")
else:
    target_firm = env.ElfToBin(join("$BUILD_DIR", "firmware"), target_elf)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD")
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
# NOTE(review): UploadToDisk suggests upload is a file copy to a mounted
# mass-storage bootloader drive -- confirm for the targeted boards.
upload = env.Alias(["upload", "uploadlazy"], target_firm, env.UploadToDisk)
AlwaysBuild(upload)
#
# Target: Define targets
#
Default([target_firm, target_size])
| {
"content_hash": "df4b67b511336ff4d3361938b90d6416",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 20.148936170212767,
"alnum_prop": 0.6948257655755016,
"repo_name": "mcanthony/platformio",
"id": "caed2f55874d6ac7084d074f3f77baa1398228ec",
"size": "1022",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "platformio/builder/scripts/nordicnrf51.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1897"
},
{
"name": "PowerShell",
"bytes": "2734"
},
{
"name": "Python",
"bytes": "249827"
},
{
"name": "Smarty",
"bytes": "32041"
}
],
"symlink_target": ""
} |
"""Abstract classes."""
from __future__ import absolute_import, unicode_literals
import abc
from collections import Callable
from .five import with_metaclass
__all__ = ['Thenable']
@with_metaclass(abc.ABCMeta)
class Thenable(Callable): # pragma: no cover
    """Object that supports ``.then()``.

    Abstract base class for promise-like callables.  Concrete classes
    must provide ``then``, ``throw`` and ``cancel``.  Via
    ``__subclasshook__``, any class exposing a ``then`` attribute
    anywhere in its MRO is also accepted as a (virtual) Thenable.
    """
    __slots__ = ()
    @abc.abstractmethod
    def then(self, on_success, on_error=None):
        # Chain success/error callbacks; must be overridden.
        raise NotImplementedError()
    @abc.abstractmethod
    def throw(self, exc=None, tb=None, propagate=True):
        # Deliver an exception (with optional traceback) to the promise.
        raise NotImplementedError()
    @abc.abstractmethod
    def cancel(self):
        # Abort the pending computation.
        raise NotImplementedError()
    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typed issubclass/isinstance: providing `then` is enough.
        if cls is Thenable:
            if any('then' in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
    @classmethod
    def register(cls, other):
        # override to return other so `register` can be used as a decorator
        type(cls).register(cls, other)
        return other
@Thenable.register
class ThenableProxy(object):
    """Proxy to object that supports ``.then()``.

    Subclasses call :meth:`_set_promise_target` once; every method and
    property below simply forwards to that wrapped promise.
    """

    def _set_promise_target(self, p):
        # The promise that receives all forwarded calls.
        self._p = p

    def then(self, on_success, on_error=None):
        """Forward callback chaining to the wrapped promise."""
        return self._p.then(on_success, on_error)

    def cancel(self):
        """Forward cancellation to the wrapped promise."""
        return self._p.cancel()

    def throw1(self, exc=None):
        """Forward ``throw1`` to the wrapped promise."""
        return self._p.throw1(exc)

    def throw(self, exc=None, tb=None, propagate=True):
        """Forward exception delivery to the wrapped promise."""
        return self._p.throw(exc, tb=tb, propagate=propagate)

    @property
    def cancelled(self):
        """Whether the wrapped promise was cancelled."""
        return self._p.cancelled

    @property
    def ready(self):
        """Whether the wrapped promise has completed."""
        return self._p.ready

    @property
    def failed(self):
        """Whether the wrapped promise failed."""
        return self._p.failed
| {
"content_hash": "7856431aa0968ee171d91a417e997ed4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 74,
"avg_line_length": 23.283783783783782,
"alnum_prop": 0.620429483459083,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "5c8d381e9e86154f14c09165805343d2eb1cd414",
"size": "1723",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/vine/abstract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
import pytest
from airflow import DAG
from airflow.exceptions import AirflowException
from airflow.sensors.sql_sensor import SqlSensor
from airflow.utils.timezone import datetime
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_sql_dag'
class TestSqlSensor(unittest.TestCase):
    """Tests for SqlSensor.

    The ``backend``-marked tests exercise a real MySQL/Postgres
    connection; the remaining tests patch ``BaseHook`` so no database
    is required.
    """

    def setUp(self):
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG(TEST_DAG_ID, default_args=args)

    @staticmethod
    def _mock_conn_records(mock_hook, conn_type="postgres"):
        """Mark the patched hook's connection as *conn_type* and return the
        mocked ``get_records`` callable consulted by ``SqlSensor.poke``.

        Extracted because every mocked poke test repeated these two lines.
        """
        mock_hook.get_connection('postgres_default').conn_type = conn_type
        return mock_hook.get_connection.return_value.get_hook.return_value.get_records

    def test_unsupported_conn_type(self):
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='redis_default',
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            dag=self.dag
        )
        with self.assertRaises(AirflowException):
            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @pytest.mark.backend("mysql")
    def test_sql_sensor_mysql(self):
        op1 = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='mysql_default',
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            dag=self.dag
        )
        op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        op2 = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='mysql_default',
            sql="SELECT count(%s) FROM INFORMATION_SCHEMA.TABLES",
            parameters=["table_name"],
            dag=self.dag
        )
        op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @pytest.mark.backend("postgres")
    def test_sql_sensor_postgres(self):
        op1 = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            dag=self.dag
        )
        op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        op2 = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT count(%s) FROM INFORMATION_SCHEMA.TABLES",
            parameters=["table_name"],
            dag=self.dag
        )
        op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke(self, mock_hook):
        """Default truthiness rules: empty/None/0 => not done, else done."""
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [[None]]
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [['None']]
        self.assertTrue(op.poke(None))
        mock_get_records.return_value = [[0.0]]
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [[0]]
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [['0']]
        self.assertTrue(op.poke(None))
        mock_get_records.return_value = [['1']]
        self.assertTrue(op.poke(None))

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_fail_on_empty(self, mock_hook):
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            fail_on_empty=True
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = []
        self.assertRaises(AirflowException, op.poke, None)

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_success(self, mock_hook):
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            success=lambda x: x in [1]
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [[1]]
        self.assertTrue(op.poke(None))
        mock_get_records.return_value = [['1']]
        self.assertFalse(op.poke(None))

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_failure(self, mock_hook):
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            failure=lambda x: x in [1]
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_failure_success(self, mock_hook):
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            failure=lambda x: x in [1],
            success=lambda x: x in [2]
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)
        mock_get_records.return_value = [[2]]
        self.assertTrue(op.poke(None))

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_failure_success_same(self, mock_hook):
        """failure is evaluated before success when both match."""
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            failure=lambda x: x in [1],
            success=lambda x: x in [1]
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))
        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_invalid_failure(self, mock_hook):
        """A non-callable ``failure`` argument is rejected at poke time."""
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            failure=[1],
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)

    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_invalid_success(self, mock_hook):
        """A non-callable ``success`` argument is rejected at poke time."""
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            success=[1],
        )
        mock_get_records = self._mock_conn_records(mock_hook)
        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)
| {
"content_hash": "5ad5c6ba896d3c69fb0bcb2b17f659cf",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 98,
"avg_line_length": 35.05701754385965,
"alnum_prop": 0.6085324659076692,
"repo_name": "wileeam/airflow",
"id": "cc228e14e3b8713540906fb728647bcc764ac545",
"size": "8780",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sensors/test_sql_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from oslo_log import log
from neutron._i18n import _
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
class LoggerMechanismDriver(api.MechanismDriver):
    """Mechanism driver that logs all calls and parameters made.

    Generally used for testing and debugging.

    NOTE: log messages are emitted untranslated; per the oslo.i18n
    guidelines, translation markers (``_``) are reserved for user-facing
    messages such as exceptions, not operator log output.
    """

    def initialize(self):
        pass

    def _log_network_call(self, method_name, context):
        """Log one network pre/postcommit call with its current, original
        and segment state."""
        LOG.info("%(method)s called with network settings %(current)s "
                 "(original settings %(original)s) and "
                 "network segments %(segments)s",
                 {'method': method_name,
                  'current': context.current,
                  'original': context.original,
                  'segments': context.network_segments})

    def create_network_precommit(self, context):
        self._log_network_call("create_network_precommit", context)

    def create_network_postcommit(self, context):
        self._log_network_call("create_network_postcommit", context)

    def update_network_precommit(self, context):
        self._log_network_call("update_network_precommit", context)

    def update_network_postcommit(self, context):
        self._log_network_call("update_network_postcommit", context)

    def delete_network_precommit(self, context):
        self._log_network_call("delete_network_precommit", context)

    def delete_network_postcommit(self, context):
        self._log_network_call("delete_network_postcommit", context)

    def _log_subnet_call(self, method_name, context):
        """Log one subnet pre/postcommit call with its current and
        original state."""
        LOG.info("%(method)s called with subnet settings %(current)s "
                 "(original settings %(original)s)",
                 {'method': method_name,
                  'current': context.current,
                  'original': context.original})

    def create_subnet_precommit(self, context):
        self._log_subnet_call("create_subnet_precommit", context)

    def create_subnet_postcommit(self, context):
        self._log_subnet_call("create_subnet_postcommit", context)

    def update_subnet_precommit(self, context):
        self._log_subnet_call("update_subnet_precommit", context)

    def update_subnet_postcommit(self, context):
        self._log_subnet_call("update_subnet_postcommit", context)

    def delete_subnet_precommit(self, context):
        self._log_subnet_call("delete_subnet_precommit", context)

    def delete_subnet_postcommit(self, context):
        self._log_subnet_call("delete_subnet_postcommit", context)

    def _log_port_call(self, method_name, context):
        """Log one port call, including binding state and the enclosing
        network's current state."""
        network_context = context.network
        LOG.info("%(method)s called with port settings %(current)s "
                 "(original settings %(original)s) "
                 "host %(host)s "
                 "(original host %(original_host)s) "
                 "vif type %(vif_type)s "
                 "(original vif type %(original_vif_type)s) "
                 "vif details %(vif_details)s "
                 "(original vif details %(original_vif_details)s) "
                 "binding levels %(levels)s "
                 "(original binding levels %(original_levels)s) "
                 "on network %(network)s "
                 "with segments to bind %(segments_to_bind)s",
                 {'method': method_name,
                  'current': context.current,
                  'original': context.original,
                  'host': context.host,
                  'original_host': context.original_host,
                  'vif_type': context.vif_type,
                  'original_vif_type': context.original_vif_type,
                  'vif_details': context.vif_details,
                  'original_vif_details': context.original_vif_details,
                  'levels': context.binding_levels,
                  'original_levels': context.original_binding_levels,
                  'network': network_context.current,
                  'segments_to_bind': context.segments_to_bind})

    def create_port_precommit(self, context):
        self._log_port_call("create_port_precommit", context)

    def create_port_postcommit(self, context):
        self._log_port_call("create_port_postcommit", context)

    def update_port_precommit(self, context):
        self._log_port_call("update_port_precommit", context)

    def update_port_postcommit(self, context):
        self._log_port_call("update_port_postcommit", context)

    def delete_port_precommit(self, context):
        self._log_port_call("delete_port_precommit", context)

    def delete_port_postcommit(self, context):
        self._log_port_call("delete_port_postcommit", context)

    def bind_port(self, context):
        self._log_port_call("bind_port", context)
| {
"content_hash": "30fcfb80d731cc92097e27d23b1b8cad",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 73,
"avg_line_length": 40.495726495726494,
"alnum_prop": 0.6072182355424229,
"repo_name": "dims/neutron",
"id": "93b1f53ff5168c3c366e61348cc4924b8648e4e9",
"size": "5378",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8048836"
},
{
"name": "Shell",
"bytes": "14802"
}
],
"symlink_target": ""
} |
"""Implementation of compile_html based on odfpy.
You will need, of course, to install odfpy
"""
import os
import io
import shutil
import lxml.etree as etree
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing
try:
from odf.odf2xhtml import ODF2XHTML
except ImportError:
ODF2XHTML = None
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # NOQA
class CompileODT(PageCompiler):
    """Compile ODT into HTML."""

    name = "odt"

    def compile(self, source, dest, is_two_file=True, post=None, lang=None):
        """Compile the source file into HTML and save as dest."""
        makedirs(os.path.dirname(dest))
        if ODF2XHTML is None:
            # req_missing reports the missing dependency (presumably it
            # raises/exits; execution does not continue past it -- confirm).
            req_missing(['odfpy'], 'build this site (compile odt)')
        odhandler = ODF2XHTML(True, False)
        data = odhandler.odf2xhtml(source)
        # Take the CSS from the head and put it in body
        doc = etree.fromstring(data)
        body = doc.find('{http://www.w3.org/1999/xhtml}body')
        for style in doc.findall('*//{http://www.w3.org/1999/xhtml}style'):
            style.getparent().remove(style)
            # keep only classes:
            filtered = []
            for line in style.text.splitlines():
                if line and line[0] in '.\t}':
                    filtered.append(line)
            style.text = ''.join(filtered)
            body.insert(0, style)
        with io.open(dest, 'w+', encoding='utf-8') as outf:
            outf.write(etree.tostring(body, encoding='unicode'))

    def compile_html(self, source, dest, is_two_file=True):
        """Compile the post into HTML (deprecated API)."""
        try:
            post = self.site.post_per_input_file[source]
        except KeyError:
            post = None
        # BUG FIX: the original called the *builtin* ``compile`` here
        # instead of this compiler's own ``compile`` method.
        return self.compile(source, dest, is_two_file, post, None)

    def create_post(self, path, **kw):
        """Create a new empty post by copying the bundled empty.odt."""
        onefile = kw.pop('onefile', False)
        # is_page is not used by create_post as of now.
        kw.pop('is_page', False)
        if onefile:
            raise Exception('The one-file format is not supported by this compiler.')
        shutil.copyfile(os.path.join(os.path.dirname(__file__), 'empty.odt'), path)
| {
"content_hash": "30a2f4cda14358ba00a72dc5df824f94",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 85,
"avg_line_length": 30.27027027027027,
"alnum_prop": 0.6133928571428572,
"repo_name": "getnikola/plugins",
"id": "aab3e94cfcbd8d15dab04e7f9a17c0b0a22186c0",
"size": "3377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v7/odt/odt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8729"
},
{
"name": "Emacs Lisp",
"bytes": "8804"
},
{
"name": "HTML",
"bytes": "2470"
},
{
"name": "JavaScript",
"bytes": "41087"
},
{
"name": "Python",
"bytes": "1157045"
},
{
"name": "TeX",
"bytes": "844"
}
],
"symlink_target": ""
} |
class GameHangman:
    """State for a single game of hangman."""

    def __init__(self, word, total_tries=10):
        self.word = word.lower()
        self.right_letters = []
        self.wrong_letters = []
        self.total_tries = total_tries
        self.tries = 0

    @property
    def tries_left(self):
        """Number of wrong guesses still allowed."""
        return self.total_tries - self.tries

    @property
    def won(self):
        """True once every letter of the word has been revealed."""
        return len(self.right_letters) == len(self.word)

    @property
    def lost(self):
        """True when no tries remain."""
        return self.tries_left < 1

    def guess(self, letter):
        """Process a guessed letter; return True iff it occurs in the word.

        Every wrong guess (even a repeated one) costs a try; a correct
        letter is recorded once per occurrence so ``won`` can compare
        list lengths against the word length.
        """
        letter = letter.lower()
        if letter not in self.word:
            if letter not in self.wrong_letters:
                self.wrong_letters.append(letter)
            self.tries += 1
            return False
        if letter not in self.right_letters:
            # one entry per occurrence, e.g. 'l' in "hello" adds ['l', 'l']
            self.right_letters.extend(letter * self.word.count(letter))
        return True

    def get_beautified_string(self):
        """Render the word with unguessed letters masked as ``\\_``."""
        return " ".join(
            char if char in self.right_letters else "\\_"
            for char in self.word
        )
| {
"content_hash": "54efd63e45556dbd63c84ace7c57c3ec",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 105,
"avg_line_length": 29.054054054054053,
"alnum_prop": 0.5572093023255814,
"repo_name": "shikhir-arora/Giesela",
"id": "7e1ea3a8e6eb8d35a16b1c53c2af898d926567ca",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/documentation",
"path": "musicbot/games/game_hangman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1940"
},
{
"name": "Python",
"bytes": "581472"
}
],
"symlink_target": ""
} |
class Selector:
    """A parsed CSS selector: a token list plus the line it starts on."""

    def __init__(self, t, ln):
        # Imported lazily; importing at module level would be circular.
        import cssqc.parser
        self.lineno = ln
        self.text = [] if t is None else t
        # Register with the running checker singleton, when one exists.
        instance = cssqc.parser.CSSQC.getInstance()
        if instance is not None:
            instance.register(self.__class__.__name__, self)

    def __str__(self):
        return ''.join(str(token) for token in self.text)

    def __len__(self):
        return len(self.text)

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return (self.text == other.text
                and self.lineno == other.lineno)

    def __repr__(self):
        body = '\n '.join(map(repr, self.text))
        return '<Selector>\n ' + body + '\n</Selector>'
| {
"content_hash": "2939d97404220c0259b6ac792dda0425",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 89,
"avg_line_length": 26.964285714285715,
"alnum_prop": 0.47549668874172185,
"repo_name": "matematik7/CSSQC",
"id": "b172f1cb465d67d2842e9c3adeb35f91895cabfc",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cssyacc/selector.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111603"
},
{
"name": "Python",
"bytes": "199164"
}
],
"symlink_target": ""
} |
import sys
from collections import OrderedDict
from types import MappingProxyType, DynamicClassAttribute
__all__ = ['Enum', 'IntEnum', 'unique']
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, proto):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super().__init__()
self._member_names = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
"""
if _is_sunder(key):
raise ValueError('_names_ are reserved for future Enum use')
elif _is_dunder(key):
pass
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('Key already defined as: %r' % self[key])
self._member_names.append(key)
super().__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't exist.
# This is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None  # forward declaration; rebound to the real class later in this module
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = {k: classdict[k] for k in classdict._member_names}
for name in classdict._member_names:
del classdict[name]
# check for illegal enum names (any others?)
invalid_names = set(members) & {'mro', }
if invalid_names:
raise ValueError('Invalid enum member name: {0}'.format(
','.join(invalid_names)))
# create a default docstring if one has not been provided
if '__doc__' not in classdict:
classdict['__doc__'] = 'An enumeration.'
# create our new Enum type
enum_class = super().__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in definition order
enum_class._member_map_ = OrderedDict() # name->value map
enum_class._member_type_ = member_type
# save attributes from super classes so we know if we can take
# the shortcut of storing members in the class dict
base_attributes = {a for b in bases for a in b.__dict__}
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
for member_name in classdict._member_names:
value = members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member._value_ == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
# performance boost for any member that would not shadow
# a DynamicClassAttribute
if member_name not in base_attributes:
setattr(enum_class, member_name, enum_member)
# now add to _member_map_
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if obj_method is not None and obj_method is class_method:
setattr(enum_class, name, enum_method)
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
enum_class.__new_member__ = __new__
enum_class.__new__ = Enum.__new__
return enum_class
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API:
`value` will be the name of the new class.
`names` should be either a string of white-space/comma delimited names
(values will start at `start`), or an iterator/mapping of name, value pairs.
`module` should be set to the module this class is being created in;
if it is not set, an attempt to find that module will be made, but if
it fails the class will not be picklable.
`qualname` should be set to the actual location this class can be found
at in its module; by default it is set to the global scope. If this is
not correct, unpickling will fail in some circumstances.
`type`, if set, will be mixed in as the first base class.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member._name_ in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super().__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name) from None
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __len__(cls):
return len(cls._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a read-only view of the internal mapping.
"""
return MappingProxyType(cls._member_map_)
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __reversed__(cls):
return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super().__setattr__(name, value)
    def _create_(cls, class_name, names=None, *, module=None, qualname=None, type=None, start=1):
        """Convenience method to create a new Enum class (functional API).

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are incremented by 1 from `start`.
        * An iterable of member names.  Values are incremented by 1 from `start`.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value pairs.
        """
        metacls = cls.__class__
        bases = (cls, ) if type is None else (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        # special processing needed for names?
        if isinstance(names, str):
            # "red, green blue" -> ['red', 'green', 'blue']
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], str):
            # bare names: auto-number starting from `start`
            names = [(e, i) for (i, e) in enumerate(names, start)]
        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, str):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                # two frames up is the caller of the functional API
                module = sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError) as exc:
                pass
        if module is None:
            # without a module, pickle cannot locate the class
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        if qualname is not None:
            enum_class.__qualname__ = qualname
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases:
            # Enum itself is being created
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if  (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases (relies on the loop variable
        # leaking out of the for-loop above)
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                            "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0] # first data type
            first_enum = bases[-1] # enum type
        else:
            # walk the MRO of the Enum subclass to find both the nearest
            # Enum ancestor and the mixed-in data type
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                # <class 'int'>, <Enum 'Enum'>,
                # <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    @staticmethod
    def _find_new_(classdict, member_type, first_enum):
        """Returns the __new__ to be used for creating the enum members.

        classdict: the class dictionary given to __new__
        member_type: the data type whose __new__ will be used by default
        first_enum: enumeration to check for an overriding __new__
        """
        # now find the correct __new__, checking to see if one was defined
        # by the user; also check earlier enum classes in case a __new__ was
        # saved as __new_member__
        __new__ = classdict.get('__new__', None)
        # should __new__ be saved as __new_member__ later?
        save_new = __new__ is not None
        if __new__ is None:
            # check all possibles for __new_member__ before falling back to
            # __new__
            for method in ('__new_member__', '__new__'):
                for possible in (member_type, first_enum):
                    target = getattr(possible, method, None)
                    # skip the trivial __new__ implementations; only a
                    # "real" user/data-type __new__ counts
                    if target not in {
                            None,
                            None.__new__,
                            object.__new__,
                            Enum.__new__,
                            }:
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                # for-else: nothing interesting found anywhere
                __new__ = object.__new__
        # if a non-object.__new__ is used then whatever value/tuple was
        # assigned to the enum member name will be passed to __new__ and to the
        # new enum member's __init__
        if __new__ is object.__new__:
            use_args = False
        else:
            use_args = True
        return __new__, save_new, use_args
class Enum(metaclass=EnumMeta):
    """Generic enumeration.

    Derive from this class to define new enumerations.
    """
    def __new__(cls, value):
        # all enum instances are actually created during class construction
        # without calling this method; this method is called by the metaclass'
        # __call__ (i.e. Color(3) ), and by pickle
        if type(value) is cls:
            # For lookups like Color(Color.red)
            return value
        # by-value search for a matching enum member
        # see if it's in the reverse mapping (for hashable values)
        try:
            if value in cls._value2member_map_:
                return cls._value2member_map_[value]
        except TypeError:
            # not there, now do long search -- O(n) behavior
            # (value was unhashable, so the reverse map cannot be used)
            for member in cls._member_map_.values():
                if member._value_ == value:
                    return member
        raise ValueError("%r is not a valid %s" % (value, cls.__name__))
    def __repr__(self):
        return "<%s.%s: %r>" % (
                self.__class__.__name__, self._name_, self._value_)
    def __str__(self):
        return "%s.%s" % (self.__class__.__name__, self._name_)
    def __dir__(self):
        # expose added (mixed-in) public behavior but not the members
        # themselves -- those are reached via the metaclass' __getattr__
        added_behavior = [
                m
                for cls in self.__class__.mro()
                for m in cls.__dict__
                if m[0] != '_' and m not in self._member_map_
                ]
        return (['__class__', '__doc__', '__module__'] + added_behavior)
    def __format__(self, format_spec):
        # mixed-in Enums should use the mixed-in type's __format__, otherwise
        # we can get strange results with the Enum name showing up instead of
        # the value
        # pure Enum branch
        if self._member_type_ is object:
            cls = str
            val = str(self)
        # mix-in branch
        else:
            cls = self._member_type_
            val = self._value_
        return cls.__format__(val, format_spec)
    def __hash__(self):
        # hash by name: names are unique even when values alias each other
        return hash(self._name_)
    def __reduce_ex__(self, proto):
        # pickle by value; unpickling goes through EnumMeta.__call__
        return self.__class__, (self._value_, )
    # DynamicClassAttribute is used to provide access to the `name` and
    # `value` properties of enum members while keeping some measure of
    # protection from modification, while still allowing for an enumeration
    # to have members named `name` and `value`.  This works because enumeration
    # members are not set directly on the enum class -- __getattr__ is
    # used to look them up.
    @DynamicClassAttribute
    def name(self):
        """The name of the Enum member."""
        return self._name_
    @DynamicClassAttribute
    def value(self):
        """The value of the Enum member."""
        return self._value_
    @classmethod
    def _convert(cls, name, module, filter, source=None):
        """
        Create a new Enum subclass that replaces a collection of global constants
        """
        # convert all constants from source (or module) that pass filter() to
        # a new Enum called name, and export the enum and its members back to
        # module;
        # also, replace the __reduce_ex__ method so unpickling works in
        # previous Python versions
        module_globals = vars(sys.modules[module])
        if source:
            source = vars(source)
        else:
            source = module_globals
        members = {name: value for name, value in source.items()
                   if filter(name)}
        cls = cls(name, members, module=module)
        cls.__reduce_ex__ = _reduce_ex_by_name
        # re-export members as module globals so existing callers still work
        module_globals.update(cls.__members__)
        module_globals[name] = cls
        return cls
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
def _reduce_ex_by_name(self, proto):
    # Used as a replacement __reduce_ex__ by Enum._convert: pickle converted
    # constants by bare name so older Pythons (without the new Enum class)
    # can still resolve them as plain module attributes.
    return self.name
def unique(enumeration):
    """Class decorator for enumerations ensuring unique member values.

    Raises ValueError listing every alias -> canonical-name pair found;
    returns the enumeration unchanged when there are no aliases.
    """
    aliases = [
        (name, member.name)
        for name, member in enumeration.__members__.items()
        if name != member.name
    ]
    if aliases:
        alias_details = ', '.join(
            "%s -> %s" % (alias, name) for (alias, name) in aliases)
        raise ValueError('duplicate values found in %r: %s' %
                         (enumeration, alias_details))
    return enumeration
| {
"content_hash": "54c647c1327692b4880eae10f278cd1d",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 99,
"avg_line_length": 38.91021126760563,
"alnum_prop": 0.5622370028505498,
"repo_name": "juanyaw/python",
"id": "c28f3452a75372a5a61c068e975ebd44ddd4540f",
"size": "22101",
"binary": false,
"copies": "10",
"ref": "refs/heads/develop",
"path": "cpython/Lib/enum.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "470920"
},
{
"name": "Batchfile",
"bytes": "35551"
},
{
"name": "C",
"bytes": "16518323"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "343272"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "254942"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "Makefile",
"bytes": "25026"
},
{
"name": "Objective-C",
"bytes": "1390263"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24911704"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""
Copyright 2015-2018 Jacob M. Graving <jgraving@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
import pickle
import glob
# disable multithreading in OpenCV for main thread
# to avoid problems after parallelization
# (0 forces OpenCV to run single-threaded in this process)
cv2.setNumThreads(0)
class CameraCalibration:

    def __init__(self, grid_shape=(9, 6)):
        """ Class for calculating calibration parameters from calibration images

            Parameters
            ----------
            grid_shape : tuple of int
                Shape of calibration grid (internal corners)

            Returns
            -------
            self : class
                CameraCalibration class instance.
        """
        self.grid_shape = grid_shape

    def calibrate(self, image_files, imshow=True, delay=500):
        """ Calculates calibration parameters from calibration images.

            Parameters
            ----------
            image_files : str
                File path glob for images (e.g. "/path/to/files/*.jpg")
            imshow : bool, (default = True)
                Show the calibration images while processing
            delay : int, >=1 (default = 500)
                Delay in msecs between each image for imshow

            Returns
            -------
            self : class
                The instance with ``self.params`` set to a dict of
                calibration parameters, or to None when no chessboard
                corners were found in any image.
        """
        image_files = sorted(glob.glob(image_files))
        # termination criteria for the sub-pixel corner refinement
        criteria = (
            cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
            300,
            0.001
        )
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((self.grid_shape[1] * self.grid_shape[0], 3),
                        np.float32)
        objp[:, :2] = np.mgrid[0:self.grid_shape[0],
                               0:self.grid_shape[1]].T.reshape(-1, 2)
        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d points in real world space
        imgpoints = []  # 2d points in image plane
        gray = None
        for filename in image_files:
            img = cv2.imread(filename)
            # BUG FIX: was `isinstance(img, None)`, which raises TypeError
            # (None is not a type).  cv2.imread returns None for unreadable
            # files, so simply skip those.
            if img is None:
                continue
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(
                gray,
                self.grid_shape,
                None,
                flags=(cv2.CALIB_CB_ADAPTIVE_THRESH +
                       cv2.CALIB_CB_FILTER_QUADS +
                       cv2.CALIB_CB_FAST_CHECK +
                       cv2.CALIB_CB_NORMALIZE_IMAGE))
            # If found, add object points and refined image points
            if ret:
                objpoints.append(objp)
                corners2 = cv2.cornerSubPix(gray,
                                            corners,
                                            (11, 11),
                                            (-1, -1),
                                            criteria
                                            )
                imgpoints.append(corners2)
                if imshow:
                    img = cv2.drawChessboardCorners(img,
                                                    self.grid_shape,
                                                    corners2,
                                                    ret
                                                    )
            # Draw and display the corners
            if imshow:
                cv2.imshow('img', img)
                cv2.waitKey(delay)
        if imshow:
            cv2.destroyAllWindows()
            # extra waitKey calls flush the HighGUI event queue so the
            # window actually closes on some platforms
            for i in range(5):
                cv2.waitKey(1)
        if len(objpoints) > 0 and len(imgpoints) > 0:
            # `gray` is guaranteed to be set here: objpoints is only
            # appended to after a successful imread/cvtColor
            calibration = cv2.calibrateCamera(objpoints,
                                              imgpoints,
                                              gray.shape[::-1],
                                              None, None)
            ret, mtx, dist, rvecs, tvecs = calibration
            params = {"ret": ret,
                      "mtx": mtx,
                      "dist": dist,
                      "rvecs": rvecs,
                      "tvecs": tvecs}
            # mean reprojection error over all calibration images
            total_error = 0
            # BUG FIX: was `xrange`, which is a NameError on Python 3
            for i in range(len(objpoints)):
                imgpoints2, _ = cv2.projectPoints(objpoints[i],
                                                  rvecs[i],
                                                  tvecs[i],
                                                  mtx,
                                                  dist)
                error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)
                error /= len(imgpoints2)
                total_error += error
            mean_error = total_error / len(objpoints)
            print("Calibration successful! Mean error: ", mean_error)
            self.params = params
            self.mean_error = mean_error
            self.ret = ret
            self.mtx = mtx
            self.dist = dist
            self.rvecs = rvecs
            self.tvecs = tvecs
        else:
            print("No calibration points found!")
            self.params = None
        return self

    def undistort(self, image, crop=True):
        """ Returns undistorted image using calibration parameters.

            Parameters
            ----------
            image : numpy_array
                Image to be undistorted
            crop : bool
                Crop the image to the optimal region of interest

            Returns
            -------
            dst : numpy_array
                Undistorted image.
        """
        h, w = image.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
            self.mtx, self.dist, (w, h), 1, (w, h))
        # undistort via a remap of the whole image
        mapx, mapy = cv2.initUndistortRectifyMap(
            self.mtx, self.dist, None, newcameramtx, (w, h), 5)
        dst = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)
        # crop the image to the valid region of interest
        if crop:
            x, y, w, h = roi
            dst = dst[y:y + h, x:x + w]
        return dst

    def save_calib(self, filename):
        """ Saves calibration parameters as '.pkl' file.

            Parameters
            ----------
            filename : str
                Path to save file, must be '.pkl' extension

            Returns
            -------
            saved : bool
                Saved successfully.
        """
        if not isinstance(self.params, dict):
            raise TypeError("params must be 'dict'")
        # `with` guarantees the file is closed even if pickling fails
        with open(filename, 'wb') as output:
            pickle.dump(self.params, output, protocol=0)
        self.saved = True
        return self.saved

    def load_calib(self, filename):
        """ Loads calibration parameters from '.pkl' file.

            Parameters
            ----------
            filename : str
                Path to load file, must be '.pkl' extension

            Returns
            -------
            loaded : bool
                Loaded successfully (parameters stored in ``self.params``).
        """
        # read python dict back from the file
        with open(filename, 'rb') as pkl_file:
            self.params = pickle.load(pkl_file)
        self.ret = self.params["ret"]
        self.mtx = self.params["mtx"]
        self.dist = self.params["dist"]
        self.rvecs = self.params["rvecs"]
        self.tvecs = self.params["tvecs"]
        self.loaded = True
        return self.loaded
| {
"content_hash": "8fc7b1020f4cd524c13bba31cbfa84bb",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 80,
"avg_line_length": 31.233962264150943,
"alnum_prop": 0.4670774435181829,
"repo_name": "jgraving/pinpoint",
"id": "9977df18404e2acd9ff0b1728bc507c73671eae9",
"size": "8277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinpoint/CameraCalibration.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1260711"
},
{
"name": "Python",
"bytes": "88341"
}
],
"symlink_target": ""
} |
r"""Create a dataset for training and evaluating the basic RNN model.
Example usage:
$ bazel build magenta/models/basic_rnn:basic_rnn_create_dataset
$ ./bazel-bin/magenta/models/basic_rnn/basic_rnn_create_dataset \
--input=/tmp/note_sequences.tfrecord \
--output_dir=/tmp/basic_rnn \
--eval_ratio=0.10
See /magenta/models/shared/melody_rnn_create_dataset.py for flag descriptions.
"""
# internal imports
import tensorflow as tf
from magenta.models.basic_rnn import basic_rnn_encoder_decoder
from magenta.models.shared import melody_rnn_create_dataset
def get_pipeline():
  """Build the dataset-creation pipeline for the basic RNN model."""
  encoder_decoder = basic_rnn_encoder_decoder.MelodyEncoderDecoder()
  return melody_rnn_create_dataset.get_pipeline(encoder_decoder)
def main(unused_argv):
  """Entry point: run dataset creation as configured by command-line flags."""
  pipeline = get_pipeline()
  melody_rnn_create_dataset.run_from_flags(pipeline)
def console_entry_point():
  # Zero-argument wrapper suitable for a setuptools console_scripts target;
  # tf.app.run handles flag parsing before invoking main().
  tf.app.run(main)
# Support direct invocation (python basic_rnn_create_dataset.py) in
# addition to the console-script entry point.
if __name__ == '__main__':
  console_entry_point()
| {
"content_hash": "38afd838fbc820d4937324507e71981e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 25.257142857142856,
"alnum_prop": 0.7352941176470589,
"repo_name": "hanzorama/magenta",
"id": "8355267811a44de2263d9a986eb739c8e187650a",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magenta/models/basic_rnn/basic_rnn_create_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "9918"
},
{
"name": "Python",
"bytes": "628506"
},
{
"name": "Shell",
"bytes": "6299"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import sortedm2m.fields
import sortedm2m.operations
class Migration(migrations.Migration):
    # Runs after the app's initial migration so both m2m models exist.
    dependencies = [
        ('altersortedmanytomanyfield_tests', '0001_initial'),
    ]
    # Exercises AlterSortedManyToManyField in both directions: the first
    # operation converts a plain M2M to a SortedManyToManyField, the second
    # converts a sorted M2M back to a plain ManyToManyField.  The two
    # operations touch different models, so their relative order is not
    # significant here.
    operations = [
        sortedm2m.operations.AlterSortedManyToManyField(
            model_name='m2mtosortedm2m',
            name='m2m',
            field=sortedm2m.fields.SortedManyToManyField(help_text=None, to='altersortedmanytomanyfield_tests.Target'),
            preserve_default=True,
        ),
        sortedm2m.operations.AlterSortedManyToManyField(
            model_name='sortedm2mtom2m',
            name='m2m',
            field=models.ManyToManyField(to='altersortedmanytomanyfield_tests.Target'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "bbc77b36ded41ed8aad6104c0e7b9e0e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 119,
"avg_line_length": 30.925925925925927,
"alnum_prop": 0.6622754491017964,
"repo_name": "gradel/django-sortedm2m",
"id": "6d072596a80894b8df96b2a5ef1d81bbac2df81f",
"size": "859",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sortedm2m_tests/altersortedmanytomanyfield_tests/django17_migrations/0002_alter_m2m_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "904"
},
{
"name": "HTML",
"bytes": "1188"
},
{
"name": "JavaScript",
"bytes": "4039"
},
{
"name": "Makefile",
"bytes": "132"
},
{
"name": "Python",
"bytes": "81701"
}
],
"symlink_target": ""
} |
"""PValue, PCollection: one node of a dataflow graph.
A node of a dataflow processing graph is a PValue. Currently, there is only
one type: PCollection (a potentially very large set of arbitrary values).
Once created, a PValue belongs to a pipeline and has an associated
transform (of type PTransform), which describes how the value will be
produced when the pipeline gets executed.
"""
from __future__ import absolute_import
import collections
import itertools
from builtins import hex
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Generic
from typing import Iterator
from typing import Optional
from typing import Sequence
from typing import TypeVar
from typing import Union
from past.builtins import unicode
from apache_beam import typehints
from apache_beam.internal import pickler
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
if TYPE_CHECKING:
from apache_beam.transforms import sideinputs
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import Windowing
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
# Public API of this module.
__all__ = [
    'PCollection',
    'TaggedOutput',
    'AsSingleton',
    'AsIter',
    'AsList',
    'AsDict',
    'EmptySideInput',
    ]

# Element type variable for the PCollection[T] generic.
T = TypeVar('T')
class PValue(object):
  """Base class for PCollection.

  Dataflow users should not construct PValue objects directly in their
  pipelines.

  A PValue has the following main characteristics:
    (1) Belongs to a pipeline.  Added during object initialization.
    (2) Has a transform that can compute the value if executed.
    (3) Has a value which is meaningful if the transform was executed.
  """

  def __init__(self,
               pipeline,  # type: Pipeline
               tag=None,  # type: Optional[str]
               element_type=None,  # type: Optional[type]
               windowing=None,  # type: Optional[Windowing]
               is_bounded=True,
              ):
    """Initializes a PValue with all arguments hidden behind keyword arguments.

    Args:
      pipeline: Pipeline object for this PValue.
      tag: Tag of this PValue.
      element_type: The type of this PValue.
    """
    self.pipeline = pipeline
    self.tag = tag
    self.element_type = element_type
    # Filled in when the generating PTransform is applied to the pipeline.
    self.producer = None  # type: Optional[AppliedPTransform]
    self.is_bounded = is_bounded
    # A falsy windowing is treated like "not provided": the attribute stays
    # unset so subclasses may compute it lazily.
    if windowing:
      self._windowing = windowing

  def __str__(self):
    return self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))

  def _str_internal(self):
    producer_label = self.producer.full_label if self.producer else None
    return "%s[%s.%s]" % (self.__class__.__name__, producer_label, self.tag)

  def apply(self, *args, **kwargs):
    """Applies a transform or callable to a PValue.

    Args:
      *args: positional arguments.
      **kwargs: keyword arguments.

    The method will insert the pvalue as the next argument following an
    optional first label and a transform/callable object.  It will call the
    pipeline.apply() method with this modified argument list.
    """
    full_args = args[:1] + (self,) + args[1:]
    return self.pipeline.apply(*full_args, **kwargs)

  def __or__(self, ptransform):
    return self.pipeline.apply(ptransform, self)
class PCollection(PValue, Generic[T]):
  """A multiple values (potentially huge) container.

  Dataflow users should not construct PCollection objects directly in their
  pipelines.
  """

  def __eq__(self, other):
    # Structural equality: same tag produced by the same applied transform.
    if isinstance(other, PCollection):
      return self.tag == other.tag and self.producer == other.producer
    # BUG FIX: previously this fell off the end and implicitly returned
    # None for non-PCollection operands.  Returning NotImplemented lets
    # Python try the reflected comparison and fall back to identity, while
    # the overall `==` result stays falsy as before.
    return NotImplemented

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash((self.tag, self.producer))

  @property
  def windowing(self):
    # type: () -> Windowing
    # Computed lazily from the producing transform on first access.
    if not hasattr(self, '_windowing'):
      self._windowing = self.producer.transform.get_windowing(
          self.producer.inputs)
    return self._windowing

  def __reduce_ex__(self, unused_version):
    # Pickling a PCollection is almost always the wrong thing to do, but we
    # can't prohibit it as it often gets implicitly picked up (e.g. as part
    # of a closure).
    return _InvalidUnpickledPCollection, ()

  @staticmethod
  def from_(pcoll):
    # type: (PValue) -> PCollection
    """Create a PCollection, using another PCollection as a starting point.

    Transfers relevant attributes.
    """
    return PCollection(pcoll.pipeline, is_bounded=pcoll.is_bounded)

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.PCollection
    return beam_runner_api_pb2.PCollection(
        unique_name=self._unique_name(),
        coder_id=context.coder_id_from_element_type(self.element_type),
        is_bounded=beam_runner_api_pb2.IsBounded.BOUNDED
        if self.is_bounded
        else beam_runner_api_pb2.IsBounded.UNBOUNDED,
        windowing_strategy_id=context.windowing_strategies.get_id(
            self.windowing))

  def _unique_name(self):
    # type: () -> str
    # Prefix with the label length to avoid collisions between labels that
    # are prefixes of one another.
    if self.producer:
      return '%d%s.%s' % (
          len(self.producer.full_label), self.producer.full_label, self.tag)
    else:
      return 'PCollection%s' % id(self)

  @staticmethod
  def from_runner_api(proto, context):
    # type: (beam_runner_api_pb2.PCollection, PipelineContext) -> PCollection
    # Producer and tag will be filled in later, the key point is that the
    # same object is returned for the same pcollection id.
    return PCollection(
        None,
        element_type=context.element_type_from_coder_id(proto.coder_id),
        windowing=context.windowing_strategies.get_by_id(
            proto.windowing_strategy_id),
        is_bounded=proto.is_bounded == beam_runner_api_pb2.IsBounded.BOUNDED)
class _InvalidUnpickledPCollection(object):
  # Stateless placeholder that PCollection.__reduce_ex__ substitutes for a
  # pickled PCollection, since a real PCollection cannot survive pickling.
  pass
class PBegin(PValue):
  """A pipeline begin marker used as input to create/read transforms.

  The class is used internally to represent inputs to Create and Read
  transforms. This allows us to have transforms that uniformly take PValue(s)
  as inputs.
  """
  pass
class PDone(PValue):
  """PDone is the output of a transform that has a trivial result such as Write.
  """
  pass
class DoOutputsTuple(object):
  """An object grouping the multiple outputs of a ParDo or FlatMap transform."""

  def __init__(self,
               pipeline,  # type: Pipeline
               transform,  # type: ParDo
               tags,  # type: Sequence[str]
               main_tag  # type: Optional[str]
              ):
    self._pipeline = pipeline
    self._tags = tags
    self._main_tag = main_tag
    self._transform = transform
    # The ApplyPTransform instance for the application of the multi FlatMap
    # generating this value. The field gets initialized when a transform
    # gets applied.
    self.producer = None  # type: Optional[AppliedPTransform]
    # Dictionary of PCollections already associated with tags.
    self._pcolls = {}  # type: Dict[Optional[str], PValue]

  def __str__(self):
    return '<%s>' % self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))

  def _str_internal(self):
    return '%s main_tag=%s tags=%s transform=%s' % (
        self.__class__.__name__, self._main_tag, self._tags, self._transform)

  def __iter__(self):
    # type: () -> Iterator[PValue]
    """Iterates over tags returning for each call a (tag, pvalue) pair."""
    # Main output first (if any), then the tagged outputs in declared order.
    if self._main_tag is not None:
      yield self[self._main_tag]
    for tag in self._tags:
      yield self[tag]

  def __getattr__(self, tag):
    # type: (str) -> PValue
    # Special methods which may be accessed before the object is
    # fully constructed (e.g. in unpickling).
    if tag[:2] == tag[-2:] == '__':
      return object.__getattr__(self, tag)  # type: ignore
    return self[tag]

  def __getitem__(self, tag):
    # type: (Union[int, str, None]) -> PValue
    # Accept int tags so that we can look at Partition tags with the
    # same ints that we used in the partition function.
    # TODO(gildea): Consider requiring string-based tags everywhere.
    # This will require a partition function that does not return ints.
    if isinstance(tag, int):
      tag = str(tag)
    # The main output is stored internally under the None key.
    if tag == self._main_tag:
      tag = None
    elif self._tags and tag not in self._tags:
      raise ValueError(
          "Tag '%s' is neither the main tag '%s' "
          "nor any of the tags %s" % (
              tag, self._main_tag, self._tags))
    # Check if we accessed this tag before.
    if tag in self._pcolls:
      return self._pcolls[tag]
    assert self.producer is not None
    if tag is not None:
      self._transform.output_tags.add(tag)
      pcoll = PCollection(self._pipeline, tag=tag, element_type=typehints.Any)  # type: PValue
      # Transfer the producer from the DoOutputsTuple to the resulting
      # PCollection.
      pcoll.producer = self.producer.parts[0]
      # Add this as an output to both the inner ParDo and the outer _MultiParDo
      # PTransforms.
      if tag not in self.producer.parts[0].outputs:
        self.producer.parts[0].add_output(pcoll, tag)
      self.producer.add_output(pcoll, tag)
    else:
      # Main output is output of inner ParDo.
      pcoll = self.producer.parts[0].outputs[None]
    # Cache so repeated access returns the identical PCollection object.
    self._pcolls[tag] = pcoll
    return pcoll
class TaggedOutput(object):
  """An object representing a tagged value.

  ParDo, Map, and FlatMap transforms can emit values on multiple outputs which
  are distinguished by string tags. The DoFn will return plain values
  if it wants to emit on the main output and TaggedOutput objects
  if it wants to emit a value on a specific tagged output.
  """

  def __init__(self, tag, value):
    # type: (str, Any) -> None
    if isinstance(tag, (str, unicode)):
      self.tag = tag
      self.value = value
    else:
      raise TypeError(
          'Attempting to create a TaggedOutput with non-string tag %s' % (tag,))
class AsSideInput(object):
  """Marker specifying that a PCollection will be used as a side input.

  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate how the PCollection should be made available
  as a PTransform side argument (e.g. in the form of an iterable, mapping,
  or single value).  This class is the superclass of all the various
  options, and should not be instantiated directly. (See instead AsSingleton,
  AsIter, etc.)
  """

  def __init__(self, pcoll):
    # type: (PCollection) -> None
    # Local import to avoid a circular dependency with transforms.sideinputs.
    from apache_beam.transforms import sideinputs
    self.pvalue = pcoll
    self._window_mapping_fn = sideinputs.default_window_mapping_fn(
        pcoll.windowing.windowfn)

  def _view_options(self):
    """Internal options corresponding to specific view.

    Intended for internal use by runner implementations.

    Returns:
      Tuple of options for the given view.
    """
    return {'window_mapping_fn': self._window_mapping_fn}

  @property
  def element_type(self):
    # Subclasses may narrow this; by default the element type is unknown.
    return typehints.Any

  # TODO(robertwb): Get rid of _from_runtime_iterable and _view_options
  # in favor of _side_input_data().
  def _side_input_data(self):
    # type: () -> SideInputData
    view_options = self._view_options()
    # Bind the subclass hook and options into a pickleable closure.
    from_runtime_iterable = type(self)._from_runtime_iterable
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        lambda iterable: from_runtime_iterable(iterable, view_options))

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.SideInput
    return self._side_input_data().to_runner_api(context)

  @staticmethod
  def from_runner_api(proto,  # type: beam_runner_api_pb2.SideInput
                      context  # type: PipelineContext
                     ):
    # type: (...) -> _UnpickledSideInput
    return _UnpickledSideInput(
        SideInputData.from_runner_api(proto, context))

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Subclass hook: turn the raw iterable into the view (list, dict, ...).
    raise NotImplementedError

  def requires_keyed_input(self):
    return False
class _UnpickledSideInput(AsSideInput):
  # Side input reconstructed from a runner-API proto; wraps a SideInputData
  # instead of a live PCollection, so AsSideInput.__init__ is bypassed.

  def __init__(self, side_input_data):
    # type: (SideInputData) -> None
    self._data = side_input_data
    self._window_mapping_fn = side_input_data.window_mapping_fn

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Delegate to the view_fn carried in the deserialized SideInputData.
    return options['data'].view_fn(it)

  def _view_options(self):
    return {
        'data': self._data,
        # For non-fn-api runners.
        'window_mapping_fn': self._data.window_mapping_fn,
    }

  def _side_input_data(self):
    return self._data
class SideInputData(object):
  """All of the data about a side input except for the bound PCollection."""

  def __init__(self,
               access_pattern,  # type: str
               window_mapping_fn,  # type: sideinputs.WindowMappingFn
               view_fn
              ):
    self.access_pattern = access_pattern
    self.window_mapping_fn = window_mapping_fn
    self.view_fn = view_fn

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.SideInput
    # view_fn and window_mapping_fn are serialized as pickled payloads under
    # Python-specific URNs; from_runner_api below asserts the same URNs.
    return beam_runner_api_pb2.SideInput(
        access_pattern=beam_runner_api_pb2.FunctionSpec(
            urn=self.access_pattern),
        view_fn=beam_runner_api_pb2.SdkFunctionSpec(
            environment_id=context.default_environment_id(),
            spec=beam_runner_api_pb2.FunctionSpec(
                urn=python_urns.PICKLED_VIEWFN,
                payload=pickler.dumps(self.view_fn))),
        window_mapping_fn=beam_runner_api_pb2.SdkFunctionSpec(
            environment_id=context.default_environment_id(),
            spec=beam_runner_api_pb2.FunctionSpec(
                urn=python_urns.PICKLED_WINDOW_MAPPING_FN,
                payload=pickler.dumps(self.window_mapping_fn))))

  @staticmethod
  def from_runner_api(proto, unused_context):
    # type: (beam_runner_api_pb2.SideInput, PipelineContext) -> SideInputData
    # Only protos produced by to_runner_api above can be decoded here.
    assert proto.view_fn.spec.urn == python_urns.PICKLED_VIEWFN
    assert (proto.window_mapping_fn.spec.urn ==
            python_urns.PICKLED_WINDOW_MAPPING_FN)
    return SideInputData(
        proto.access_pattern.urn,
        pickler.loads(proto.window_mapping_fn.spec.payload),
        pickler.loads(proto.view_fn.spec.payload))
class AsSingleton(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.

  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate whether the entire PCollection should be made available
  as a PTransform side argument (in the form of an iterable), or whether just
  one value should be pulled from the PCollection and supplied as the side
  argument (as an ordinary value).

  Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsSingleton(my_side_input) )
  selects the latter behavior.

  The input PCollection must contain exactly one value per window, unless a
  default is given, in which case it may be empty.
  """
  # Sentinel distinguishing "no default supplied" from any real default value.
  _NO_DEFAULT = object()

  def __init__(self, pcoll, default_value=_NO_DEFAULT):
    # type: (PCollection, Any) -> None
    super(AsSingleton, self).__init__(pcoll)
    self.default_value = default_value

  def __repr__(self):
    return 'AsSingleton(%s)' % self.pvalue

  def _view_options(self):
    base = super(AsSingleton, self)._view_options()
    # Identity comparison: _NO_DEFAULT is a sentinel object, and using `!=`
    # would invoke the (arbitrary) __ne__ of a user-supplied default value,
    # which may raise or return a non-boolean (e.g. numpy arrays).
    if self.default_value is not AsSingleton._NO_DEFAULT:
      return dict(base, default=self.default_value)
    return base

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Peek at most two elements to detect the empty / >1 element cases.
    head = list(itertools.islice(it, 2))
    if not head:
      return options.get('default', EmptySideInput())
    elif len(head) == 1:
      return head[0]
    raise ValueError(
        'PCollection of size %d with more than one element accessed as a '
        'singleton view. First two elements encountered are "%s", "%s".' % (
            len(head), str(head[0]), str(head[1])))

  @property
  def element_type(self):
    return self.pvalue.element_type
class AsIter(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.

  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate whether the entire PCollection should be made available
  as a PTransform side argument (in the form of an iterable), or whether just
  one value should be pulled from the PCollection and supplied as the side
  argument (as an ordinary value).

  Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsIter(my_side_input) ) selects
  the former behavior.
  """

  def __repr__(self):
    return 'AsIter(%s)' % self.pvalue

  @staticmethod
  def _from_runtime_iterable(it, options):
    # The raw runtime iterable is already the desired view.
    return it

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        lambda iterable: iterable)

  @property
  def element_type(self):
    return typehints.Iterable[self.pvalue.element_type]
class AsList(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.

  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but forces materialization of this
  PCollection as a list.

  Args:
    pcoll: Input pcollection.

  Returns:
    An AsList-wrapper around a PCollection whose one element is a list
    containing all elements in pcoll.
  """

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Materialize the entire iterable eagerly.
    return list(it)

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, list)
class AsDict(AsSideInput):
  """Marker specifying a PCollection to be used as an indexable side input.

  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but returns an interface that allows
  key lookup.

  Args:
    pcoll: Input pcollection. All elements should be key-value pairs (i.e.
       2-tuples) with unique keys.

  Returns:
    An AsDict-wrapper around a PCollection whose one element is a dict with
      entries for uniquely-keyed pairs in pcoll.
  """

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Each element must be a 2-tuple; later duplicates overwrite earlier ones.
    return dict(it)

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, dict)
class AsMultiMap(AsSideInput):
  """Marker specifying a PCollection to be used as an indexable side input.

  Similar to AsDict, but multiple values may be associated per key, and
  the keys are fetched lazily rather than all having to fit in memory.

  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but returns an interface that allows
  key lookup.
  """

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Legacy implementation: eagerly group all values by key.
    grouped = collections.defaultdict(list)
    for key, value in it:
      grouped[key].append(value)
    return grouped

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        self._window_mapping_fn,
        lambda x: x)

  def requires_keyed_input(self):
    # Multimap lookup only makes sense over key-value elements.
    return True
class EmptySideInput(object):
  """Value indicating when a singleton side input was empty.

  If a PCollection was furnished as a singleton side input to a PTransform, and
  that PCollection was empty, then this value is supplied to the DoFn in the
  place where a value from a non-empty PCollection would have gone. This alerts
  the DoFn that the side input PCollection was empty. Users may want to check
  whether side input values are EmptySideInput, but they will very likely never
  want to create new instances of this class themselves.
  """
| {
"content_hash": "5f8576616124761c48d2e74d7a9c9cb2",
"timestamp": "",
"source": "github",
"line_count": 617,
"max_line_length": 94,
"avg_line_length": 33.233387358184764,
"alnum_prop": 0.6803706413069983,
"repo_name": "RyanSkraba/beam",
"id": "1aa89136b6933882d7c45e3e8889fafdb554b626",
"size": "21290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/pvalue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
"""Loaders for deserializing records in the REST API."""
from .marshmallow import json_patch_loader, marshmallow_loader
from ..schemas import RecordSchemaJSONV1
json_v1 = marshmallow_loader(RecordSchemaJSONV1)
"""Simple example loader that will take any JSON."""
json_patch_v1 = json_patch_loader
"""Simple example loader that will take any JSON patch."""
__all__ = (
'json_v1',
'json_patch_loader',
)
| {
"content_hash": "4af1c1b50a691433617041a369041646",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 27.6,
"alnum_prop": 0.7294685990338164,
"repo_name": "tiborsimko/invenio-records-rest",
"id": "84aef63f0cf62de204dc15bbfac5bdd1066d311c",
"size": "649",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "invenio_records_rest/loaders/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255451"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
} |
"""Utility functions for Kalibrate output paring."""
import decimal
from . import sanity
def options_string_builder(option_mapping, args):
    """Return arguments for CLI invocation of kal.

    Args:
        option_mapping(dict): maps option names to their CLI flags.
        args(dict): option values supplied by the caller.
    """
    chunks = [" %s %s" % (flag, args[opt])
              for opt, flag in option_mapping.items()
              if opt in args]
    return "".join(chunks)
def build_kal_scan_band_string(kal_bin, band, args):
    """Return string for CLI invocation of kal, for band scan.

    Raises:
        ValueError: if `band` is not a supported band designation.
    """
    flags_by_option = {"gain": "-g",
                       "device": "-d",
                       "error": "-e"}
    if not sanity.scan_band_is_valid(band):
        raise ValueError("Unsupported band designation: %s" % band)
    command = "%s -v -s %s" % (kal_bin, band)
    return command + options_string_builder(flags_by_option, args)
def build_kal_scan_channel_string(kal_bin, channel, args):
    """Return string for CLI invocation of kal, for channel scan."""
    flags_by_option = {"gain": "-g",
                       "device": "-d",
                       "error": "-e"}
    command = "%s -v -c %s" % (kal_bin, channel)
    return command + options_string_builder(flags_by_option, args)
def herz_me(val):
    """Return numeric value in Hz, translated from (MHz|kHz|Hz).

    Args:
        val(str or bytes): frequency with unit suffix, e.g. "935.2MHz".

    Returns:
        float value in Hz, or 0 if no recognized suffix is present.
    """
    result = 0
    if isinstance(val, bytes):
        # BUG FIX: str(b"...") on Python 3 produces "b'...'", which made every
        # suffix check below fail and silently returned 0 for bytes input.
        # Decode instead, matching the rest of this module.
        val = val.decode("utf-8")
    # Check "MHz" before "kHz"/"Hz", since both are also suffixes of "MHz".
    if val.endswith("MHz"):
        result = float(val.replace("MHz", "")) * 1000000
    elif val.endswith("kHz"):
        result = float(val.replace("kHz", "")) * 1000
    elif val.endswith("Hz"):
        result = float(val.replace("Hz", ""))
    return result
def determine_final_freq(base, direction, modifier):
    """Return frequency computed from base, direction ("+"/"-") and modifier."""
    if isinstance(direction, bytes):
        direction = direction.decode("utf-8")
    if direction == "+":
        return base + modifier
    if direction == "-":
        return base - modifier
    # Unknown direction: mirror the original fallback of zero.
    return 0
def to_eng(num_in):
    """Return number formatted in engineering notation."""
    return decimal.Decimal(str(num_in)).normalize().to_eng_string()
def determine_scan_band(kal_out):
    """Return band for scan results, or "NotFound" when absent."""
    band = extract_value_from_output(" Scanning for ", -3, kal_out)
    return "NotFound" if band is None else band
def determine_device(kal_out):
    """Extract and return device description from scan results.

    Args:
        kal_out(bytes): raw output from kal.

    Returns:
        str device description, or None if no "Using device" line was found.
    """
    device = None
    for raw_line in kal_out.splitlines():
        line = raw_line.decode("utf-8")
        if "Using device " in line:
            # Last matching line wins, as in the original scan order.
            device = str(line.split(' ', 2)[-1])
    # BUG FIX: the previous `while device == ""` wrapper looped forever if a
    # matching line produced an empty device string; a single pass cannot
    # hang, and an empty result is normalized to None.
    return device or None
def determine_scan_gain(kal_out):
    """Return gain parsed from scan results."""
    return extract_value_from_output("Setting gain: ", 2, kal_out)
def determine_sample_rate(kal_out):
    """Return sample rate from scan results."""
    # Offset -2 picks the second-to-last whitespace token of the line
    # containing "Exact sample rate".
    return extract_value_from_output("Exact sample rate", -2, kal_out)
def extract_value_from_output(canary, split_offset, kal_out):
    """Return value parsed from output.

    Args:
        canary(str): This string must exist in the target line.
        split_offset(int): Split offset for target value in string.
        kal_out(bytes): Output from kal.

    Returns:
        str token at `split_offset`, or None if no line contains the canary.
    """
    retval = None
    for raw_line in kal_out.splitlines():
        line = raw_line.decode("utf-8")
        if canary in line:
            # Last matching line wins, as in the original scan order.
            retval = line.split()[split_offset]
    # BUG FIX: the previous `while retval == ""` wrapper retried forever when
    # a matching line yielded an empty token; a single pass cannot hang, and
    # an empty result is normalized to None.
    return retval or None
def determine_avg_absolute_error(kal_out):
    """Return average absolute error from kal output."""
    # Offset -2 picks the second-to-last whitespace token of the line
    # containing "average absolute error: ".
    return extract_value_from_output("average absolute error: ",
                                     -2, kal_out)
def determine_chan_detect_threshold(kal_out):
    """Return channel detect threshold from kal output.

    Args:
        kal_out(bytes): raw output from kal.

    Returns:
        str threshold value, or None if it could not be parsed.
    """
    channel_detect_threshold = None
    for raw_line in kal_out.splitlines():
        line = raw_line.decode("utf-8")
        if "channel detect threshold: " in line:
            channel_detect_threshold = str(line.split()[-1])
    if not channel_detect_threshold:
        # BUG FIX: the original `while not channel_detect_threshold` loop
        # never terminated when the threshold was absent (it reset the value
        # to None and retried forever), and the message wrongly mentioned
        # the sample rate.
        print("Unable to parse channel detect threshold")
        channel_detect_threshold = None
    return channel_detect_threshold
def determine_band_channel(kal_out):
    """Return (band, channel, target frequency) parsed from kal output.

    Args:
        kal_out(bytes): raw output from kal.

    Returns:
        Tuple of three strings, or (None, None, None) when no
        "Using ... channel ..." line exists.
    """
    band = None
    channel = None
    tgt_freq = None
    for raw_line in kal_out.splitlines():
        line = raw_line.decode("utf-8")
        if "Using " in line and " channel " in line:
            parts = line.split()
            band = parts[1]
            channel = parts[3]
            tgt_freq = parts[4].replace("(", "").replace(")", "")
    # BUG FIX: the original `while band is None` loop spun forever when the
    # output contained no matching line; a single pass simply returns the
    # (None, None, None) triple instead.
    return (band, channel, tgt_freq)
def parse_kal_scan(kal_out):
    """Parse kal band scan output into a list of per-channel dicts."""
    # Scan-wide metadata shared by every channel entry.
    scan_band = determine_scan_band(kal_out)
    scan_gain = determine_scan_gain(kal_out)
    scan_device = determine_device(kal_out)
    sample_rate = determine_sample_rate(kal_out)
    chan_detect_threshold = determine_chan_detect_threshold(kal_out)
    kal_data = []
    for raw_line in kal_out.splitlines():
        line = raw_line.decode("utf-8")
        if "chan:" not in line:
            continue
        fields = line.split(" ")
        modifier = fields[3]
        # Strip the surrounding punctuation from the frequency tokens.
        base_freq = herz_me(fields[2].replace('(', ''))
        mod_freq = herz_me(fields[4].replace(')\tpower:', ''))
        final_freq = to_eng(determine_final_freq(base_freq, modifier,
                                                 mod_freq))
        kal_data.append({"channel": fields[1],
                         "base_freq": base_freq,
                         "mod_freq": mod_freq,
                         "modifier": modifier,
                         "final_freq": final_freq,
                         "power": fields[5],
                         "band": scan_band,
                         "gain": scan_gain,
                         "device": scan_device,
                         "sample_rate": sample_rate,
                         "channel_detect_threshold": chan_detect_threshold})
    return kal_data
def parse_kal_channel(kal_out):
    """Parse kal channel scan output into a single result dict."""
    band, channel, tgt_freq = determine_band_channel(kal_out)
    return {"device": determine_device(kal_out),
            "sample_rate": determine_sample_rate(kal_out),
            "gain": determine_scan_gain(kal_out),
            "band": band,
            "channel": channel,
            "frequency": tgt_freq,
            "avg_absolute_error": determine_avg_absolute_error(kal_out),
            "measurements": get_measurements_from_kal_scan(kal_out),
            "raw_scan_result": kal_out}
def get_measurements_from_kal_scan(kal_out):
    """Return a list of all measurements from kalibrate channel scan."""
    decoded = (raw.decode("utf-8") for raw in kal_out.splitlines())
    # The measurement is the final space-separated token of "offset" lines.
    return [line.split(' ')[-1] for line in decoded if "offset " in line]
| {
"content_hash": "6717fa2cbac433d58d3d2f700b63d956",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 76,
"avg_line_length": 34.148648648648646,
"alnum_prop": 0.5621949610869278,
"repo_name": "ashmastaflash/kal-wrapper",
"id": "ac51a438db54bb1d9e4cc155e0110201eddafd0b",
"size": "7581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kalibrate/fn.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18103"
},
{
"name": "Shell",
"bytes": "50"
}
],
"symlink_target": ""
} |
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FamilyNameV30Rc2(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'value': 'str'
    }

    attribute_map = {
        'value': 'value'
    }

    def __init__(self, value=None):  # noqa: E501
        """FamilyNameV30Rc2 - a model defined in Swagger"""  # noqa: E501
        self._value = None
        self.discriminator = None
        if value is not None:
            self.value = value

    @property
    def value(self):
        """Gets the value of this FamilyNameV30Rc2.  # noqa: E501


        :return: The value of this FamilyNameV30Rc2.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this FamilyNameV30Rc2.


        :param value: The value of this FamilyNameV30Rc2.  # noqa: E501
        :type: str
        """
        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Plain dict iteration replaces six.iteritems: it behaves identically
        # on Python 2 and 3 and removes the third-party `six` dependency from
        # this hot path.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(FamilyNameV30Rc2, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FamilyNameV30Rc2):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| {
"content_hash": "bc05f58835599630d9dcdd862b1b703d",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 119,
"avg_line_length": 28.01834862385321,
"alnum_prop": 0.5399476096922069,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "fddfee29b3ba8c01f3ecc5f59064cea7e8237a17",
"size": "3071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/family_name_v30_rc2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
"""This module contains SFTP operator."""
import os
from pathlib import Path
from typing import Any
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.ssh.hooks.ssh import SSHHook
class SFTPOperation:
    """Operations that can be used with SFTP."""

    PUT = 'put'
    GET = 'get'
class SFTPOperator(BaseOperator):
    """
    SFTPOperator for transferring files from remote host to local or vice a versa.
    This operator uses ssh_hook to open sftp transport channel that serve as basis
    for file transfer.

    :param ssh_hook: predefined ssh_hook to use for remote execution.
        Either `ssh_hook` or `ssh_conn_id` needs to be provided.
    :type ssh_hook: airflow.providers.ssh.hooks.ssh.SSHHook
    :param ssh_conn_id: :ref:`ssh connection id<howto/connection:ssh>`
        from airflow Connections. `ssh_conn_id` will be ignored if `ssh_hook`
        is provided.
    :type ssh_conn_id: str
    :param remote_host: remote host to connect (templated)
        Nullable. If provided, it will replace the `remote_host` which was
        defined in `ssh_hook` or predefined in the connection of `ssh_conn_id`.
    :type remote_host: str
    :param local_filepath: local file path to get or put. (templated)
    :type local_filepath: str
    :param remote_filepath: remote file path to get or put. (templated)
    :type remote_filepath: str
    :param operation: specify operation 'get' or 'put', defaults to put
    :type operation: str
    :param confirm: specify if the SFTP operation should be confirmed, defaults to True
    :type confirm: bool
    :param create_intermediate_dirs: create missing intermediate directories when
        copying from remote to local and vice-versa. Default is False.

        Example: The following task would copy ``file.txt`` to the remote host
        at ``/tmp/tmp1/tmp2/`` while creating ``tmp``,``tmp1`` and ``tmp2`` if they
        don't exist. If the parameter is not passed it would error as the directory
        does not exist. ::

            put_file = SFTPOperator(
                task_id="test_sftp",
                ssh_conn_id="ssh_default",
                local_filepath="/tmp/file.txt",
                remote_filepath="/tmp/tmp1/tmp2/file.txt",
                operation="put",
                create_intermediate_dirs=True,
                dag=dag
            )

    :type create_intermediate_dirs: bool
    """

    template_fields = ('local_filepath', 'remote_filepath', 'remote_host')

    def __init__(
        self,
        *,
        ssh_hook=None,
        ssh_conn_id=None,
        remote_host=None,
        local_filepath=None,
        remote_filepath=None,
        operation=SFTPOperation.PUT,
        confirm=True,
        create_intermediate_dirs=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.ssh_hook = ssh_hook
        self.ssh_conn_id = ssh_conn_id
        self.remote_host = remote_host
        self.local_filepath = local_filepath
        self.remote_filepath = remote_filepath
        self.operation = operation
        self.confirm = confirm
        self.create_intermediate_dirs = create_intermediate_dirs
        if not (self.operation.lower() == SFTPOperation.GET or self.operation.lower() == SFTPOperation.PUT):
            raise TypeError(
                f"Unsupported operation value {self.operation}, "
                f"expected {SFTPOperation.GET} or {SFTPOperation.PUT}."
            )

    def execute(self, context: Any) -> str:
        file_msg = None
        try:
            if self.ssh_conn_id:
                if self.ssh_hook and isinstance(self.ssh_hook, SSHHook):
                    self.log.info("ssh_conn_id is ignored when ssh_hook is provided.")
                else:
                    self.log.info(
                        "ssh_hook is not provided or invalid. Trying ssh_conn_id to create SSHHook."
                    )
                    self.ssh_hook = SSHHook(ssh_conn_id=self.ssh_conn_id)

            if not self.ssh_hook:
                raise AirflowException("Cannot operate without ssh_hook or ssh_conn_id.")

            if self.remote_host is not None:
                self.log.info(
                    "remote_host is provided explicitly. "
                    "It will replace the remote_host which was defined "
                    "in ssh_hook or predefined in connection of ssh_conn_id."
                )
                self.ssh_hook.remote_host = self.remote_host

            with self.ssh_hook.get_conn() as ssh_client:
                sftp_client = ssh_client.open_sftp()
                if self.operation.lower() == SFTPOperation.GET:
                    local_folder = os.path.dirname(self.local_filepath)
                    if self.create_intermediate_dirs:
                        Path(local_folder).mkdir(parents=True, exist_ok=True)
                    file_msg = f"from {self.remote_filepath} to {self.local_filepath}"
                    self.log.info("Starting to transfer %s", file_msg)
                    sftp_client.get(self.remote_filepath, self.local_filepath)
                else:
                    remote_folder = os.path.dirname(self.remote_filepath)
                    if self.create_intermediate_dirs:
                        _make_intermediate_dirs(
                            sftp_client=sftp_client,
                            remote_directory=remote_folder,
                        )
                    file_msg = f"from {self.local_filepath} to {self.remote_filepath}"
                    self.log.info("Starting to transfer file %s", file_msg)
                    sftp_client.put(self.local_filepath, self.remote_filepath, confirm=self.confirm)

        except Exception as e:
            # BUG FIX: chain the original exception (PEP 3134) so the root
            # cause is preserved in the traceback instead of being flattened
            # into the message string only.
            raise AirflowException(f"Error while transferring {file_msg}, error: {str(e)}") from e

        return self.local_filepath
def _make_intermediate_dirs(sftp_client, remote_directory) -> None:
"""
Create all the intermediate directories in a remote host
:param sftp_client: A Paramiko SFTP client.
:param remote_directory: Absolute Path of the directory containing the file
:return:
"""
if remote_directory == '/':
sftp_client.chdir('/')
return
if remote_directory == '':
return
try:
sftp_client.chdir(remote_directory)
except OSError:
dirname, basename = os.path.split(remote_directory.rstrip('/'))
_make_intermediate_dirs(sftp_client, dirname)
sftp_client.mkdir(basename)
sftp_client.chdir(basename)
return
| {
"content_hash": "ce6587bcf35196a894b5f5d75189bd1d",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 108,
"avg_line_length": 40.41104294478528,
"alnum_prop": 0.599514194625778,
"repo_name": "apache/incubator-airflow",
"id": "3a1d023a9a414145ded452195e68427921a56886",
"size": "7374",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/sftp/operators/sftp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
"""Test Chipsec client actions."""
__author__ = "Thiebaud Weksteen <tweksteen@gmail.com>"
import collections
import sys
import mock
from chipsec.helper import oshelper
from grr.client import vfs
from grr.client.components.chipsec_support.actions import chipsec_types
from grr.lib import flags
from grr.lib import test_lib
class MockUnknownChipsetError(RuntimeError):
  """Stand-in for chipsec's UnknownChipsetError in these tests."""
class MockSPI(mock.MagicMock):
  # Fake SPI interface: one fixed flash region and 0xff-filled reads.

  def get_SPI_region(self, unused_region):  # pylint: disable=invalid-name
    # (base, limit, freg) triple for any requested region.
    return (0, 0xffff, 0)

  def read_spi(self, unused_offset, size):
    # Flash content is all-ones, regardless of offset.
    return "\xff" * size
class UnsupportedChipset(mock.MagicMock):
  # Fake chipset whose init always fails to identify the platform.

  def init(self, unused_platform, unused_load_driver):
    msg = "Unsupported Platform: VID = 0x0000, DID = 0x0000"
    raise MockUnknownChipsetError(msg)
class FailingOsHelperChipset(mock.MagicMock):
  # Fake chipset whose init fails in chipsec's OS helper layer.

  def init(self, unused_platform, unused_load_driver):
    msg = "Unable to open /sys/bus/pci/devices/0000:00:00.0/config"
    raise oshelper.OsHelperError(msg, -1)
class GRRChipsecTest(test_lib.EmptyActionTest):
  """Generic test class for GRR-Chipsec actions."""

  def setUp(self):
    """Install a mocked chipsec package and import grr_chipsec against it."""
    # Mock the interface for Chipsec
    self.chipsec_mock = mock.MagicMock()
    self.chipsec_mock.chipset = mock.MagicMock()
    self.chipsec_mock.chipset.UnknownChipsetError = MockUnknownChipsetError
    self.chipsec_mock.hal = mock.MagicMock()
    self.chipsec_mock.logger = mock.MagicMock()

    # Patch sys.modules BEFORE importing grr_chipsec below, so that its
    # `import chipsec...` statements resolve to the mocks.
    mock_modules = {
        "chipsec": self.chipsec_mock,
        "chipsec.hal": self.chipsec_mock.hal,
    }

    self.chipsec_patch = mock.patch.dict(sys.modules, mock_modules)
    self.chipsec_patch.start()

    # Import the ClientAction to test with the Chipsec mock in place.
    # pylint: disable=g-import-not-at-top, unused-variable
    from grr.client.components.chipsec_support.actions import grr_chipsec
    # pylint: enable=g-import-not-at-top, unused-variable

    # Keep a reference to the module so child classes may mock its content.
    self.grr_chipsec_module = grr_chipsec
    self.grr_chipsec_module.chipset = self.chipsec_mock.chipset
    self.grr_chipsec_module.logger = self.chipsec_mock.logger

  def tearDown(self):
    """Remove the fake chipsec modules from sys.modules."""
    self.chipsec_patch.stop()
class TestDumpFlashImage(GRRChipsecTest):
  """Test the client dump flash image action."""

  def setUp(self):
    """Route the SPI HAL to MockSPI so reads return 0xff bytes."""
    super(TestDumpFlashImage, self).setUp()
    self.chipsec_mock.hal.spi = mock.MagicMock()
    self.chipsec_mock.hal.spi.SPI = MockSPI
    self.grr_chipsec_module.spi = self.chipsec_mock.hal.spi

  def testDumpFlashImage(self):
    """Test the basic dump."""
    args = chipsec_types.DumpFlashImageRequest()
    result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
    # MockSPI reports a region of 0x10000 bytes filled with 0xff.
    with vfs.VFSOpen(result.path) as image:
      self.assertEqual(image.read(0x20000), "\xff" * 0x10000)

  def testDumpFlashImageVerbose(self):
    """Test the basic dump with the verbose mode enabled."""
    args = chipsec_types.DumpFlashImageRequest(log_level=1)
    result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
    with vfs.VFSOpen(result.path) as image:
      self.assertEqual(image.read(0x20000), "\xff" * 0x10000)
    # Verbose mode must have routed at least one message through the logger.
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)

  def testDumpFlashImageUnknownChipset(self):
    """By default, if the chipset is unknown, no exception is raised."""
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpFlashImageRequest()
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)

  def testDumpFlashImageUnknownChipsetVerbose(self):
    """Test unknown chipset with verbose mode.

    If the chipset is unknown but verbose enabled, no exception is raised
    and at least one response should be returned with non-empty logs.
    """
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpFlashImageRequest(log_level=1)
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
    self.assertNotEquals(self.chipsec_mock.logger.logger.call_count, 0)
    self.assertGreaterEqual(len(self.results), 1)
    self.assertNotEquals(len(self.results[0].logs), 0)
    # No image could be produced, so the returned path must be empty.
    self.assertEquals(self.results[0].path.path, "")

  def testDumpFlashImageOsHelperErrorChipset(self):
    """If an exception is raised by the helper layer, handle it."""
    self.chipsec_mock.chipset.cs = FailingOsHelperChipset
    args = chipsec_types.DumpFlashImageRequest()
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
class MockACPI(object):
  """Fake chipsec ACPI interface backed by hard-coded tables."""

  def __init__(self, unused_chipset):
    # Mimic the behaviour of tableList in Chipsec: unknown signatures
    # yield an empty list via the defaultdict.
    # pylint: disable=invalid-name
    self.tableList = collections.defaultdict(list, {
        "DSDT": [(0xAABBCCDDEEFF0011)],
        "FACP": [(0x1100FFEEDDCCBBAA)],
        "XSDT": [(0x1122334455667788)],
        "SSDT": [(0x1234567890ABCDEF), (0x2234567890ABCDEF),
                 (0x3234567890ABCDEF)]
    })
    # pylint: enable=invalid-name

    # Maps table address -> (header, content).
    self.table_content = {
        0xAABBCCDDEEFF0011: ("\xFF" * 0xFF, "\xEE" * 0xFF),
        0x1100FFEEDDCCBBAA: ("\xEE" * 0xFF, "\xFF" * 0xFF),
        0x1122334455667788: ("\xAB" * 0xFF, "\xCD" * 0xFF),
        0x1234567890ABCDEF: ("\xEF" * 0xFF, "\xFE" * 0xFF),
        0x2234567890ABCDEF: ("\xDC" * 0xFF, "\xBA" * 0xFF),
        0x3234567890ABCDEF: ("\xAA" * 0xFF, "\xBB" * 0xFF)
    }

  def get_ACPI_table(self, name):  # pylint: disable=invalid-name
    return [self.table_content[addr] for addr in self.tableList[name]]
class MockACPIReadingRestrictedArea(object):
  # Fake ACPI interface whose construction fails like a /dev/mem denial.

  def __init__(self, unused_chipset):
    # Simulate /dev/mem error
    raise OSError("Operation not permitted")

  def get_ACPI_table(self, unused_name):  # pylint: disable=invalid-name
    # Unreachable in practice (init always raises); kept for interface parity.
    return []
class TestDumpACPITable(GRRChipsecTest):
  """Test the client dump ACPI table action."""

  def setUp(self):
    """Route the ACPI HAL to the MockACPI fixture."""
    super(TestDumpACPITable, self).setUp()
    self.chipsec_mock.hal.acpi = mock.MagicMock()
    self.chipsec_mock.hal.acpi.ACPI = MockACPI
    self.grr_chipsec_module.acpi = self.chipsec_mock.hal.acpi

  def testDumpValidSingleACPITable(self):
    """Tests basic valid ACPI table dump."""
    args = chipsec_types.DumpACPITableRequest(table_signature="DSDT")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(len(result.acpi_tables), 1)
    self.assertEqual(result.acpi_tables[0].table_address, 0xAABBCCDDEEFF0011)
    # The blob is the concatenation of header and content from MockACPI.
    self.assertEqual(result.acpi_tables[0].table_blob,
                     "\xFF" * 0xFF + "\xEE" * 0xFF)

  def testDumpValidMultipleACPITables(self):
    """Tests valid ACPI table dump that would yield several tables."""
    args = chipsec_types.DumpACPITableRequest(table_signature="SSDT")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(len(result.acpi_tables), 3)
    self.assertEqual(result.acpi_tables[0].table_address, 0x1234567890ABCDEF)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     "\xEF" * 0xFF + "\xFE" * 0xFF)
    self.assertEqual(result.acpi_tables[1].table_address, 0x2234567890ABCDEF)
    self.assertEqual(result.acpi_tables[1].table_blob,
                     "\xDC" * 0xFF + "\xBA" * 0xFF)
    self.assertEqual(result.acpi_tables[2].table_address, 0x3234567890ABCDEF)
    self.assertEqual(result.acpi_tables[2].table_blob,
                     "\xAA" * 0xFF + "\xBB" * 0xFF)

  def testDumpValidSingleACPITableVerbose(self):
    """Tests valid ACPI table dump with verbose mode enabled."""
    args = chipsec_types.DumpACPITableRequest(
        table_signature="XSDT", logging=True)
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(result.acpi_tables[0].table_address, 0x1122334455667788)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     "\xAB" * 0xFF + "\xCD" * 0xFF)
    # Verbose mode must have routed at least one message through the logger.
    self.assertNotEquals(self.chipsec_mock.logger.logger.call_count, 0)

  def testDumpInvalidACPITable(self):
    """Tests dumping invalid ACPI table."""
    args = chipsec_types.DumpACPITableRequest(table_signature="INVALID_TABLE")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertNotEquals(len(result.logs), 0)

  def testDumpACPITableUnknownChipset(self):
    """By default, if the chipset is unknown, no exception is raised."""
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpACPITableRequest(table_signature="FACP")
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)

  def testDumpACPITableUnknownChipsetVerbose(self):
    """Tests unknown chipset with verbose mode.

    If the chipset is unknown but verbose enabled, no exception is raised
    and at least one response should be returned with non-empty logs.
    """
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpACPITableRequest(
        table_signature="FACP", logging=True)
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
    self.assertNotEquals(self.chipsec_mock.logger.logger.call_count, 0)
    self.assertGreaterEqual(len(self.results), 1)
    self.assertNotEquals(len(self.results[0].logs), 0)

  def testDumpACPITableTriggeringDevMemError(self):
    """Tests the condition where OSError is triggered due to using /dev/mem.

    No exception should be raised, and the log describing the error should be
    returned.
    """
    self.chipsec_mock.acpi.ACPI = MockACPIReadingRestrictedArea
    args = chipsec_types.DumpACPITableRequest(table_signature="FACP")
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
    self.assertGreaterEqual(len(self.results), 1)
    self.assertNotEquals(len(self.results[0].logs), 0)
def main(argv):
# Delegate to the GRR test runner.
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| {
"content_hash": "ae2811934545d03551bebc63e1688a14",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 78,
"avg_line_length": 38.43529411764706,
"alnum_prop": 0.7117641057034997,
"repo_name": "destijl/grr",
"id": "2eef197035239c7e6fcecd07789c5a6a07b88b9f",
"size": "9823",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/client/components/chipsec_support/grr_chipsec_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304794"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26524"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "173692"
},
{
"name": "JavaScript",
"bytes": "63181"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "307091"
},
{
"name": "Python",
"bytes": "6407750"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40334"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
import os
import glob
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocrsite.settings'
from ocrlab import nodes as nodetree_nodes
from nodetree import script, registry
def run(nodelist, outpath):
    """Evaluate a nodetree script and render its result to *outpath*."""
s = script.Script(nodelist)
# Use the first terminal node of the script as the data source.
term = s.get_terminals()[0]
print "Rendering to %s" % outpath
os.environ["NODETREE_WRITE_FILEOUT"] = "1"
# Append a FileOut node wired to the terminal; eval() triggers the render.
out = s.add_node("util.FileOut", "Output",
params=[("path", os.path.abspath(outpath))])
out.set_input(0, term)
out.eval()
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <script> <output>" % sys.argv[0]
sys.exit(1)
nodes = None
# The first argument is a JSON-serialized nodetree node list.
with open(sys.argv[1], "r") as f:
nodes = json.load(f)
if nodes is None:
print "No nodes found in script"
sys.exit(1)
run(nodes, sys.argv[2])
| {
"content_hash": "817501bc41886874564a6cd425058d5c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 57,
"avg_line_length": 24.444444444444443,
"alnum_prop": 0.6102272727272727,
"repo_name": "mikesname/python-ocrlab",
"id": "9c4e898ba22dd21327a1ed89c8d7ecbc6c77b9f6",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocrsite/ocrlab/test_ocr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161890"
}
],
"symlink_target": ""
} |
def compareAndRep(numbers, a, b):
    """Swap the elements at indices ``a`` and ``b`` of ``numbers``.

    The list is modified in place and also returned, so callers can keep
    the existing ``numbers = compareAndRep(numbers, i, j)`` style.
    """
    # Tuple assignment swaps without an explicit temporary variable.
    numbers[a], numbers[b] = numbers[b], numbers[a]
    return numbers
def printList(numbers):
    """Print the list values on one line, separated by single spaces."""
    # str.join builds the space-delimited line in one pass instead of
    # concatenating in a loop; print as a function with a single argument
    # behaves identically on Python 2 and 3.
    print(" ".join(str(n) for n in numbers))
# Read N and then N space-separated integers from stdin (Python 2 I/O).
N = int(raw_input())
numbers = map(int , raw_input().strip().split(" "))
# For each element i, swap it backwards past any larger element of the
# prefix, printing the whole list after every outer pass (HackerRank
# "Insertion Sort Part 2" output format).
for i in range(1 , N):
for j in range (0 , i ):
if(numbers[i] < numbers[j]):
numbers = compareAndRep(numbers , i , j)
printList(numbers)
| {
"content_hash": "e48f75398af3be31df2a9bd2e3f2139e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 51,
"avg_line_length": 21.217391304347824,
"alnum_prop": 0.6086065573770492,
"repo_name": "MajidLashgarian/HackerRank",
"id": "d09779b44258db67cf126ad9856a6bb1a0d2cb6f",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InsertationSort2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5362"
},
{
"name": "Python",
"bytes": "6234"
}
],
"symlink_target": ""
} |
import json
import decimal #for rounding totals
##### HOSPITAL ASSOCIATED INFECTIONS #####
# Load the transposed healthcare-associated infections (HAI) source data.
f = open( '../src/assets/data/src/HAI_transposed.json', 'rU' )
src = json.load(f)
f.close()
tree = []
# Map CMS benchmark-comparison strings to short CSS rating class names.
ratingClasses = {"No Different than National Benchmark":"normal", "Better than the National Benchmark":"good", "Worse than the National Benchmark":"bad", "Not Available": ""}
# Build one record per hospital, collecting per-infection measures.
for node in src:
    hospital = {}
    hospital["id"] = node["provider_id"]
    hospital["display_name"] = node["ajc_hospital_name"]
    hospital["address"] = node["address"]
    hospital["city"] = node["city"]
    hospital["infections"] = {
        "cauti" : {}, "clabsi" : {}, "mrsa" : {}, "ssicolon" : {}, "ssihyst" : {}, "cdiff" : {}
    }
    # Loop through keys looking for the infection substrings (e.g.
    # "CAUTI_days") and fold each parameter into the matching infection dict.
    for key in node.keys():
        tmp = key.lower().split("_")
        inf = tmp[0]
        val = node[key]
        if inf in hospital["infections"]:
            param = tmp[1]
            hospital["infections"][inf][param] = val
            # A missing lower bound is treated as zero.
            if(param == "lower" and val is None):
                hospital["infections"][inf][param] = 0
            # "days"/"procedures" become a labelled "incidents" value.
            if(param == "days"):
                if(inf == "cauti"):
                    hospital["infections"][inf]["incidents_label"] = "Urinary catheter days"
                elif(inf == "clabsi"):
                    hospital["infections"][inf]["incidents_label"] = "Central line days"
                elif(inf == "mrsa" or inf == "cdiff"):
                    hospital["infections"][inf]["incidents_label"] = "Patient days"
                try:
                    hospital["infections"][inf]["incidents"] = "{:,d}".format(val)
                except Exception:
                    # Best effort: a bare except here previously also swallowed
                    # SystemExit/KeyboardInterrupt; Exception is sufficient.
                    # NaN data points are filtered later, but this value is
                    # displayed, so avoid showing "null".
                    hospital["infections"][inf]["incidents"] = "Not available"
                del hospital["infections"][inf][param]  # replaced by "incidents"
            elif(param == "procedures"):
                hospital["infections"][inf]["incidents_label"] = "Procedures"
                hospital["infections"][inf]["incidents"] = val
                del hospital["infections"][inf][param]
            if(param == "category"):
                hospital["infections"][inf]["ratingClass"] = ratingClasses[val]
    tree.append(hospital)
# ft = open( '../src/assets/data/src/infection_avgs_web.json', 'rU')
# src = json.load(ft)
# ft.close()
#
# infDict = {"HAI_1_SIR" : "clabsi", "HAI_2_SIR" : "cauti", "HAI_3_SIR" : "ssicolon", "HAI_4_SIR" : "ssihyst", "HAI_5_SIR" : "mrsa", "HAI_6_SIR" : "cdiff"}
# totals = {"id": "infectionsStateAverages"} #backbone expects an ID and local storage uses it too
#
# for node in src:
# totals[infDict[node["measure"]]] = node["score"]
import urllib2
print "***NOTE: Using ICU only data for CLABSI and CAUTI, revisit in future***"
# Fetch GA state averages live from the Medicare Socrata API, one endpoint
# per infection measure.
endpoints = [{"clabsi": "https://data.medicare.gov/resource/qfdj-8fa5.json?measure_id=HAI_1a_SIR&state=GA"}, {"cauti": "https://data.medicare.gov/resource/qfdj-8fa5.json?measure_id=HAI_2a_SIR&state=GA"}, {"ssicolon": "https://data.medicare.gov/resource/qfdj-8fa5.json?measure_id=HAI_3_SIR&state=GA"}, {"ssihyst": "https://data.medicare.gov/resource/qfdj-8fa5.json?measure_id=HAI_4_SIR&state=GA"}, {"mrsa": "https://data.medicare.gov/resource/qfdj-8fa5.json?measure_id=HAI_5_SIR&state=GA"}, {"cdiff": "https://data.medicare.gov/resource/qfdj-8fa5.json?measure_id=HAI_6_SIR&state=GA"}]
totals = {"id": "infectionsStateAverages"} #backbone expects an ID and local storage uses it too
for node in endpoints: #go through each endpoint
for key in node.keys(): #use the key as an ID later
url = urllib2.Request(node[key])
data = json.load(urllib2.urlopen(url))
for item in data:
totals[key] = float(item["score"])
# Write the combined per-hospital tree plus state averages for the app.
f = open( '../src/assets/data/infections.json', 'w')
f.write(json.dumps({"hospitals": tree, "averages": totals}, indent=2, sort_keys=True))
f.close()
print "hospital infections JSON saved!"
##### HIP/KNEE SURGERIES #####
#rename unintuitive ratio keys and round the averages
f = open( '../src/assets/data/src/hip_knee.json', 'rU' )
src = json.load(f)
f.close()
tree = []
# Map CMS rate-comparison strings to CSS rating classes ("" = too few cases).
ratingClasses = {"No different than the National Rate":"normal", "Better than the National Rate":"good", "Worse than the National Rate":"bad", "Number of Cases Too Small": ""}
def isNA(string):
    """Return 1 when the CMS category marks the measure as unavailable, else 0."""
    if string == "Number of Cases Too Small":
        return 1
    return 0
# Build one record per hospital with readmission/complication measures.
for node in src:
hospital = {}
hospital["id"] = node["provider_id"]
hospital["display_name"] = node["ajc_hospital_name"]
hospital["address"] = node["address"]
hospital["city"] = node["city"]
hospital["surgery"] = {
"readmissions" : {}, "complications" : {}
}
# Loop through keys looking for the measure-name prefixes and fold each
# parameter into the matching surgery-measure dict.
for key in node.keys():
tmp = key.lower().split("_")
measure = tmp[0]
if measure in hospital["surgery"]:
param = tmp[1]
if(param != "notes"):
hospital["surgery"][measure][param] = node[key]
# "category" also yields an availability flag and a CSS rating class.
if(param == "category"):
hospital["surgery"][measure]["na"] = isNA(node[key])
hospital["surgery"][measure]["ratingClass"] = ratingClasses[node[key]]
tree.append(hospital)
##### Uses this API endpoint because I couldn't find a current national average in the database http://dev.socrata.com/foundry/#/data.medicare.gov/tiin-ktzr
#already imported urllib2 earlier
# National rates come live from the Medicare Socrata API.
endpoints = [{"complications": "https://data.medicare.gov/resource/tiin-ktzr.json?measure_id=COMP_HIP_KNEE"}, {"readmissions": "https://data.medicare.gov/resource/vfqj-duc4.json?measure_id=READM_30_HIP_KNEE"}]
totals = {"id": "hipkneeAverages", "national": {}} #backbone expects an ID and local storage uses it too
for node in endpoints: #go through each endpoint
for key in node.keys(): #use the key as an ID later to match it with state level
url = urllib2.Request(node[key])
data = json.load(urllib2.urlopen(url))
for item in data:
totals["national"][key] = float(item["national_rate"])
# Georgia state averages come from a prepared local file.
ft = open( '../src/assets/data/src/hipknee_avgs_web.json', 'rU')
src = json.load(ft)
ft.close()
#would be easy to do this in sql but I want the view to be easy to understand
hipkneeDict = {"ga_readm_avg" : "readmissions", "ga_comp_avg" : "complications"}
for node in src:
for key in node.keys():
totals[hipkneeDict[key]] = node[key]
f = open( '../src/assets/data/surgery.json', 'w')
f.write(json.dumps({"hospitals": tree, "averages": totals}, indent=2, sort_keys=True))
f.close()
print "hospital hipknee JSON saved!"
##### PERINATAL #####
f = open( '../src/assets/data/src/perinatal.json', 'rU' )
src = json.load(f)
f.close()
tree = []
#there's a bunch of stuff in the data not being used in the app, just grab the stuff that will be displayed
names = ["Delivery_Rms", "Birthing_Rms", "LDR_Rms", "LDRP_Rms", "C_Sect", "Live_Births", "total_births", "csect_pct", "avg_delivery_charge", "avg_premature_charge", "early_births_pct", "Beds_New_Born", "Beds_Intermediate", "Beds_Intensive"]
for node in src:
hospital = {}
hospital["id"] = node["provider_id"]
hospital["display_name"] = node["ajc_hospital_name"]
hospital["address"] = node["address"]
hospital["city"] = node["city"]
# Copy only the whitelisted display fields listed in `names`.
for key in node.keys():
if key in names:
val = node[key]
hospital[key] = val
tree.append(hospital)
#Gwinnett Medical didn't file their survey until after the data was prepared for download and they are a major player so adding them manually for now
#data from http://www.georgiahealthdata.info/CCSS/AHQPDF2014.php?uid=HOSP366
tree.append({ "Beds_Intensive": 16, "Beds_Intermediate": 8, "Beds_New_Born": 40, "Birthing_Rms": 0, "C_Sect": 1584, "Delivery_Rms": 0, "LDRP_Rms": 0, "LDR_Rms": 19, "Live_Births": 4953, "address": "1000 Medical Center Boulevard", "avg_delivery_charge": 7298, "avg_premature_charge": 37635, "city": "Lawrenceville", "csect_pct": 36, "display_name": "Gwinnett Medical Center", "early_births_pct": 3, "id": "110087", "total_births": 4989})
print "**** MANUALLY ADDED GWINNETT MEDICAL TO PERINATAL DATA -- REMOVE BEFORE UPDATING WITH 2015 DATA ****"
# State averages come from a prepared local file; rename keys for the view.
ft = open( '../src/assets/data/src/perinatal_avgs_web.json', 'rU')
src = json.load(ft)
ft.close()
#would be easy to do this in sql but I want the view to be easy to understand
perinatalDict = {"avgC_SectPct" : "csect_pct", "avgDeliveryCharge" : "avg_delivery_charge", "avgPrematureCharge": "avg_premature_charge", "avgBirths": "total_births", "earlyPct": "early_births_pct"}
totals = {"id": "perinatalStateAverages"} #backbone expects an ID and local storage uses it too
for node in src:
for key in node.keys():
totals[perinatalDict[key]] = node[key]
f = open( '../src/assets/data/perinatal.json', 'w')
f.write(json.dumps({"hospitals": tree, "averages": totals}, indent=2, sort_keys=True))
f.close()
print "hospital perinatal JSON saved!"
#####ER Waits#####
f = open( '../src/assets/data/src/ER_waits.json', 'rU' )
src = json.load(f)
f.close()
labels = ["er_inpatient_1", "er_inpatient_2", "er_total_time_avg", "er_time_to_eval", "er_time_to_painmed", "er_left_pct", "er_ctresults_pct"]
# Coerce the wait-time fields to int in place where possible.
for node in src:
    for key in node.keys():
        if key in labels:
            try:
                node[key] = int(node[key])
            except Exception:
                # Leave the original value untouched (usually some string
                # indicating not enough data); the app filters NaN later.
                # A bare except here previously also caught SystemExit and
                # KeyboardInterrupt; Exception is sufficient.
                pass
###State averages###
#er_volume (EDV) not included in state and national bc it is categorical so you can't average it
# endpoints[0] is the GA state feed (gets "&state=GA" appended);
# endpoints[1] is the national feed.
endpoints = ["https://data.medicare.gov/resource/apyc-v239.json?measure_id=", "https://data.medicare.gov/resource/isrn-hqyy.json?measure_id="]
keys = ["ED_1b", "ED_2b", "OP_18b", "OP_20", "OP_21", "OP_22", "OP_23"]
param = "&state=GA"
totalsGA = {"id": "erStateAverages", "national": {} } #backbone expects an ID and local storage uses it too
for i, endpoint in enumerate(endpoints, start=0):
    for j, label in enumerate(labels): #use the key as an ID later
        urlStr = endpoint+keys[j]
        if i==0:
            urlStr = urlStr+param
        url = urllib2.Request(urlStr)
        data = json.load(urllib2.urlopen(url))
        for item in data:
            if i==0:
                totalsGA[label] = int(item["score"])
            else:
                totalsGA["national"][label] = int(item["score"])
f = open( '../src/assets/data/er.json', 'w')
f.write(json.dumps({"hospitals": src, "averages": totalsGA}, indent=2, sort_keys=True))
f.close()
print "hospital ER waits JSON saved!" | {
"content_hash": "ae40d81294a0eaac00e741cdb15df8fc",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 583,
"avg_line_length": 44.864197530864196,
"alnum_prop": 0.6341955604476243,
"repo_name": "NewsappAJC/hospitalCheckup",
"id": "bc59ae34388912dbafa53654bec2a8fc05290b52",
"size": "11100",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "py/organizeJSON.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9042"
},
{
"name": "HTML",
"bytes": "18643"
},
{
"name": "JavaScript",
"bytes": "195529"
},
{
"name": "Python",
"bytes": "11100"
}
],
"symlink_target": ""
} |
"""
This is the Docutils (Python Documentation Utilities) package.
Package Structure
=================
Modules:
- __init__.py: Contains component base classes, exception classes, and
Docutils version information.
- core.py: Contains the ``Publisher`` class and ``publish_*()`` convenience
functions.
- frontend.py: Runtime settings (command-line interface, configuration files)
processing, for Docutils front-ends.
- io.py: Provides a uniform API for low-level input and output.
- nodes.py: Docutils document tree (doctree) node class library.
- statemachine.py: A finite state machine specialized for
regular-expression-based text filters.
- urischemes.py: Contains a complete mapping of known URI addressing
scheme names to descriptions.
- utils.py: Contains the ``Reporter`` system warning class and miscellaneous
utilities.
Subpackages:
- languages: Language-specific mappings of terms.
- parsers: Syntax-specific input parser modules or packages.
- readers: Context-specific input handlers which understand the data
source and manage a parser.
- transforms: Modules used by readers and writers to modify DPS
doctrees.
- writers: Format-specific output translators.
"""
__docformat__ = 'reStructuredText'
__version__ = '0.8'
"""``major.minor.micro`` version number. The micro number is bumped for API
changes, for new functionality, and for interim project releases. The minor
number is bumped whenever there is a significant project release. The major
number will be bumped when the project is feature-complete, and perhaps if
there is a major change in the design."""
__version_details__ = 'snapshot 2010-09-01, r6395'
"""Extra version details (e.g. 'snapshot 2005-05-29, r3410', 'repository',
'release'), modified automatically & manually."""
# Root exception classes for Docutils. StandardError is the Python 2 base
# of non-exit exceptions (removed in Python 3 in favour of Exception).
class ApplicationError(StandardError): pass
class DataError(ApplicationError): pass
class SettingsSpec:
"""
Runtime setting specification base class.
SettingsSpec subclass objects used by `docutils.frontend.OptionParser`.
"""
settings_spec = ()
"""Runtime settings specification. Override in subclasses.
Defines runtime settings and associated command-line options, as used by
`docutils.frontend.OptionParser`. This is a tuple of:
- Option group title (string or `None` which implies no group, just a list
of single options).
- Description (string or `None`).
- A sequence of option tuples. Each consists of:
- Help text (string)
- List of option strings (e.g. ``['-Q', '--quux']``).
- Dictionary of keyword arguments sent to the OptionParser/OptionGroup
``add_option`` method.
Runtime setting names are derived implicitly from long option names
('--a-setting' becomes ``settings.a_setting``) or explicitly from the
'dest' keyword argument.
Most settings will also have a 'validator' keyword & function. The
validator function validates setting values (from configuration files
and command-line option arguments) and converts them to appropriate
types. For example, the ``docutils.frontend.validate_boolean``
function, **required by all boolean settings**, converts true values
('1', 'on', 'yes', and 'true') to 1 and false values ('0', 'off',
'no', 'false', and '') to 0. Validators need only be set once per
setting. See the `docutils.frontend.validate_*` functions.
See the optparse docs for more details.
- More triples of group title, description, options, as many times as
needed. Thus, `settings_spec` tuples can be simply concatenated.
"""
settings_defaults = None
"""A dictionary of defaults for settings not in `settings_spec` (internal
settings, intended to be inaccessible by command-line and config file).
Override in subclasses."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
defined in other components. Override in subclasses."""
relative_path_settings = ()
"""Settings containing filesystem paths. Override in subclasses.
Settings listed here are to be interpreted relative to the current working
directory."""
config_section = None
"""The name of the config file section specific to this component
(lowercase, no brackets). Override in subclasses."""
config_section_dependencies = None
"""A list of names of config file sections that are to be applied before
`config_section`, in order (from general to specific). In other words,
the settings in `config_section` are to be overlaid on top of the settings
from these sections. The "general" section is assumed implicitly.
Override in subclasses."""
class TransformSpec:
"""
Runtime transform specification base class.
TransformSpec subclass objects used by `docutils.transforms.Transformer`.
"""
def get_transforms(self):
"""Transforms required by this class. Override in subclasses."""
if self.default_transforms != ():
import warnings
warnings.warn('default_transforms attribute deprecated.\n'
'Use get_transforms() method instead.',
DeprecationWarning)
return list(self.default_transforms)
return []
# Deprecated; for compatibility.
default_transforms = ()
unknown_reference_resolvers = ()
"""List of functions to try to resolve unknown references. Unknown
references have a 'refname' attribute which doesn't correspond to any
target in the document. Called when the transforms in
`docutils.transforms.references` are unable to find a correct target. The
list should contain functions which will try to resolve unknown
references, with the following signature::
def reference_resolver(node):
'''Returns boolean: true if resolved, false if not.'''
If the function is able to resolve the reference, it should also remove
the 'refname' attribute and mark the node as resolved::
del node['refname']
node.resolved = 1
Each function must have a "priority" attribute which will affect the order
the unknown_reference_resolvers are run::
reference_resolver.priority = 100
Override in subclasses."""
class Component(SettingsSpec, TransformSpec):

    """Base class for Docutils components."""

    # Name of the component type ('reader', 'parser', 'writer').
    # Override in subclasses.
    component_type = None

    # Names this component answers to. Override in subclasses.
    supported = ()

    def supports(self, format):
        """
        Return whether `format` is supported by this component.

        Used by transforms to ask the dependent component whether it
        supports a certain input context or output format.
        """
        return any(format == name for name in self.supported)
| {
"content_hash": "9827f6812757b38f87ba5337fcee4dca",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 81,
"avg_line_length": 34.74,
"alnum_prop": 0.6971790443293034,
"repo_name": "akiokio/centralfitestoque",
"id": "ee78b43c56e6a0705cb9fb668b924987fca2145d",
"size": "7112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/.pycharm_helpers/docutils/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "253279"
},
{
"name": "JavaScript",
"bytes": "253299"
},
{
"name": "Python",
"bytes": "6144500"
},
{
"name": "Ruby",
"bytes": "168219"
},
{
"name": "Shell",
"bytes": "21"
}
],
"symlink_target": ""
} |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime
from itsdangerous import URLSafeTimedSerializer
from enum import Enum
import AppConfig
Base = declarative_base()
login_serializer = URLSafeTimedSerializer(AppConfig.APPSECRETKEY)
# Role levels stored in User.type (see User.is_admin).
class UserPrivileges(Enum):
USER = 1
ADMIN = 2
class Post(Base):
    """SQLAlchemy model for a blog post."""
__tablename__ = 'posts'
id = Column(Integer, primary_key=True)
text = Column(String)
title = Column(String)
createDate = Column(DateTime)
def as_dict(self):
    """Return a column-name -> value dict, e.g. for JSON serialization."""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class User(Base):
    """SQLAlchemy user model; also implements the Flask-Login user API
    (is_authenticated/is_active/is_anonymous/get_id)."""
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String)
password = Column(String)
# Privilege level; interpreted via the UserPrivileges enum.
type = Column(Integer)
def as_dict(self):
    """Return a column-name -> value dict, e.g. for JSON serialization."""
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return "<User(username='%s', password='%s')>" % (self.username, self.password)
def is_admin(self):
    """Return True when this user's type maps to the ADMIN privilege."""
return UserPrivileges(self.type) == UserPrivileges.ADMIN
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def get_auth_token(self):
data = [str(self.id), self.password]
return login_serializer.dumps(data) | {
"content_hash": "b562de22f1ce7f0ec0605e87cc80b86d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 23.85,
"alnum_prop": 0.6582809224318659,
"repo_name": "mandrive/FlaskTest",
"id": "b5e5c217921d6376b82cff02a34037a380c2d467",
"size": "1431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbmodels/Models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "9909"
}
],
"symlink_target": ""
} |
import inspect
from pprint import pprint
import example
# Pretty-print the members of class example.B that are plain functions.
pprint(inspect.getmembers(example.B, inspect.isfunction))
| {
"content_hash": "925dc9b5618d9dad7f5e3dcbbd5a44a0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 57,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.8275862068965517,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "7ee9a8b8ae242dab3019f8cc9e7d3cba9dabc3c9",
"size": "116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_language_tools/inspect_getmembers_class_methods_b.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
"""
.. module:: freebayes
:platform: Unix, OSX
:synopsis: A wrapper module for calling ScanIndel.
.. moduleauthor:: Daniel Gaston <daniel.gaston@dal.ca>
"""
from ddb_ngsflow import pipeline
def run_flt3_itdseek(job, config, name):
"""Run ITDseek without a matched normal sample
:param job: Pipeline job object; used for logging via fileStore.
:param config: The configuration dictionary.
:type config: dict.
:param name: sample name.
:type name: str.
:returns: str -- The output vcf file name.
"""
itdseek_vcf = "{}.flt3.itdseek.vcf".format(name)
itdseek_logfile = "{}.flt3.itdseek.log".format(name)
# itdseek.sh <bam> <reference> <samtools> ; stdout redirected to the VCF.
# NOTE(review): the ">" redirection assumes run_and_log_command executes
# the joined string through a shell -- confirm.
itdseek_command = ["{}".format(config['itdseek']['bin']),
"{}.rg.sorted.bam".format(name),
"{}".format(config['reference']),
"{}".format(config['samtools-0.19']['bin']),
">",
"{}".format(itdseek_vcf)]
job.fileStore.logToMaster("ITDSeek Command: {}\n".format(itdseek_command))
pipeline.run_and_log_command(" ".join(itdseek_command), itdseek_logfile)
return itdseek_vcf
| {
"content_hash": "c53fa37e483b8231d02201346e7af94a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 31.91176470588235,
"alnum_prop": 0.5898617511520737,
"repo_name": "dgaston/ddbio-ngsflow",
"id": "409c10726420a266b59161e26730972f055dcae8",
"size": "1085",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ddb_ngsflow/variation/sv/itdseek.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70905"
}
],
"symlink_target": ""
} |
import os
from serenata_toolbox import log, settings
from serenata_toolbox.datasets.downloader import Downloader
from serenata_toolbox.datasets.local import LocalDatasets
class Datasets:
"""
This is a wrapper for two helper classes that together handle the
datasets locally and the download of new ones.
Datasets class takes one argument: the path to the local directory of the
dataset files (e.g. data/ or /tmp/serenata-data). The argument is optional
and the default value is data/ (following the default usage in the main
repo, serenata-de-amor).
Inside this object there are two main objects: local and downloader:
* `Datasets.local` handles listing all local datasets through the property
`Datasets.local.all` (hint: it's a generator) and deleting local datasets
with the method `Datasets.local.delete(filename)`;
* `Datasets.downloader` implements an async manager to download files from
the remote bucket. Its `Datasets.downloader.download(files)` takes the
path for a single file (str) as argument or an iterable of paths (str).
:param local_directory: (str) path to local directory of the datasets
:param timeout: (float) timeout parameter to Downloader,
None or 0 disables timeout check.
"""
def __init__(self, local_directory=None, timeout=None):
if not local_directory:
local_directory = 'data'
self.local = LocalDatasets(local_directory)
self.downloader = Downloader(local_directory, timeout=timeout)
# shortcuts & retrocompatibility
def fetch(filename, destination_path):
    """Download a single dataset file into destination_path."""
datasets = Datasets(destination_path)
return datasets.downloader.download(filename)
def fetch_latest_backup(destination_path, force_all=False):
    """Download the latest datasets; skip files already present unless
    force_all is True."""
datasets = Datasets(destination_path)
if force_all:
files = datasets.downloader.LATEST
else:
# Only fetch files that do not already exist locally.
files = tuple(
f for f in datasets.downloader.LATEST
if not os.path.exists(os.path.join(destination_path, f))
)
if not files:
log.info('You already have all the latest datasets! Nothing to download.')
# NOTE(review): download() is still called with an empty tuple here --
# presumably a no-op in Downloader; confirm.
return datasets.downloader.download(files)
| {
"content_hash": "2a9737611b8237f54e07a2ba558328cd",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 82,
"avg_line_length": 35.5,
"alnum_prop": 0.7105860972285325,
"repo_name": "datasciencebr/serenata-toolbox",
"id": "287fea258e54c23feac5a4c87153e3eb583bd0c7",
"size": "2201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serenata_toolbox/datasets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95910"
}
],
"symlink_target": ""
} |
import pandas as pd
import pemi
class SaSqlSourcePipe(pemi.Pipe):
    """Pemi pipe that sources a pandas DataFrame from a SQL query.

    Args:
        sql: SQL statement to execute.
        engine: SQLAlchemy engine used to open the connection.
        schema: Optional pemi schema used to coerce the result columns.
        result: When False, the SQL is executed for its side effects only
            and no result set is fetched.
        chunk_size: When set, the result is fetched in chunks of this many
            rows to bound memory use.
    """

    def __init__(self, *, sql, engine, schema=None, result=True, chunk_size=None):
        super().__init__()

        self.sql = sql
        self.engine = engine
        self.schema = schema
        self.result = result
        self.chunk_size = chunk_size

        self.target(
            pemi.PdDataSubject,
            name='main',
            schema=self.schema
        )

    def _get_result(self, conn):
        """Fetch the query result, optionally in chunks, as one DataFrame."""
        if self.chunk_size is None:
            return pd.read_sql(self.sql, conn)

        # DataFrame.append is deprecated (removed in pandas 2.0) and was
        # quadratic when called per chunk; collect the chunks and
        # concatenate once instead.
        chunks = list(pd.read_sql(self.sql, conn, chunksize=self.chunk_size))
        if not chunks:
            return pd.DataFrame()
        return pd.concat(chunks, ignore_index=True)

    def extract(self):
        """Execute the SQL; return the fetched data, or None when result=False."""
        pemi.log.info("Executing SQL '%s' via:\n%s", self.name, self.sql)

        data = None
        with self.engine.connect() as conn:
            if self.result:
                data = self._get_result(conn)
            else:
                conn.execute(self.sql)
        return data

    def parse(self, data):
        """Coerce extracted data to the schema and store it on the main target."""
        pemi.log.info("Parsing '%s' results", self.name)
        if data is None:
            return None

        if self.schema is None:
            self.targets['main'].df = data
        else:
            # Coerce every schema field; on_error='raise' surfaces bad values.
            mapper = data.mapping(
                [(name, name, field.coerce) for name, field in self.schema.items()],
                on_error='raise'
            )
            self.targets['main'].df = mapper.mapped
        return self.targets['main'].df

    def flow(self):
        """Run the pipe end to end: extract then parse."""
        self.parse(self.extract())
| {
"content_hash": "6561a960548d510c09f82eeaff3d7b5d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 84,
"avg_line_length": 26.387096774193548,
"alnum_prop": 0.5287286063569682,
"repo_name": "inside-track/pemi",
"id": "68ff1a4f5b3a37fc7d3654254be9c5b12183fa02",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pemi/pipes/sa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "816"
},
{
"name": "Dockerfile",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "275978"
}
],
"symlink_target": ""
} |
import os
from errno import EEXIST
from os.path import isdir
import requests
from django.apps import apps
from django.core.management.color import no_style
from django.core.serializers import deserialize
from django.db import connection
from django.db.models import ImageField
def puts(value):
    """Print *value* unless the SYNCTOOL_VERBOSITY env var is 0 or less.

    A missing or non-integer SYNCTOOL_VERBOSITY counts as verbosity 1,
    so output is shown by default.
    """
    raw = os.environ.get("SYNCTOOL_VERBOSITY", 1)
    try:
        level = int(raw)
    except ValueError:
        level = 1
    if level <= 0:
        return
    print(value)
def sync_data(url, api_token, clean=False, reset=True, images=False,
media_url=None):
    """Fetch Django-serialized JSON fixtures from `url` and save them locally.

    :param url: endpoint returning Django-serialized JSON objects.
    :param api_token: used as the HTTP basic-auth username (blank password).
    :param clean: delete all existing rows of each model before loading it.
    :param reset: reset PK sequences for every touched app afterwards.
    :param images: also download files for each ImageField of loaded models.
    :param media_url: base URL for image downloads (used when images=True).
    :raises RuntimeError: when the response status is not OK.
    """
puts("Loading data from {0}".format(url))
response = requests.get(url, auth=(api_token, ""))
if not response.ok:
puts(response.content)
raise RuntimeError(
"%s server error while contacting api." % response.status_code
)
app_labels = set()
model_labels = set()
models = []
puts("Saving data")
for obj in deserialize("json", response.content):
app_labels.add(obj.object._meta.app_label)
label = "%s.%s" % (
obj.object._meta.app_label, obj.object._meta.model_name,
)
# First time a model is seen: remember it and (optionally) wipe its rows.
if label not in model_labels:
model = apps.get_model(label)
model_labels.add(label)
models.append(model)
if clean:
puts("Removing entries for model %s" % label)
model.objects.all().delete()
obj.save()
if reset:
for app_label in app_labels:
reset_sequence(app_label)
if images:
for model in models:
for field in model._meta.fields:
if isinstance(field, ImageField):
get_images(media_url, model.objects.all(), field.name)
def get_reset_command(app_label):
    """Build the SQL that resets PK sequences for all models of an app."""
app_config = apps.get_app_config(app_label)
models = app_config.get_models(include_auto_created=True)
statements = connection.ops.sequence_reset_sql(no_style(), models)
return "\n".join(statements)
def reset_sequence(app_label):
"""
Reset the primary key sequence for the tables in an application.
This is necessary if any local edits have happened to the table.
:param app_label: Django app label whose model sequences are reset.
"""
puts("Resetting primary key sequence for {0}".format(app_label))
cursor = connection.cursor()
cmd = get_reset_command(app_label)
cursor.execute(cmd)
def get_images(base_url, queryset, field):
    """Download the image referenced by `field` for every object in queryset."""
puts("Syncing images for %s %s" % (queryset.model.__name__, field))
for obj in queryset:
download(base_url, getattr(obj, field))
def download(base_url, source_image):
    """Fetch `source_image` from base_url into its local path, if missing."""
if not source_image:
return
# Already present on disk; nothing to do.
if os.path.exists(source_image.path):
return
upload_directory = os.path.dirname(source_image.path)
if not os.path.isdir(upload_directory):
mkdir(upload_directory)
endpoint = "%s%s" % (base_url, source_image)
puts("Downloading %s" % endpoint)
# Stream so large images are not held entirely in memory.
response = requests.get(endpoint, stream=True)
if not response.ok:
puts(
"%s response. Unable to download image." % response.status_code,
)
return
with open(source_image.path, "wb") as f:
for chunk in response.iter_content(1024):
f.write(chunk)
def mkdir(path):
    """Create ``path`` (and any parents), tolerating an existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow only the "already exists as a directory" race; re-raise
        # every other failure (permissions, existing file, ...).
        if exc.errno != EEXIST or not isdir(path):
            raise
| {
"content_hash": "7de5b3ae14844115a397ad4df1748672",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 25.83076923076923,
"alnum_prop": 0.6203097081596188,
"repo_name": "prestontimmons/django-synctool",
"id": "92d4a790ec5d1b131a521352f3a1a7600bdc2104",
"size": "3358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synctool/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18119"
}
],
"symlink_target": ""
} |
from files import VariableByte
from unittest import TestCase
class TestVariableByte(TestCase):
    """Round-trip tests for the variable-byte integer codec in ``files``."""
    def setUp(self):
        # Matching integer-list and encoded-bytes representations of the
        # same sequence, exercised from both directions below.
        self.numbers_input = [777, 1234, 2551, 16]
        self.byte_input = b'\x06\x89\t\xd2\x13\xf7\x90'
    def test_encoding(self):
        """Encoding the integer list must yield the expected byte string."""
        res = b'\x06\x89\t\xd2\x13\xf7\x90'
        self.assertEqual(res, VariableByte.encoding(self.numbers_input))
    def test_decoding(self):
        """Decoding the byte string must reproduce the original integers."""
        res = [777, 1234, 2551, 16]
        self.assertEqual(res, VariableByte.decoding(self.byte_input))
"content_hash": "23d38dab10a2b2d1a2fc632e4395aefa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 29.88235294117647,
"alnum_prop": 0.6653543307086615,
"repo_name": "Xia0ben/IQPlayground",
"id": "1a84426fb319345feefe76ea7a0e681bb682c1a6",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_variableByte.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73975"
}
],
"symlink_target": ""
} |
"""Volume v1 Snapshot action implementations"""
import copy
import functools
import logging
from cliff import columns as cliff_columns
from osc_lib.cli import format_columns
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class VolumeIdColumn(cliff_columns.FormattableColumn):
    """Formattable column for volume ID column.
    Unlike the parent FormattableColumn class, the initializer of the
    class takes volume_cache as the second argument.
    osc_lib.utils.get_item_properties instantiate cliff FormattableColumn
    object with a single parameter "column value", so you need to pass
    a partially initialized class like
    ``functools.partial(VolumeIdColumn, volume_cache)``.
    """

    def __init__(self, value, volume_cache=None):
        super(VolumeIdColumn, self).__init__(value)
        # Map of volume ID -> volume object, used to resolve display names.
        if volume_cache is None:
            volume_cache = {}
        self._volume_cache = volume_cache

    def human_readable(self):
        """Return a volume name if available
        :rtype: either the volume ID or name
        """
        volume_id = self._value
        if volume_id in self._volume_cache:
            return self._volume_cache[volume_id].display_name
        return volume_id
class CreateVolumeSnapshot(command.ShowOne):
    _description = _("Create new volume snapshot")

    def get_parser(self, prog_name):
        """Register arguments for ``volume snapshot create``."""
        parser = super(CreateVolumeSnapshot, self).get_parser(prog_name)
        parser.add_argument(
            'snapshot_name',
            metavar='<snapshot-name>',
            help=_('Name of the new snapshot'),
        )
        parser.add_argument(
            '--volume',
            metavar='<volume>',
            help=_('Volume to snapshot (name or ID) '
                   '(default is <snapshot-name>)'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('Description of the snapshot'),
        )
        parser.add_argument(
            '--force',
            dest='force',
            action='store_true',
            default=False,
            help=_('Create a snapshot attached to an instance. '
                   'Default is False'),
        )
        return parser

    def take_action(self, parsed_args):
        """Create the snapshot and return its fields as (names, values)."""
        volume_client = self.app.client_manager.volume
        # The source volume defaults to the snapshot name when omitted.
        source = parsed_args.volume or parsed_args.snapshot_name
        volume_id = utils.find_resource(volume_client.volumes, source).id
        snapshot = volume_client.volume_snapshots.create(
            volume_id,
            parsed_args.force,
            parsed_args.snapshot_name,
            parsed_args.description
        )
        # Expose the raw 'metadata' dict under the standard 'properties' key.
        info = snapshot._info
        info['properties'] = format_columns.DictColumn(info.pop('metadata'))
        return zip(*sorted(info.items()))
class DeleteVolumeSnapshot(command.Command):
    _description = _("Delete volume snapshot(s)")

    def get_parser(self, prog_name):
        """Register the positional, repeatable <snapshot> argument."""
        parser = super(DeleteVolumeSnapshot, self).get_parser(prog_name)
        parser.add_argument(
            'snapshots',
            metavar='<snapshot>',
            nargs="+",
            help=_('Snapshot(s) to delete (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each snapshot, reporting a summary error at the end.

        Failures are logged per snapshot and accumulated; a CommandError
        is raised only after all deletions have been attempted.
        """
        volume_client = self.app.client_manager.volume
        failures = 0
        for snapshot_ref in parsed_args.snapshots:
            try:
                snapshot = utils.find_resource(
                    volume_client.volume_snapshots, snapshot_ref)
                volume_client.volume_snapshots.delete(snapshot.id)
            except Exception as e:
                failures += 1
                LOG.error(_("Failed to delete snapshot with "
                            "name or ID '%(snapshot)s': %(e)s"),
                          {'snapshot': snapshot_ref, 'e': e})
        if failures:
            msg = (_("%(result)s of %(total)s snapshots failed "
                     "to delete.") % {'result': failures,
                                      'total': len(parsed_args.snapshots)})
            raise exceptions.CommandError(msg)
class ListVolumeSnapshot(command.Lister):
    _description = _("List volume snapshots")
    def get_parser(self, prog_name):
        """Register filter/format arguments for ``volume snapshot list``."""
        parser = super(ListVolumeSnapshot, self).get_parser(prog_name)
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=False,
            help=_('Include all projects (admin only)'),
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            default=None,
            help=_('Filters results by a name.')
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            choices=['available', 'error', 'creating', 'deleting',
                     'error-deleting'],
            help=_("Filters results by a status. "
                   "('available', 'error', 'creating', 'deleting'"
                   " or 'error-deleting')")
        )
        parser.add_argument(
            '--volume',
            metavar='<volume>',
            default=None,
            help=_('Filters results by a volume (name or ID).')
        )
        return parser
    def take_action(self, parsed_args):
        """List snapshots matching the filters.

        Returns the (column_headers, row-generator) pair that
        command.Lister expects.
        """
        volume_client = self.app.client_manager.volume
        if parsed_args.long:
            # ``columns`` are the raw v1 API attribute names; the headers
            # at matching indexes are renamed for display below.
            columns = ['ID', 'Display Name', 'Display Description', 'Status',
                       'Size', 'Created At', 'Volume ID', 'Metadata']
            column_headers = copy.deepcopy(columns)
            column_headers[6] = 'Volume'
            column_headers[7] = 'Properties'
        else:
            columns = ['ID', 'Display Name', 'Display Description', 'Status',
                       'Size']
            column_headers = copy.deepcopy(columns)
        # Always update Name and Description
        column_headers[1] = 'Name'
        column_headers[2] = 'Description'
        # Cache the volume list
        volume_cache = {}
        try:
            for s in volume_client.volumes.list():
                volume_cache[s.id] = s
        except Exception:
            # Just forget it if there's any trouble
            pass
        # Partially apply the cache so cliff can construct the column
        # from a single value (see VolumeIdColumn's docstring).
        VolumeIdColumnWithCache = functools.partial(VolumeIdColumn,
                                                    volume_cache=volume_cache)
        volume_id = None
        if parsed_args.volume:
            volume_id = utils.find_resource(
                volume_client.volumes, parsed_args.volume).id
        search_opts = {
            'all_tenants': parsed_args.all_projects,
            'display_name': parsed_args.name,
            'status': parsed_args.status,
            'volume_id': volume_id,
        }
        data = volume_client.volume_snapshots.list(
            search_opts=search_opts)
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    formatters={'Metadata': format_columns.DictColumn,
                                'Volume ID': VolumeIdColumnWithCache},
                ) for s in data))
class SetVolumeSnapshot(command.Command):
    _description = _("Set volume snapshot properties")
    def get_parser(self, prog_name):
        """Register arguments for ``volume snapshot set``."""
        parser = super(SetVolumeSnapshot, self).get_parser(prog_name)
        parser.add_argument(
            'snapshot',
            metavar='<snapshot>',
            help=_('Snapshot to modify (name or ID)')
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('New snapshot name')
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New snapshot description')
        )
        parser.add_argument(
            "--no-property",
            dest="no_property",
            action="store_true",
            help=_("Remove all properties from <snapshot> "
                   "(specify both --no-property and --property to "
                   "remove the current properties before setting "
                   "new properties.)"),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_('Property to add/change for this snapshot '
                   '(repeat option to set multiple properties)'),
        )
        return parser
    def take_action(self, parsed_args):
        """Apply property/name/description changes to the snapshot.

        Each sub-operation is attempted independently so a failure in one
        does not prevent the others; a single CommandError is raised at
        the end if any of them failed.
        """
        volume_client = self.app.client_manager.volume
        snapshot = utils.find_resource(volume_client.volume_snapshots,
                                       parsed_args.snapshot)
        result = 0
        # --no-property: clear all existing metadata first.
        if parsed_args.no_property:
            try:
                key_list = snapshot.metadata.keys()
                volume_client.volume_snapshots.delete_metadata(
                    snapshot.id,
                    list(key_list),
                )
            except Exception as e:
                LOG.error(_("Failed to clean snapshot properties: %s"), e)
                result += 1
        # --property: set/overwrite the requested keys.
        if parsed_args.property:
            try:
                volume_client.volume_snapshots.set_metadata(
                    snapshot.id, parsed_args.property)
            except Exception as e:
                LOG.error(_("Failed to set snapshot property: %s"), e)
                result += 1
        kwargs = {}
        if parsed_args.name:
            kwargs['display_name'] = parsed_args.name
        if parsed_args.description:
            kwargs['display_description'] = parsed_args.description
        if kwargs:
            try:
                snapshot.update(**kwargs)
            except Exception as e:
                LOG.error(_("Failed to update snapshot display name "
                            "or display description: %s"), e)
                result += 1
        if result > 0:
            raise exceptions.CommandError(_("One or more of the "
                                            "set operations failed"))
class ShowVolumeSnapshot(command.ShowOne):
    _description = _("Display volume snapshot details")

    def get_parser(self, prog_name):
        """Register the positional <snapshot> argument."""
        parser = super(ShowVolumeSnapshot, self).get_parser(prog_name)
        parser.add_argument(
            'snapshot',
            metavar='<snapshot>',
            help=_('Snapshot to display (name or ID)')
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the snapshot and return its fields as (names, values)."""
        volume_client = self.app.client_manager.volume
        snapshot = utils.find_resource(volume_client.volume_snapshots,
                                       parsed_args.snapshot)
        # Expose the raw 'metadata' dict under the standard 'properties' key.
        info = snapshot._info
        info['properties'] = format_columns.DictColumn(info.pop('metadata'))
        return zip(*sorted(info.items()))
class UnsetVolumeSnapshot(command.Command):
    _description = _("Unset volume snapshot properties")

    def get_parser(self, prog_name):
        """Register arguments for ``volume snapshot unset``."""
        parser = super(UnsetVolumeSnapshot, self).get_parser(prog_name)
        parser.add_argument(
            'snapshot',
            metavar='<snapshot>',
            help=_('Snapshot to modify (name or ID)'),
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            action='append',
            help=_('Property to remove from snapshot '
                   '(repeat option to remove multiple properties)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete the requested metadata keys from the snapshot."""
        volume_client = self.app.client_manager.volume
        snapshot = utils.find_resource(
            volume_client.volume_snapshots, parsed_args.snapshot)
        # No --property supplied: nothing to delete.
        if not parsed_args.property:
            return
        volume_client.volume_snapshots.delete_metadata(
            snapshot.id,
            parsed_args.property,
        )
| {
"content_hash": "d674d4be28ccc13d1ab21534d73d280e",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 78,
"avg_line_length": 33.81944444444444,
"alnum_prop": 0.5420123203285421,
"repo_name": "dtroyer/python-openstackclient",
"id": "966db48ff1e7dc6085977ba38bf27b33306cdfe5",
"size": "12788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstackclient/volume/v1/volume_snapshot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4040230"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Run the packager from the directory containing this script.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Manifest keys that must be present before a module can be packaged.
# ('copyright' appeared twice in the original list; the duplicate only
# caused the same check to run twice and is removed here.)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
# Placeholder values shipped with a freshly generated module; packaging
# warns when any of these were left unchanged.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Placeholder LICENSE content used to detect an unedited license file.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from config, expanding ~ and $VARS."""
    raw_path = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(raw_path))
def replace_vars(config, token):
    """Expand ``$(KEY)`` references in *token* using values from *config*.

    Expansion stops at the first unknown key or unterminated reference,
    returning the remaining text unchanged (same best-effort behavior as
    before).
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1: break
        key = token[idx + 2:idx2]
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        if key not in config: break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references.

    Lines starting with '//' are comments; everything else of the form
    KEY = VALUE is collected, with VALUE expanded against the keys read
    so far.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(os.path.join(cwd, 'titanium.xcconfig')) as f:
        contents = f.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//': continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render each file under documentation/ from Markdown to HTML.

    Returns a list of {filename: html} dicts, or None when the
    documentation directory is missing.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    try:
        # Prefer markdown2 when installed; fall back to plain markdown.
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # NOTE(review): ignoreFiles is defined further down at module
        # level; this works only because generate_doc() is called after
        # the whole module has been executed.
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS assets and splice them into the
    generated Objective-C assets router; also write metadata.json with
    the exports found by crawling the JS source.

    No-op when the module's main JS asset does not exist.
    """
    js_file = os.path.join(cwd,'assets','es.oyatsu.avc.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C snippets injected into the module's assets router.
    root_asset_content = """
%s
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
 NSNumber *index = [map objectForKey:path];
 if (index == nil) {
 return nil;
 }
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','EsOyatsuAvcModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print msg and abort packaging with exit status 1."""
    print msg
    sys.exit(1)
def warn(msg):
    """Print a non-fatal warning prefixed with [WARN]."""
    print "[WARN] %s" % msg
def validate_license():
    """Warn when LICENSE still contains the generator's placeholder text."""
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(os.path.join(cwd, 'LICENSE')) as f:
        c = f.read()
    if c.find(module_license_default) != -1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and sanity-check the module manifest.

    Returns (manifest_dict, manifest_path). Dies when the file is missing
    or a required key is absent; warns when generator defaults were left
    unchanged.
    """
    path = os.path.join(cwd, 'manifest')
    # Check existence *before* opening: the original opened the file first,
    # so its die("missing ...") branch could never be reached.
    if not os.path.exists(path): die("missing %s" % path)
    manifest = {}
    with open(path) as f:
        for line in f.readlines():
            line = line.strip()
            if line[0:1] == '#': continue
            if line.find(':') < 0: continue
            # Split on the first ':' only so values may themselves contain
            # colons (e.g. URLs); the original raised ValueError on those.
            key, value = line.split(':', 1)
            manifest[key.strip()] = value.strip()
    for key in required_module_keys:
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# File and directory names excluded from packaging and doc generation.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignoreExt=[]):
    """Recursively add *dir* to zipfile *zf* under *basepath*.

    Entries listed in the module-level ignoreFiles/ignoreDirs, plus any
    file whose extension is in ignoreExt, are skipped.
    """
    if not os.path.exists(dir): return
    for root, dirs, files in os.walk(dir):
        # Prune ignored directories in place so os.walk never descends
        # into them.
        for skipped in ignoreDirs:
            if skipped in dirs:
                dirs.remove(skipped)
        for filename in files:
            if filename in ignoreFiles: continue
            parts = os.path.splitext(filename)
            if len(parts) == 2 and parts[1] in ignoreExt: continue
            source = os.path.join(root, filename)
            destination = source.replace(dir, '%s/%s' % (basepath, dir), 1)
            zf.write(source, destination)
def glob_libfiles():
    """Return all Release-configuration static libraries under build/."""
    return [libfile for libfile in glob.glob('build/**/*.a')
            if libfile.find('Release-') != -1]
def build_module(manifest,config):
    """Compile the module for device and simulator, then merge with lipo.

    Dies when either xcodebuild invocation returns a non-zero status.
    """
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    """Assemble the distributable module zip.

    Packs the manifest, merged static library, rendered documentation,
    assets/example/platform trees, LICENSE, module.xcconfig and (when
    present) metadata.json under modules/iphone/<moduleid>/<version>/.
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # Always rebuild the archive from scratch.
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        # Documentation is stored as <name>.html rendered from <name>.md.
        for doc in docs:
            for file, html in doc.iteritems():
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    # Compiled .js assets are excluded because they were spliced into the
    # binary by compile_js().
    zip_dir(zf,'assets',modulepath,['.pyc','.js'])
    zip_dir(zf,'example',modulepath,['.pyc'])
    zip_dir(zf,'platform',modulepath,['.pyc','.js'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Packaging entry point: validate inputs, build the binaries, then
    # zip everything into the distributable module archive.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # Make the Titanium SDK's own python helpers importable.
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
| {
"content_hash": "7c0be0ff24da598574b95b2092fdd7cc",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 125,
"avg_line_length": 30.44843049327354,
"alnum_prop": 0.6905743740795287,
"repo_name": "reydelamirienda/Titanium.UIActivityViewController",
"id": "2a0e7a82c181cfad44fe1d12c6306effa3a93672",
"size": "6790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1173"
},
{
"name": "Objective-C",
"bytes": "11252"
},
{
"name": "Python",
"bytes": "8897"
}
],
"symlink_target": ""
} |
import percolation as P, social as S, rdflib as r, builtins as B, re, datetime, os, shutil
c=P.utils.check
class GDFTriplification:
"""Produce a linked data publication tree from a standard GDF file.
INPUTS:
======
=> the data directory path
=> the file name (filename_friendship) of the friendship network
=> the file name (filename_interaction) of the interaction network
=> the final path (final_path) for the tree of files to be created
=> a path to the script that is initializing this class (scriptpath)
=> the numeric id (numericid) of the facebook user or group of the network(s)
=> the string id (stringid) of the facebook user or group of the network(s)
=> the facebook link (fb_link) of the user or group
=> the network is from a user (ego==True) or a group (ego==False)
=> a umbrella directory (umbrella_dir) on which more of data is being published
OUTPUTS:
=======
the tree in the directory fpath."""
    def __init__(self,data_path="../data/fb/",filename_friendship="foo.gdf",filename_interaction="foo_interaction.gdf",
            final_path="./fb/",scriptpath=None,numericid=None,stringid=None,fb_link=None,isego=None,umbrella_dir=None):
        # Parse the snapshot date (ddmmyyyy) out of the GDF filename.
        # NOTE(review): 'fname' is undefined here (presumably should be
        # filename_friendship) -- this method fails at runtime as written.
        day,month,year=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d).gdf",fname)[0]
        # NOTE(review): rebinding 'datetime' shadows the imported module,
        # and 'datetime_snapshot' on the next line is undefined.
        datetime=datetime.date(*[int(i) for i in (year,month,day)])
        datetime_string=datetime_snapshot.isoformat()
        self.snapshotid=filename_friendship[:-4]+"_fb"
        self.snapshot=NS.po.FacebookSnapshot+"#"+self.snapshotid
        self.isfriendship= bool(filename_friendship)
        self.isinteraction=bool(filename_interaction)
        fnet=S.fb.read.readGDF(data_path+fname) # return networkx graph
        fnet_=rdfGDFFriendshipNetwork(fnet) # return rdflib graph
        if B.interaction:
            inet=S.fb.readGDF(dpath+fnamei) # return networkx graph
            inet_=rdfInteractionNetwork(inet) # return rdflib graph
        else:
            inet_=0
        # Copy every local (arguments and intermediates, except 'self')
        # onto the instance via exec.
        for i in locals():
            if i !="self":
                exec("self.{}={}".format(i,locals()[i]))
        meta=makeMetadata(fnet_,inet_) # return rdflib graph with metadata about the structure
        writeAllFB(fnet_,inet_,meta) # write linked data tree
    def writeAllFB(self,fnet,inet,mnet):
        """Write the friendship/interaction/meta graphs and a README to
        the final_path tree.

        NOTE(review): references several apparently undefined names
        ('aname', 'self.friendship', 'self.interaction') and the README
        format() call below passes positional arguments after keyword
        arguments, which is a SyntaxError.
        """
        fpath_="{}{}/".format(self.final_path,aname)
        if self.friendship:
            P.rdf.writeAll(fnet,self.snapshotid+"Friendship",fpath_,False,1)
        if self.interaction:
            P.rdf.writeAll(inet,self.snapshotid+"Interaction",fpath_)
        # copy the script that generates this code
        if not os.path.isdir(self.final_path+"scripts"):
            os.mkdir(self.final_path+"scripts")
        shutil.copy(self.scriptpath,self.final_path+"scripts/")
        # copy of the base data
        if not os.path.isdir(self.final_path+"base"):
            os.mkdir(self.final_path+"base")
        shutil.copy(self.data_path+self.filename_friendships,self.final_path+"base/")
        if self.isinteraction:
            shutil.copy(self.data_path+self.filename_interaction,self.final_path+"base/")
            tinteraction="""\n{} individuals with metadata {}
and {} interactions with metadata {} constitute the interaction
network in file:
{}
or
{}
(anonymized: {}).""".format( self.ninteracted,str(self.interactedvars),
                    self.ninteractions,str(self.interactionvars),
                    self.online_prefix+"/rdf/"+self.irdf,
                    self.online_prefix+"/rdf/"+self.ittl,
                    self.interactions_anononymized)
            originals="{}/data/{}\n{}/data/{}".format(self.online_prefix,self.filename_friendships,
                    self.online_prefix,self.filename_interactions)
        else:
            tinteraction=""
            originals="{}/data/{}".format(self.online_prefix,self.filename_friendships)
        P.rdf.writeAll(mnet,aname+"Meta",fpath_,1)
        # make a README
        with open(fpath_+"README","w") as f:
            f.write("""This repo delivers RDF data from the facebook
friendship network of {snapid} collected around {date}.
{nf} individuals with metadata {fvars}
and {nfs} friendships constitute the friendship network in file:
{frdf} \nor \n{fttl}
(anonymized: {fan}).
{tinteraction}
Metadata for discovery is in file:
{mrdf} \nor \n{mttl}
Original files:
{origs}
Ego network: {ise}
Friendship network: {isf}
Interaction network: {isi}
All files should be available at the git repository:
{ava}
\n""".format(
                snapid=self.snapshotid,date=self.datetime_string,
                nf=self.nfriends,fvars=str(self.fvars),
                nfs=self.nfriendships,
                frdf=self.frdf,fttl=self.fttl,
                fan=self.friendships_anonymized,
                tinteraction=tinteraction,
                # NOTE(review): positional after keyword arguments --
                # SyntaxError; presumably mrdf=, mttl=, origs= were meant.
                self.mrdf,
                self.mttl,
                originals,
                ise=self.isego,
                isf=self.isfriendship,
                isi=self.isinteraction,
                ava=self.available_dir
                ))
    def makeMetadata(self,fnet,inet):
        """Build the discovery-metadata rdflib graph for the snapshot.

        NOTE(review): this method contains several syntax errors (unclosed
        lists, missing commas) and references to undefined/misspelled
        attributes (aname, self.sid, self.numericID, self.friendship,
        self.interaction, umbrella_dir); they are flagged inline below.
        """
        if self.groupid and self.groupid2 and (self.groupid!=self.groupid2):
            raise ValueError("Group IDS are different")
        tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]])
        snapshot=P.rdf.IC([tg2],P.rdf.ns.po.FacebookSnapshot,aname,"Snapshot {}".format(self.snapshotid))
        # Accumulates parallel lists of predicate URIs and literal values.
        foo={"uris":[],"vals":[]}
        if self.isego:
            if self.numericid:
                foo["uris"].append(NS.fb.userNumericID)
                foo["vals"].append(self.numericid)
            if self.stringid:
                foo["uris"].append(NS.fb.userStringID)
                foo["vals"].append(self.stringid)
        else:
            if self.numericid:
                foo["uris"].append(NS.fb.groupNumericID)
                # NOTE(review): self.numericID (capital ID) is likely a typo
                # for self.numericid.
                foo["vals"].append(self.numericID)
            if self.sid:
                foo["uris"].append(NS.groupStringID)
                foo["vals"].append(self.stringID)
            if self.groupuid:
                foo["uris"].append(NS.fb.groupNumericID)
                foo["vals"].append(self.groupuid)
        if self.fb_link:
            # A single link or a list/tuple of links.
            if type(self.fb_link) not in (type([2,3]),type((2,3))):
                foo["uris"].append(NS.fb.fbLink)
                foo["vals"].append(self.fb_link)
            else:
                for link in self.fb_link:
                    foo["uris"].append(NS.fb.fbLink)
                    foo["vals"].append(link)
        self.online_prefix=online_prefix="https://raw.githubusercontent.com/OpenLinkedSocialData/{}master/".format(umbrella_dir)
        if self.friendship:
            foo["uris"]+=[
                NS.fb.onlineOriginalFriendshipFile,
                NS.fb.originalFriendshipFilename,
                NS.po.onlineFriendshipXMLFile,
                NS.po.onlineFriendshipTTLFile,
                NS.po.FriendshipXMLFilename,
                NS.po.FriendshipTTLFilename,
                ]+\
                [NS.fb.nFriends,
                NS.fb.nFriendships,
                NS.fb.friendshipsAnonymized ]+\
                [NS.fb.frienshipParticipantAttribute]*len(self.friendsvars)
            self.ffile=ffile="{}/base/{}".format(online_prefix,self.filename_friendship)
            self.frdf=frdf="{}Friendship.rdf".format(self.snapshotid)
            self.fttl=fttl="{}Friendship.ttl".format(self.snapshotid)
            # NOTE(review): the outer list below is never closed -- SyntaxError.
            foo["vals"]+=[
                ffile,
                self.filename_friendships,
                online_prefix+"/rdf/"+frdf,
                online_prefix+"/rdf/"+fttl,
                frdf,
                fttl,
                [self.nfriends,
                self.nfriendships,
                self.friendships_anonymized]+list(self.friendsvars)
        if self.interaction:
            foo["uris"]+=[
                NS.void.voidFile,
                NS.fb.onlineOriginalInteractionFile,
                NS.fb.originalInteractionFilename,
                NS.U("http://example.com/void.ttl#MyDataset"),
                NS.po.onlineInteractionXMLFile,
                NS.po.onlineinteractionTTLFile,
                NS.po.interactionXMLFilename,
                NS.po.interactionTTLFilename,
                ]+\
                [ NS.fb.nInteracted,
                NS.fb.nInteractions,
                NS.fb.interactionsAnonymized ]+\
                [ NS.fb.interactionParticipantAttribute ]*len(self.interactionsvars)
            ifile="{}/base/{}".format(online_prefix,self.snapshotid)
            irdf="{}Interaction.rdf".format(online_prefix,self.snapshotid)
            ittl="{}Interaction.ttl".format(online_prefix,self.snapshotid)
            # NOTE(review): missing commas after the first two items and an
            # unclosed outer list -- SyntaxError.
            foo["vals"]+=[
                NS.OpenLinkedSocialData
                ifile
                self.filename_interactions,
                online_prefix+"/rdf/"+irdf,
                online_prefix+"/rdf/"+ittl,
                irdf,
                ittl,
                [
                self.ninteractions,
                self.ninteracted,
                self.interactions_anonymized]+list(self.interactionsvars)
        foo["uris"]+=[
            NS.fb.ego,
            NS.fb.friendship,
            NS.fb.interaction,
            ]
        foo["vals"]+=[self.ego,self.friendship,self.interaction]
        #https://github.com/OpenLinkedSocialData/fbGroups/tree/master/AdornoNaoEhEnfeite29032013_fb
        self.available_dir=available_dir=online_prefix+self.snapshotid
        mrdf="{}Meta.rdf".format(self.snapshotid)
        mttl="{}Meta.ttl".format(self.snapshotid)
        desc="facebook network from {} . Ego: {}. Friendship: {}. Interaction: {}.\
nfriends: {}; nfrienships: {}; ninteracted: {}; ninteractions: ".format(
            self.name,self.isego,self.isfriendship,self.isinteraction,
            self.nfriends,self.nfriendships,self.ninteracted,self.ninteractions)
        P.rdf.link([tg2],snapshot,[
            NS.po.triplifiedIn,
            NS.po.createdAt,
            NS.po.donatedBy,
            NS.po.availableAt,
            NS.po.onlineMetaXMLFile,
            NS.po.onlineMetaTTLFile,
            NS.po.MetaXMLFilename,
            NS.po.MetaTTLFilename,
            NS.po.acquiredThrough,
            NS.po.socialProtocolTag,
            NS.rdfs.comment,
            ]+foo["uris"],
            [
            datetime.datetime.now(),
            self.datetime,
            self.snapshotid[:-4],
            available_dir,
            online_prefix+"/rdf/"+mrdf,
            online_prefix+"/rdf/"+mttl,
            mrdf,
            mttl,
            "Netvizz",
            # NOTE(review): missing comma after "Facebook" -- SyntaxError.
            "Facebook"
            desc,
            ]+foo["vals"],
            "Snapshot {}".format(self.snapshot))
        ind2=P.rdf.IC([tg2],NS.po.Platform,"Facebook")
        P.rdf.linkClasses([tg2],snapshot,"Snapshot {}".format(aname),
            [NS.po.socialProtocol],
            [ind2],
            )
        return tg2
    def rdfGDFFriendshipNetwork(self,fnet):
        """Triplify the friendship network parsed from a GDF file.

        Expects ``fnet`` shaped like {"individuals": {col: [values]},
        "relations": {"node1": [...], "node2": [...]}} (as produced by
        the GDF reader -- TODO confirm).  Returns the rdflib graph.
        """
        tg=P.rdf.makeBasicGraph([["po","fb"],[NS.po,NS.fb]])
        # If every label looks like a generic "user" label, the export
        # was anonymized.
        if sum([("user" in i) for i in fnet["individuals"]["label"]])==len(fnet["individuals"]["label"]):
            # fake names, spurious ids
            self.friendships_anonymized=True
        else:
            self.friendships_anonymized=False
        tkeys=list(fnet["individuals"].keys())
        if "groupid" in tkeys:
            self.groupid=fnet["individuals"]["groupid"][0]
            tkeys.remove("groupid")
        else:
            self.groupid=None
        iname= tkeys.index("name")
        ilabel=tkeys.index("label")
        if self.friendships_anonymized:
            self.friendsvars=[trans(i) for j,i in enumerate(tkeys) if j not in (ilabel,iname)]
        else:
            # NOTE(review): incomplete comprehension -- SyntaxError.
            self.friendsvars=[trans(i) for j,i]
        insert={"uris":[],"vals":[]}
        for tkey in tkeys:
            insert["uris"]+=[eval("NS.fb."+trans(tkey))]
            insert["vals"]+=[fnet["individuals"][tkey]]
        self.nfriends=len(insert["vals"][0])
        insert_uris=insert["uris"][:]
        for vals_ in zip(*insert["vals"]): # each participant receives values in the order of insert_uris
            name_="{}-{}".format(self.snapshotid,vals_[iname])
            # NOTE(review): bare 'friendships_anonymized' (missing self.)
            # would raise NameError at runtime.
            if friendships_anonymized:
                insert_uris_=[el for i,el in enumerate(insert_uris) if i not in (ilabel,iname) and vals_[i]]
                vals_=[el for i,el in enumerate(vals_) if (i not in (ilabel,iname)) and el]
            else:
                insert_uris_=[el for i,el in enumerate(insert_uris) if vals_[i]]
                vals_=[el for i,el in vals_ if el]
            ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name_)
            P.rdf.link([tg],ind,insert_uris_,vals_)
            P.rdf.link_([tg],ind,[NS.po.snapshot],[snapshot])
        if self.friendships_anonymized:
            B.friends_vars=[trans(i) for j,i in enumerate(tkeys) if j not in (ilabel,iname)]
        else:
            B.friends_vars=[trans(i) for i in tkeys]
        c("escritos participantes")
        friendships_=[fnet["relations"][i] for i in ("node1","node2")]
        i=0
        for uid1,uid2 in zip(*friendships_):
            # NOTE(review): vals_[iname] here reuses the leftover loop
            # variable instead of uid1/uid2 -- both URIs come out equal.
            uids=[r.URIRef(NS.fb.Participant+"#{}-{}".format(self.snapshotid,vals_[iname])) for i in (uid1,uid2)]
            P.rdf.link_([tg],uids[0],[P.rdf.ns.fb.friend],[uids[1]])
            # make friendship
            flabel="{}-{}-{}".format(self.snapshotid,uids[0],uids[1])
            ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,flabel)
            P.rdf.link_([tg],ind,flabel,[NS.po.snapshot]+[NS.fb.member]*2,
                [snapshot]+uids)
            if (i%1000)==0:
                c("friendships",i)
            i+=1
        self.nfriendships=len(friendships_[0])
        c("escritas amizades")
        return tg
    def rdfInteractionNetwork(fnet):
        """Triplify the interaction network parsed from a GDF file.

        NOTE(review): missing 'self' parameter although the body uses
        self throughout; also reads foo["uris"] which is undefined here.
        Returns the rdflib graph.
        """
        tg=P.rdf.makeBasicGraph([["po","fb"],[NS.po,NS.fb]])
        if sum([("user" in i) for i in fnet["individuals"]["label"]])==len(fnet["individuals"]["label"]):
            # fake names, spurious ids
            self.interactions_anonymized=True
        else:
            self.interactions_anonymized=False
        tkeys=list(fnet["individuals"].keys())
        if "groupid" in tkeys:
            self.groupid2=fnet["individuals"]["groupid"][0]
            tkeys.remove("groupid")
        else:
            self.groupid2=None
        iname= tkeys.index("name")
        ilabel=tkeys.index("label")
        if self.interactions_anonymized:
            self.varsfriendsinteraction=[trans(i) for j,i in enumerate(tkeys) if j not in (ilabel,iname)]
        else:
            # NOTE(review): 'varsiriendsinteraction' is likely a typo of
            # 'varsfriendsinteraction'.
            self.varsiriendsinteraction=[trans(i) for i in tkeys]
        insert={"uris":[],"vals":[]}
        for tkey in tkeys:
            insert["uris"]+=[eval("NS."+trans(tkey))]
            insert["vals"]+=[fnet["individuals"][tkey]]
        self.ninteracted=len(insert["vals"][0])
        # NOTE(review): 'foo' is undefined in this method (probably meant
        # insert["uris"]).
        insert_uris=foo["uris"][:]
        for vals_ in zip(*insert["vals"]):
            name_="{}-{}".format(self.snapshotid,vals_[iname])
            if self.interactions_anonymized:
                insert_uris_=[el for i,el in enumerate(insert_uris) if i not in (ilabel,iname) and vals_[i]]
                vals_=[el for i,el in enumerate(vals_) if i not in (ilabel,iname)]
            else:
                insert_uris_=[el for i,el in enumerate(insert_uris) if vals_[i]]
                vals_=[el for i,el in vals_ if el]
            ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name_)
            P.rdf.link([tg],ind,insert_uris,vals_)
            P.rdf.link_([tg],ind,[NS.po.snapshot],[snapshot])
        c("escritos participantes")
        self.interactionsvars=["node1","node2","weight"]
        interactions_=[fnet["relations"][i] for i in self.interactionsvars]
        self.ninteractions=len(interactions_[0])
        i=0
        for uid1,uid2,weight in zip(*interactions_):
            weight_=int(weight)
            # Interaction weights are expected to be whole numbers.
            if weight_-weight != 0:
                raise ValueError("float weights in fb interaction networks?")
            iid="{}-{}-{}".format(self.snapshotid,uid1,uid2)
            ind=P.rdf.IC([tg],NS.Interaction,iid)
            uids=[r.URIRef(NS.fb.Participant+"#{}-{}".format(self.snapshotid,i)) for i in (uid1,uid2)]
            P.rdf.link_([tg],ind,[NS.fb.iFrom,P.rdf.ns.fb.iTo]+[NS.po.snapshot],uids+[self.snapshot])
            P.rdf.link([tg],ind,[ NS.fb.weight],[weight_])
            if (i%1000)==0:
                c("interactions: ", i)
            i+=1
        c("escritas interações")
        return tg
def trans(tkey):
    """Map GDF column names onto attribute names used in the triples.

    In Netvizz GDF files the 'name' column holds the user id and the
    'label' column holds the display name; every other column keeps
    its own name.
    """
    renames = {"name": "uid", "label": "name"}
    return renames.get(tkey, tkey)
| {
"content_hash": "b0910d5e1bb4beb2ed5d1833445c4fe2",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 128,
"avg_line_length": 49.14477211796247,
"alnum_prop": 0.5138290327859909,
"repo_name": "ttm/socialLegacy",
"id": "033702a36202f23072abcd60a21617d924032cba",
"size": "18333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social/fb/gdf2rdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190129"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.