Dataset columns:

- repo_name: string (lengths 5–92)
- path: string (lengths 4–221)
- copies: string (19 distinct values)
- size: string (lengths 4–6)
- content: string (lengths 766–896k)
- license: string (15 distinct values)
- hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
- line_mean: float64 (6.51–99.9)
- line_max: int64 (32–997)
- alpha_frac: float64 (0.25–0.96)
- autogenerated: bool (1 class)
- ratio: float64 (1.5–13.6)
- config_test: bool (2 classes)
- has_no_keywords: bool (2 classes)
- few_assignments: bool (1 class)

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wadobo/congressus
|
congressus/tickets/templatetags/tickets.py
|
1
|
1531
|
import re

from django import template
from django.utils.translation import ugettext as _
from django.utils.html import mark_safe

register = template.Library()


@register.simple_tag
def ticket_seat_class(session, layout, seat, row, col):
    row = str(row)
    col = str(col)
    if seat == 'R':
        return 'seat-R'
    elif seat == '_':
        return 'seat-_'
    holded_type = session.is_seat_holded(layout, row, col)
    if holded_type:
        return 'seat-' + re.sub('[CP]', 'H', holded_type)
    if session.is_seat_available(layout, row, col):
        return 'seat-L'
    return 'seat-R'


@register.simple_tag(takes_context=True)
def scene_span(context, session, map):
    flag = 'scenedraw-%s' % session.id
    if flag in context:
        return ''
    context.dicts[0][flag] = True
    rows = (map.scene_bottom - map.scene_top) + 1
    cols = (map.scene_right - map.scene_left) + 1
    html = '<td class="scene" rowspan="%s" colspan="%s"> %s </td>' % (rows, cols, _('scene'))
    return mark_safe(html)


@register.simple_tag
def key(data, key, prefix="", default=''):
    k = key
    if prefix:
        k = prefix + str(key)
    return data.get(k, default)


@register.simple_tag
def get_value(dic, key):
    if not isinstance(dic, dict):
        return
    return dic.get(key)


@register.simple_tag
def get_free_seats(dic, session_id, layout):
    if not isinstance(dic, dict):
        return
    free = dic.get((session_id, layout.id))
    if free is None:
        free = layout.free()
    return free
|
agpl-3.0
| 2,939,241,391,327,671,000
| 21.514706
| 93
| 0.614631
| false
| 3.150206
| false
| false
| false
|
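The `key` and `get_value` tags in `congressus/tickets/templatetags/tickets.py` above are registered with `simple_tag` but are ordinary functions, so they can be exercised directly. A minimal sketch, assuming a Django version that still provides `ugettext` and that the module is importable at this path; the dictionaries and keys are made-up illustration data:

```python
# Minimal sketch; the import path and the sample data are assumptions.
from tickets.templatetags.tickets import key, get_value  # assumed module path

data = {'price-1': '10 EUR', 'price-2': '12 EUR'}
assert key(data, 1, prefix='price-') == '10 EUR'      # looks up prefix + str(key)
assert key(data, 'missing', default='n/a') == 'n/a'   # falls back to the default
assert get_value({'a': 1}, 'a') == 1
assert get_value(None, 'a') is None                   # non-dict input returns None
```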
michaelkirk/QGIS
|
python/plugins/processing/algs/lidar/lastools/lasnoise.py
|
1
|
3645
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lasnoise.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasnoise(LAStoolsAlgorithm):
ISOLATED = "ISOLATED"
STEP_XY = "STEP_XY"
STEP_Z = "STEP_Z"
OPERATION = "OPERATION"
OPERATIONS = ["classify", "remove"]
CLASSIFY_AS = "CLASSIFY_AS"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasnoise')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterNumber(lasnoise.ISOLATED,
self.tr("isolated if surrounding cells have only"), 0, None, 5))
self.addParameter(ParameterNumber(lasnoise.STEP_XY,
self.tr("resolution of isolation grid in xy"), 0, None, 4.0))
self.addParameter(ParameterNumber(lasnoise.STEP_Z,
self.tr("resolution of isolation grid in z"), 0, None, 4.0))
self.addParameter(ParameterSelection(lasnoise.OPERATION,
self.tr("what to do with isolated points"), lasnoise.OPERATIONS, 0))
self.addParameter(ParameterNumber(lasnoise.CLASSIFY_AS,
self.tr("classify as"), 0, None, 7))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasnoise")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
isolated = self.getParameterValue(lasnoise.ISOLATED)
commands.append("-isolated")
commands.append(str(isolated))
step_xy = self.getParameterValue(lasnoise.STEP_XY)
commands.append("-step_xy")
commands.append(str(step_xy))
step_z = self.getParameterValue(lasnoise.STEP_Z)
commands.append("-step_z")
commands.append(str(step_z))
operation = self.getParameterValue(lasnoise.OPERATION)
if operation != 0:
commands.append("-remove_noise")
else:
commands.append("-classify_as")
classify_as = self.getParameterValue(lasnoise.CLASSIFY_AS)
commands.append(str(classify_as))
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
|
gpl-2.0
| -8,790,487,615,896,949,000
| 42.915663
| 82
| 0.590672
| false
| 4.298349
| false
| false
| false
|
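`processAlgorithm` in `lasnoise.py` above does little more than translate the dialog values into LAStools command-line flags. A standalone sketch of that flag assembly, with an assumed install path; the real algorithm runs the result through `LAStoolsUtils.runLAStools`:

```python
import os

def build_lasnoise_command(lastools_path, isolated=5, step_xy=4.0, step_z=4.0,
                           operation=0, classify_as=7):
    """Illustrative only: mirrors the flag assembly in processAlgorithm above."""
    commands = [os.path.join(lastools_path, "bin", "lasnoise")]
    commands += ["-isolated", str(isolated)]
    commands += ["-step_xy", str(step_xy)]
    commands += ["-step_z", str(step_z)]
    if operation != 0:            # OPERATIONS[1] == "remove"
        commands.append("-remove_noise")
    else:                         # OPERATIONS[0] == "classify"
        commands += ["-classify_as", str(classify_as)]
    return commands

# build_lasnoise_command("/opt/LAStools") ->
# ['/opt/LAStools/bin/lasnoise', '-isolated', '5', '-step_xy', '4.0',
#  '-step_z', '4.0', '-classify_as', '7']
```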
pirate/bookmark-archiver
|
archivebox/extractors/readability.py
|
1
|
4294
|
__package__ = 'archivebox.extractors'
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional
import json
from ..index.schema import Link, ArchiveResult, ArchiveError
from ..system import run, atomic_write
from ..util import (
enforce_types,
download_url,
is_static_file,
)
from ..config import (
TIMEOUT,
CURL_BINARY,
SAVE_READABILITY,
DEPENDENCIES,
READABILITY_VERSION,
)
from ..logging_util import TimedProgress
@enforce_types
def get_html(link: Link, path: Path) -> str:
"""
Try to find singlefile, wget, and then dom files.
If none is found, download the url again.
"""
canonical = link.canonical_outputs()
abs_path = path.absolute()
sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]]
document = None
for source in sources:
try:
with open(abs_path / source, "r", encoding="utf-8") as f:
document = f.read()
break
except (FileNotFoundError, TypeError):
continue
if document is None:
return download_url(link.url)
else:
return document
@enforce_types
def should_save_readability(link: Link, out_dir: Optional[str]=None, overwrite: Optional[bool]=False) -> bool:
if is_static_file(link.url):
return False
out_dir = out_dir or Path(link.link_dir)
if not overwrite and (out_dir / 'readability').exists():
return False
return SAVE_READABILITY
@enforce_types
def save_readability(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult:
"""download reader friendly version using @mozilla/readability"""
out_dir = Path(out_dir or link.link_dir)
output_folder = out_dir.absolute() / "readability"
output = "readability"
# Readability Docs: https://github.com/mozilla/readability
status = 'succeeded'
# fake command to show the user so they have something to try debugging if get_html fails
cmd = [
CURL_BINARY,
link.url
]
readability_content = None
timer = TimedProgress(timeout, prefix=' ')
try:
document = get_html(link, out_dir)
temp_doc = NamedTemporaryFile(delete=False)
temp_doc.write(document.encode("utf-8"))
temp_doc.close()
if not document or len(document) < 10:
raise ArchiveError('Readability could not find HTML to parse for article text')
cmd = [
DEPENDENCIES['READABILITY_BINARY']['path'],
temp_doc.name,
]
result = run(cmd, cwd=out_dir, timeout=timeout)
try:
result_json = json.loads(result.stdout)
assert result_json and 'content' in result_json
except json.JSONDecodeError:
raise ArchiveError('Readability was not able to archive the page', result.stdout + result.stderr)
output_folder.mkdir(exist_ok=True)
readability_content = result_json.pop("textContent")
atomic_write(str(output_folder / "content.html"), result_json.pop("content"))
atomic_write(str(output_folder / "content.txt"), readability_content)
atomic_write(str(output_folder / "article.json"), result_json)
# parse out number of files downloaded from last line of stderr:
# "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
output_tail = [
line.strip()
for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:]
if line.strip()
]
hints = (
'Got readability response code: {}.'.format(result.returncode),
*output_tail,
)
# Check for common failure cases
if (result.returncode > 0):
raise ArchiveError('Readability was not able to archive the page', hints)
except (Exception, OSError) as err:
status = 'failed'
output = err
cmd = [cmd[0], './{singlefile,dom}.html']
finally:
timer.end()
return ArchiveResult(
cmd=cmd,
pwd=str(out_dir),
cmd_version=READABILITY_VERSION,
output=output,
status=status,
index_texts=[readability_content] if readability_content else [],
**timer.stats,
)
|
mit
| 4,898,612,251,076,805,000
| 30.807407
| 110
| 0.622729
| false
| 3.935839
| false
| false
| false
|
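`get_html` in `readability.py` above walks through the existing snapshot files and only re-downloads the page if none of them can be read. A small self-contained sketch of the same fall-through pattern; the candidate file names here are placeholders, not ArchiveBox's real canonical outputs:

```python
from pathlib import Path

def read_first_existing(snapshot_dir: Path,
                        candidates=("singlefile.html", "output.html", "dom.html")):
    """Illustrative fall-through read, mirroring get_html above (names are made up)."""
    for name in candidates:
        try:
            with open(snapshot_dir / name, "r", encoding="utf-8") as f:
                return f.read()
        except (FileNotFoundError, TypeError):
            continue
    return None  # the caller would re-download the URL here, as get_html does
```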
tysonholub/twilio-python
|
twilio/rest/taskrouter/v1/workspace/workspace_statistics.py
|
1
|
10107
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class WorkspaceStatisticsList(ListResource):
""" """
def __init__(self, version, workspace_sid):
"""
Initialize the WorkspaceStatisticsList
:param Version version: Version that contains the resource
:param workspace_sid: The SID of the Workspace
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList
"""
super(WorkspaceStatisticsList, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, }
def get(self):
"""
Constructs a WorkspaceStatisticsContext
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
"""
return WorkspaceStatisticsContext(self._version, workspace_sid=self._solution['workspace_sid'], )
def __call__(self):
"""
Constructs a WorkspaceStatisticsContext
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
"""
return WorkspaceStatisticsContext(self._version, workspace_sid=self._solution['workspace_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.WorkspaceStatisticsList>'
class WorkspaceStatisticsPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the WorkspaceStatisticsPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param workspace_sid: The SID of the Workspace
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsPage
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsPage
"""
super(WorkspaceStatisticsPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of WorkspaceStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance
"""
return WorkspaceStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.WorkspaceStatisticsPage>'
class WorkspaceStatisticsContext(InstanceContext):
""" """
def __init__(self, version, workspace_sid):
"""
Initialize the WorkspaceStatisticsContext
:param Version version: Version that contains the resource
:param workspace_sid: The SID of the Workspace to fetch
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
"""
super(WorkspaceStatisticsContext, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, }
self._uri = '/Workspaces/{workspace_sid}/Statistics'.format(**self._solution)
def fetch(self, minutes=values.unset, start_date=values.unset,
end_date=values.unset, task_channel=values.unset,
split_by_wait_time=values.unset):
"""
Fetch a WorkspaceStatisticsInstance
:param unicode minutes: Only calculate statistics since this many minutes in the past
:param datetime start_date: Only calculate statistics from on or after this date
:param datetime end_date: Only calculate statistics from this date and time and earlier
:param unicode task_channel: Only calculate statistics on this TaskChannel.
:param unicode split_by_wait_time: A comma separated list of values that describes the thresholds to calculate statistics on
:returns: Fetched WorkspaceStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance
"""
params = values.of({
'Minutes': minutes,
'StartDate': serialize.iso8601_datetime(start_date),
'EndDate': serialize.iso8601_datetime(end_date),
'TaskChannel': task_channel,
'SplitByWaitTime': split_by_wait_time,
})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return WorkspaceStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.WorkspaceStatisticsContext {}>'.format(context)
class WorkspaceStatisticsInstance(InstanceResource):
""" """
def __init__(self, version, payload, workspace_sid):
"""
Initialize the WorkspaceStatisticsInstance
:returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance
"""
super(WorkspaceStatisticsInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'realtime': payload.get('realtime'),
'cumulative': payload.get('cumulative'),
'account_sid': payload.get('account_sid'),
'workspace_sid': payload.get('workspace_sid'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'workspace_sid': workspace_sid, }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
"""
if self._context is None:
self._context = WorkspaceStatisticsContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
return self._context
@property
def realtime(self):
"""
:returns: An object that contains the real-time statistics for the Workspace
:rtype: dict
"""
return self._properties['realtime']
@property
def cumulative(self):
"""
:returns: An object that contains the cumulative statistics for the Workspace
:rtype: dict
"""
return self._properties['cumulative']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def workspace_sid(self):
"""
:returns: The SID of the Workspace
:rtype: unicode
"""
return self._properties['workspace_sid']
@property
def url(self):
"""
:returns: The absolute URL of the Workspace statistics resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self, minutes=values.unset, start_date=values.unset,
end_date=values.unset, task_channel=values.unset,
split_by_wait_time=values.unset):
"""
Fetch a WorkspaceStatisticsInstance
:param unicode minutes: Only calculate statistics since this many minutes in the past
:param datetime start_date: Only calculate statistics from on or after this date
:param datetime end_date: Only calculate statistics from this date and time and earlier
:param unicode task_channel: Only calculate statistics on this TaskChannel.
:param unicode split_by_wait_time: A comma separated list of values that describes the thresholds to calculate statistics on
:returns: Fetched WorkspaceStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance
"""
return self._proxy.fetch(
minutes=minutes,
start_date=start_date,
end_date=end_date,
task_channel=task_channel,
split_by_wait_time=split_by_wait_time,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Taskrouter.V1.WorkspaceStatisticsInstance {}>'.format(context)
|
mit
| -1,229,389,616,420,329,200
| 34.588028
| 132
| 0.646977
| false
| 4.575373
| false
| false
| false
|
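These generated classes are normally reached through the Twilio client rather than constructed directly. A hedged usage sketch: the credentials and workspace SID are placeholders, and the `workspaces(...).statistics()` attribute chain is assumed from the generated resource layout rather than shown in this file:

```python
from twilio.rest import Client

# Placeholder credentials and SIDs; replace with real values.
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

# WorkspaceStatisticsList.__call__ above returns the context, whose fetch()
# serializes Minutes/StartDate/EndDate/TaskChannel/SplitByWaitTime as query params.
stats = client.taskrouter.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .statistics().fetch(minutes=15)

print(stats.realtime)    # dict of real-time workspace statistics
print(stats.cumulative)  # dict of cumulative workspace statistics
```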
awsdocs/aws-doc-sdk-examples
|
python/example_code/rekognition/rekognition_collections.py
|
1
|
13421
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Rekognition to
create a collection that contains faces indexed from a series of images. The
collection is then searched for faces that match a reference face.
The usage demo in this file uses images in the .media folder. If you run this code
without cloning the GitHub repository, you must first download the image files from
https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/python/example_code/rekognition/.media
"""
import logging
from pprint import pprint
import boto3
from botocore.exceptions import ClientError
from rekognition_objects import RekognitionFace
from rekognition_image_detection import RekognitionImage
logger = logging.getLogger(__name__)
class RekognitionCollection:
"""
Encapsulates an Amazon Rekognition collection. This class is a thin wrapper
around parts of the Boto3 Amazon Rekognition API.
"""
def __init__(self, collection, rekognition_client):
"""
Initializes a collection object.
:param collection: Collection data in the format returned by a call to
create_collection.
:param rekognition_client: A Boto3 Rekognition client.
"""
self.collection_id = collection['CollectionId']
self.collection_arn, self.face_count, self.created = self._unpack_collection(
collection)
self.rekognition_client = rekognition_client
@staticmethod
def _unpack_collection(collection):
"""
Unpacks optional parts of a collection that can be returned by
describe_collection.
:param collection: The collection data.
:return: A tuple of the data in the collection.
"""
return (
collection.get('CollectionArn'),
collection.get('FaceCount', 0),
collection.get('CreationTimestamp'))
def to_dict(self):
"""
Renders parts of the collection data to a dict.
:return: The collection data as a dict.
"""
rendering = {
'collection_id': self.collection_id,
'collection_arn': self.collection_arn,
'face_count': self.face_count,
'created': self.created
}
return rendering
def describe_collection(self):
"""
Gets data about the collection from the Amazon Rekognition service.
:return: The collection rendered as a dict.
"""
try:
response = self.rekognition_client.describe_collection(
CollectionId=self.collection_id)
# Work around capitalization of Arn vs. ARN
response['CollectionArn'] = response.get('CollectionARN')
(self.collection_arn, self.face_count,
self.created) = self._unpack_collection(response)
logger.info("Got data for collection %s.", self.collection_id)
except ClientError:
logger.exception("Couldn't get data for collection %s.", self.collection_id)
raise
else:
return self.to_dict()
def delete_collection(self):
"""
Deletes the collection.
"""
try:
self.rekognition_client.delete_collection(CollectionId=self.collection_id)
logger.info("Deleted collection %s.", self.collection_id)
self.collection_id = None
except ClientError:
logger.exception("Couldn't delete collection %s.", self.collection_id)
raise
def index_faces(self, image, max_faces):
"""
Finds faces in the specified image, indexes them, and stores them in the
collection.
:param image: The image to index.
:param max_faces: The maximum number of faces to index.
:return: A tuple. The first element is a list of indexed faces.
The second element is a list of faces that couldn't be indexed.
"""
try:
response = self.rekognition_client.index_faces(
CollectionId=self.collection_id, Image=image.image,
ExternalImageId=image.image_name, MaxFaces=max_faces,
DetectionAttributes=['ALL'])
indexed_faces = [
RekognitionFace({**face['Face'], **face['FaceDetail']})
for face in response['FaceRecords']]
unindexed_faces = [
RekognitionFace(face['FaceDetail'])
for face in response['UnindexedFaces']]
logger.info(
"Indexed %s faces in %s. Could not index %s faces.", len(indexed_faces),
image.image_name, len(unindexed_faces))
except ClientError:
logger.exception("Couldn't index faces in image %s.", image.image_name)
raise
else:
return indexed_faces, unindexed_faces
def list_faces(self, max_results):
"""
Lists the faces currently indexed in the collection.
:param max_results: The maximum number of faces to return.
:return: The list of faces in the collection.
"""
try:
response = self.rekognition_client.list_faces(
CollectionId=self.collection_id, MaxResults=max_results)
faces = [RekognitionFace(face) for face in response['Faces']]
logger.info(
"Found %s faces in collection %s.", len(faces), self.collection_id)
except ClientError:
logger.exception(
"Couldn't list faces in collection %s.", self.collection_id)
raise
else:
return faces
def search_faces_by_image(self, image, threshold, max_faces):
"""
Searches for faces in the collection that match the largest face in the
reference image.
:param image: The image that contains the reference face to search for.
:param threshold: The match confidence must be greater than this value
for a face to be included in the results.
:param max_faces: The maximum number of faces to return.
:return: A tuple. The first element is the face found in the reference image.
The second element is the list of matching faces found in the
collection.
"""
try:
response = self.rekognition_client.search_faces_by_image(
CollectionId=self.collection_id, Image=image.image,
FaceMatchThreshold=threshold, MaxFaces=max_faces)
image_face = RekognitionFace({
'BoundingBox': response['SearchedFaceBoundingBox'],
'Confidence': response['SearchedFaceConfidence']
})
collection_faces = [
RekognitionFace(face['Face']) for face in response['FaceMatches']]
logger.info("Found %s faces in the collection that match the largest "
"face in %s.", len(collection_faces), image.image_name)
except ClientError:
logger.exception(
"Couldn't search for faces in %s that match %s.", self.collection_id,
image.image_name)
raise
else:
return image_face, collection_faces
def search_faces(self, face_id, threshold, max_faces):
"""
Searches for faces in the collection that match another face from the
collection.
:param face_id: The ID of the face in the collection to search for.
:param threshold: The match confidence must be greater than this value
for a face to be included in the results.
:param max_faces: The maximum number of faces to return.
:return: The list of matching faces found in the collection. This list does
not contain the face specified by `face_id`.
"""
try:
response = self.rekognition_client.search_faces(
CollectionId=self.collection_id, FaceId=face_id,
FaceMatchThreshold=threshold, MaxFaces=max_faces)
faces = [RekognitionFace(face['Face']) for face in response['FaceMatches']]
logger.info(
"Found %s faces in %s that match %s.", len(faces), self.collection_id,
face_id)
except ClientError:
logger.exception(
"Couldn't search for faces in %s that match %s.", self.collection_id,
face_id)
raise
else:
return faces
def delete_faces(self, face_ids):
"""
Deletes faces from the collection.
:param face_ids: The list of IDs of faces to delete.
:return: The list of IDs of faces that were deleted.
"""
try:
response = self.rekognition_client.delete_faces(
CollectionId=self.collection_id, FaceIds=face_ids)
deleted_ids = response['DeletedFaces']
logger.info(
"Deleted %s faces from %s.", len(deleted_ids), self.collection_id)
except ClientError:
logger.exception("Couldn't delete faces from %s.", self.collection_id)
raise
else:
return deleted_ids
class RekognitionCollectionManager:
"""
Encapsulates Amazon Rekognition collection management functions.
This class is a thin wrapper around parts of the Boto3 Amazon Rekognition API.
"""
def __init__(self, rekognition_client):
"""
Initializes the collection manager object.
:param rekognition_client: A Boto3 Rekognition client.
"""
self.rekognition_client = rekognition_client
def create_collection(self, collection_id):
"""
Creates an empty collection.
:param collection_id: Text that identifies the collection.
:return: The newly created collection.
"""
try:
response = self.rekognition_client.create_collection(
CollectionId=collection_id)
response['CollectionId'] = collection_id
collection = RekognitionCollection(response, self.rekognition_client)
logger.info("Created collection %s.", collection_id)
except ClientError:
logger.exception("Couldn't create collection %s.", collection_id)
raise
else:
return collection
def list_collections(self, max_results):
"""
Lists collections for the current account.
:param max_results: The maximum number of collections to return.
:return: The list of collections for the current account.
"""
try:
response = self.rekognition_client.list_collections(MaxResults=max_results)
collections = [
RekognitionCollection({'CollectionId': col_id}, self.rekognition_client)
for col_id in response['CollectionIds']]
except ClientError:
logger.exception("Couldn't list collections.")
raise
else:
return collections
def usage_demo():
print('-'*88)
print("Welcome to the Amazon Rekognition face collection demo!")
print('-'*88)
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
rekognition_client = boto3.client('rekognition')
images = [
RekognitionImage.from_file(
'.media/pexels-agung-pandit-wiguna-1128316.jpg', rekognition_client,
image_name='sitting'),
RekognitionImage.from_file(
'.media/pexels-agung-pandit-wiguna-1128317.jpg', rekognition_client,
image_name='hopping'),
RekognitionImage.from_file(
'.media/pexels-agung-pandit-wiguna-1128318.jpg', rekognition_client,
image_name='biking')]
collection_mgr = RekognitionCollectionManager(rekognition_client)
collection = collection_mgr.create_collection('doc-example-collection-demo')
print(f"Created collection {collection.collection_id}:")
pprint(collection.describe_collection())
print("Indexing faces from three images:")
for image in images:
collection.index_faces(image, 10)
print("Listing faces in collection:")
faces = collection.list_faces(10)
for face in faces:
pprint(face.to_dict())
input("Press Enter to continue.")
print(f"Searching for faces in the collection that match the first face in the "
f"list (Face ID: {faces[0].face_id}.")
found_faces = collection.search_faces(faces[0].face_id, 80, 10)
print(f"Found {len(found_faces)} matching faces.")
for face in found_faces:
pprint(face.to_dict())
input("Press Enter to continue.")
print(f"Searching for faces in the collection that match the largest face in "
f"{images[0].image_name}.")
image_face, match_faces = collection.search_faces_by_image(images[0], 80, 10)
print(f"The largest face in {images[0].image_name} is:")
pprint(image_face.to_dict())
print(f"Found {len(match_faces)} matching faces.")
for face in match_faces:
pprint(face.to_dict())
input("Press Enter to continue.")
collection.delete_collection()
print('Thanks for watching!')
print('-'*88)
if __name__ == '__main__':
usage_demo()
|
apache-2.0
| 4,713,168,682,652,689,000
| 38.12828
| 102
| 0.617167
| false
| 4.346179
| false
| false
| false
|
atarax82/lotto-project
|
project/monitor.py
|
1
|
2948
|
import os
import sys
#import time
import signal
import threading
import atexit
import queue
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
# If path doesn't denote a file and we were previously
# tracking it, then it has been removed or the file type
# has changed, so force a restart. If we were not previously
# tracking the file then we can ignore it: it is probably a
# pseudo reference, such as a file extracted from a
# collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except:
# If any exception occurred, it is likely that the file
# has just been removed before stat(), so force a restart.
return True
return False
def _monitor():
while 1:
# Check modification times on all files in sys.modules.
for module in list(sys.modules.values()):
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if not path in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Starting change monitor.' % prefix, file=sys.stderr)
_running = True
_thread.start()
_lock.release()
|
gpl-3.0
| -7,386,364,848,896,357,000
| 25.097345
| 76
| 0.585142
| false
| 4.140449
| false
| false
| false
|
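`monitor.py` above is meant to be imported once at process startup; `track()` adds non-module files to the watch list and `start()` launches the polling thread, which sends SIGINT to the current process when a change is detected. A hedged wiring sketch, assuming the module is importable as `monitor` and using a placeholder settings path:

```python
# Hedged sketch: wiring the change monitor into application startup.
import monitor  # the module above; adjust the import to your project layout

monitor.track('/path/to/project/settings.py')  # watch an extra, non-module file
monitor.start(interval=1.0)                    # poll sys.modules every second
# On any change the monitor sends SIGINT to the current process, which the
# hosting server (e.g. a mod_wsgi daemon) treats as a restart request.
```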
ESSolutions/ESSArch_Core
|
ESSArch_Core/agents/documents.py
|
1
|
1455
|
from elasticsearch_dsl import Date, InnerDoc, Keyword, Nested, Text
from ESSArch_Core.agents.models import Agent
from ESSArch_Core.search.documents import DocumentBase
from ESSArch_Core.tags.documents import autocomplete_analyzer
class AgentNameDocument(InnerDoc):
    main = Text()
    part = Text()
    description = Text()
    start_date = Date()
    end_date = Date()

    @classmethod
    def from_obj(cls, obj):
        doc = AgentNameDocument(
            main=obj.main,
            part=obj.part,
            description=obj.description,
            start_date=obj.start_date,
            end_date=obj.end_date,
        )
        return doc


class AgentDocument(DocumentBase):
    id = Keyword()
    task_id = Keyword()
    names = Nested(AgentNameDocument)
    start_date = Date()
    end_date = Date()

    @classmethod
    def get_model(cls):
        return Agent

    @classmethod
    def from_obj(cls, obj):
        if obj.task is None:
            task_id = None
        else:
            task_id = str(obj.task.pk)
        doc = AgentDocument(
            _id=str(obj.pk),
            id=str(obj.pk),
            task_id=task_id,
            names=[
                AgentNameDocument.from_obj(name) for name in obj.names.iterator()
            ],
            start_date=obj.start_date,
            end_date=obj.end_date,
        )
        return doc

    class Index:
        name = 'agent'
        analyzers = [autocomplete_analyzer]
|
gpl-3.0
| 3,981,038,037,303,409,700
| 23.661017
| 81
| 0.57457
| false
| 3.869681
| false
| false
| false
|
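`AgentDocument.from_obj` above marshals a Django `Agent` row into an Elasticsearch document. A hedged indexing sketch, assuming Django is configured, `DocumentBase` behaves like an `elasticsearch_dsl.Document`, and a default Elasticsearch connection exists:

```python
# Hedged sketch: index one Agent into the 'agent' index defined above.
from ESSArch_Core.agents.models import Agent
from ESSArch_Core.agents.documents import AgentDocument

agent = Agent.objects.first()               # any existing agent row (assumed to exist)
doc = AgentDocument.from_obj(agent)         # marshal names, dates and task id
doc.save(index=AgentDocument.Index.name)    # elasticsearch_dsl Document.save()
```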
MasterGowen/moonrain
|
moonrain/projects/views.py
|
1
|
3432
|
import json
from django.shortcuts import render, redirect
from django.views.generic.edit import UpdateView, DeleteView
from .models import Project
from ..videos.models import Video, VideosSequence
from ..videos.views import new_sequence, get_sequence
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.context_processors import csrf
from .forms import ProjectForm
def projects_list_all(request):
projects = []
for project in Project.objects.all():
if project.permission == 'public':
projects.append(project)
@login_required
def for_users(request, project, projects):
if project.permission == 'for_users':
projects.append(project)
for_users(request, project, projects)
@login_required
def for_staff(request, project, projects):
if project.permission == 'for_staff':
if request.user == project.author or str(request.user) in str(project.users()):
projects.append(project)
for_staff(request, project, projects)
projects = list(reversed(projects))
paginator = Paginator(projects, 10)
page = request.GET.get('page')
try:
projects = paginator.page(page)
except PageNotAnInteger:
projects = paginator.page(1)
except EmptyPage:
projects = paginator.page(paginator.num_pages)
return render(request, 'projects/index.html', {'projects': projects, 'pages': range(1, (paginator.num_pages + 1))})
def detail(request, project_id):
try:
project = Project.objects.get(id=project_id)
jsonSequence = json.loads(get_sequence(request, project))
except ObjectDoesNotExist:
raise Http404
videos_ids = jsonSequence['sequence'].split(',')
videos = []
for video_id in videos_ids:
if video_id != 'None':
video = Video.objects.get(id=video_id)
videos.append(video)
if project.permission == 'public':
return render(request, 'projects/project.html', {'project': project, 'videos': videos})
elif project.permission == 'for_users' \
and request.user:
return render(request, 'projects/project.html', {'project': project, 'videos': videos})
elif project.permission == 'for_staff' \
and request.user == project.author \
or str(request.user) \
in str(project.users()):
return render(request, 'projects/project.html', {'project': project, 'videos': videos})
else:
return HttpResponse(status=403)
def new_project(request):
if request.method == 'POST':
form = ProjectForm(request.POST)
if form.is_valid():
project = form.save(commit=False)
project.author_id = request.user.id
project.save()
new_sequence(request, project.id)
return redirect(project)
args = {}
args.update(csrf(request))
args['form'] = ProjectForm()
return render(request, 'projects/new.html', args)
class ProjectDelete(DeleteView):
model = Project
fields = []
success_url = '/projects/'
class ProjectUpdate(UpdateView):
model = Project
fields = ['name', 'comments', 'tags', 'permission']
|
gpl-2.0
| 8,194,564,417,942,022,000
| 32.656863
| 119
| 0.648893
| false
| 4.180268
| false
| false
| false
|
tbetcke/PyBEM2D
|
examples/circscatt.py
|
1
|
1272
|
import pybem2d.core.bases as pcb
import pybem2d.core.segments as pcs
import pybem2d.core.quadrules as pcq
import pybem2d.core.kernels as pck
import pybem2d.core.mesh as pcm
import pybem2d.core.assembly as pca
import pybem2d.core.evaluation as pce
import pybem2d.core.visualization as pcv
import numpy as np
k=10
nelems=50
dirs=np.array([1.0,0])
# Define the mesh
circle=pcs.Arc(3,0,0,2*np.pi,1.0)
d=pcm.Domain([circle])
mesh=pcm.Mesh([d])
mesh.discretize(nelems)
quadrule=pcq.GaussQuadrature() # A standard Gauss Quadrature with default parameters
mToB=pcb.Legendre.legendreBasis(mesh,2) # A basis of Legendre polynomials of degree 2
kernel=pck.AcousticCombined(k,k) # The combined potential layer
singleLayer=pck.AcousticSingleLayer(k)
assembly=pca.Assembly(mToB,quadrule)
rhsfun=lambda t,x,n: 2j*k*np.exp(1j*k*(dirs[0]*x[0]+dirs[1]*x[1]))*(dirs[0]*n[0]+dirs[1]*n[1]-1)
rhs=assembly.projFun([rhsfun])
mKernel=assembly.getKernel(kernel)
mIdentity=assembly.getIdentity()
op=mIdentity+2*mKernel
print(op.shape)
coeffs=np.linalg.solve(op,rhs)
#ev=pce.Evaluator(mToB,singleLayer,quadrule)
#v=pcv.Visualizer(ev,[-3,5,-3,3],200,200,incWave=lambda x: np.exp(1j*k*(x[0]*dirs[0]+x[1]*dirs[1])))
#v.fullField(-coeffs[:,0])
x,f=pce.evalDensity(mToB,coeffs[:,0])
|
mit
| 7,395,326,172,782,290,000
| 22.127273
| 100
| 0.750786
| false
| 2.359926
| false
| true
| false
|
mjafin/bcbio-nextgen
|
bcbio/variation/bedutils.py
|
1
|
7691
|
"""Utilities for manipulating BED files.
"""
import os
import shutil
import sys
import subprocess
import toolz as tz
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import vcfutils
def get_sort_cmd():
"""Retrieve GNU coreutils sort command, using version-sort if available.
Recent versions of sort have alpha-numeric sorting, which provides
more natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10).
This also fixes versions of sort, like 8.22 in CentOS 7.1, that have broken
sorting without version sorting specified.
https://github.com/chapmanb/bcbio-nextgen/issues/624
https://github.com/chapmanb/bcbio-nextgen/issues/1017
"""
has_versionsort = subprocess.check_output("sort --help | grep version-sort; exit 0", shell=True).strip()
if has_versionsort:
return "sort -V"
else:
return "sort"
def check_bed_contigs(in_file, data):
"""Ensure BED file contigs match the reference genome.
"""
contigs = set([])
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(("#", "track", "browser")) and line.strip():
contigs.add(line.split()[0])
ref_contigs = set([x.name for x in ref.file_contigs(dd.get_ref_file(data))])
if len(contigs - ref_contigs) / float(len(contigs)) > 0.25:
raise ValueError("Contigs in BED file %s not in reference genome:\n %s\n"
% (in_file, list(contigs - ref_contigs)) +
"This is typically due to chr1 versus 1 differences in BED file and reference.")
def clean_file(in_file, data, prefix="", bedprep_dir=None):
"""Prepare a clean sorted input BED file without headers
"""
if in_file:
if not bedprep_dir:
bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep"))
out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file))).replace(".gz", "")
if not utils.file_uptodate(out_file, in_file):
check_bed_contigs(in_file, data)
with file_transaction(data, out_file) as tx_out_file:
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
sort_cmd = get_sort_cmd()
cmd = ("{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | "
"grep -v ^# | "
"{py_cl} -x 'bcbio.variation.bedutils.remove_bad(x)' | "
"{sort_cmd} -k1,1 -k2,2n > {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare cleaned BED file", data)
vcfutils.bgzip_and_index(out_file, data.get("config", {}), remove_orig=False)
return out_file
def sort_merge(in_file, data):
"""Sort and merge a BED file, collapsing gene names.
"""
out_file = "%s-sort.bed" % os.path.splitext(in_file)[0]
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
sort_cmd = get_sort_cmd()
cmd = ("{cat_cmd} {in_file} | {sort_cmd} -k1,1 -k2,2n | "
"bedtools merge -i - -c 4 -o distinct > {tx_out_file}")
do.run(cmd.format(**locals()), "Sort BED file", data)
return out_file
def remove_bad(line):
"""Remove non-increasing BED lines which will cause variant callers to choke.
"""
parts = line.strip().split("\t")
if line.strip() and len(parts) > 2 and int(parts[2]) > int(parts[1]):
return line
else:
return None
def merge_overlaps(in_file, data, distance=None, out_dir=None):
"""Merge bed file intervals to avoid overlapping regions.
Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
that don't collapse BEDs prior to using them.
"""
config = data["config"]
if in_file:
bedtools = config_utils.get_program("bedtools", config,
default="bedtools")
work_dir = tz.get_in(["dirs", "work"], data)
if out_dir:
bedprep_dir = out_dir
elif work_dir:
bedprep_dir = utils.safe_makedir(os.path.join(work_dir, "bedprep"))
else:
bedprep_dir = os.path.dirname(in_file)
out_file = os.path.join(bedprep_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(in_file))[0]))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
distance = "-d %s" % distance if distance else ""
cmd = "{bedtools} merge {distance} -i {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Prepare merged BED file", data)
vcfutils.bgzip_and_index(out_file, data["config"], remove_orig=False)
return out_file
def population_variant_regions(items):
"""Retrieve the variant region BED file from a population of items.
If tumor/normal, return the tumor BED file. If a population, return
the BED file covering the most bases.
"""
import pybedtools
if len(items) == 1:
return dd.get_variant_regions(items[0])
else:
paired = vcfutils.get_paired(items)
if paired:
return dd.get_variant_regions(paired.tumor_data)
else:
vrs = []
for data in items:
vr_bed = dd.get_variant_regions(data)
if vr_bed:
vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed))
vrs.sort(reverse=True)
if vrs:
return vrs[0][1]
def clean_inputs(data):
"""Clean BED input files to avoid overlapping segments that cause downstream issues.
Pre-merges inputs to avoid needing to call multiple times during later parallel steps.
"""
clean_vr = clean_file(utils.get_in(data, ("config", "algorithm", "variant_regions")), data)
merged_vr = merge_overlaps(clean_vr, data)
data["config"]["algorithm"]["variant_regions"] = clean_vr
data["config"]["algorithm"]["variant_regions_merged"] = merged_vr
return data
def combine(in_files, out_file, config):
"""Combine multiple BED files into a single output.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for in_file in in_files:
with open(in_file) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file
def intersect_two(f1, f2, work_dir, data):
"""Intersect two regions, handling cases where either file is not present.
"""
f1_exists = f1 and utils.file_exists(f1)
f2_exists = f2 and utils.file_exists(f2)
if not f1_exists and not f2_exists:
return None
elif f1_exists and not f2_exists:
return f1
elif f2_exists and not f1_exists:
return f2
else:
out_file = os.path.join(work_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(f1))[0]))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "bedtools intersect -a {f1} -b {f2} > {tx_out_file}"
do.run(cmd.format(**locals()), "Intersect BED files", data)
return out_file
|
mit
| -970,185,428,665,828,700
| 41.727778
| 115
| 0.606683
| false
| 3.415187
| true
| false
| false
|
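`remove_bad` above is the one piece of `bedutils.py` that runs per BED line inside the `clean_file` shell pipeline; it keeps only non-empty lines whose end coordinate is strictly greater than the start. A quick illustration of its behaviour:

```python
# Quick illustration of the per-line filter used by the clean_file pipeline above.
from bcbio.variation.bedutils import remove_bad

assert remove_bad("chr1\t100\t200\tGENE1\n") is not None  # kept: end > start
assert remove_bad("chr1\t200\t200\tGENE1\n") is None       # dropped: zero-length interval
assert remove_bad("chr1\t300\t200\tGENE1\n") is None       # dropped: end < start
assert remove_bad("\n") is None                             # dropped: blank line
```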
JeroenZegers/Nabu-MSSS
|
nabu/neuralnetworks/loss_computers/ms_loss.py
|
1
|
2597
|
"""@file ms_loss.py
contains the MsLoss.
Temporary naming of file and class"""
import loss_computer
import tensorflow as tf
class MsLoss(loss_computer.LossComputer):
"""A loss computer that calculates the loss"""
def __call__(self, targets, logits, seq_length):
# target is actually only required for its shape, to derive the number of active speakers
multi_targets = targets['multi_targets']
nr_act_spk = multi_targets.get_shape()[-1]
# seq_length = seq_length['bin_est']
logits = logits['act_logit']
logits = tf.squeeze(logits, axis=-1)
nr_spk = logits.get_shape()[1]
batch_size = logits.get_shape()[0]
if self.lossconf['activation'] == 'sigmoid':
logits = tf.sigmoid(logits)
else:
raise BaseException('Other activations not yet implemented')
if len(logits.get_shape()) != 3:
raise BaseException('Hardcoded some stuff for 3 dimensions')
second_dim = logits.get_shape()[1]
seq_length = seq_length['features'] # have to do this better
max_len = tf.shape(logits)[-1]
tmp = []
for utt_ind in range(batch_size):
tmp.append(
tf.expand_dims(
tf.concat(
[tf.ones([second_dim, seq_length[utt_ind]]), tf.zeros([second_dim, max_len - seq_length[utt_ind]])], -1), 0))
# seq_length_mask[utt_ind, :seq_length[utt_ind]] = 1
seq_length_mask = tf.concat(tmp, 0)
logits = logits * seq_length_mask
if self.lossconf['av_time'] == 'True':
logits = tf.reduce_sum(logits, 2)
logits = tf.divide(logits, tf.expand_dims(tf.to_float(seq_length), -1))
targets = tf.concat([tf.ones([batch_size, nr_act_spk]), tf.zeros([batch_size, nr_spk-nr_act_spk])], -1)
loss = tf.reduce_sum(tf.square(logits - targets))
norm = tf.to_float(batch_size * nr_spk)
return loss, norm
def oldcall(self, targets, logits, seq_length):
# target is actually only required for its shape, to derive the number of active speakers
multi_targets = targets['multi_targets']
nr_act_spk = multi_targets.get_shape()[-1]
# seq_length = seq_length['bin_est']
logits = logits['act_logit']
logits = tf.squeeze(logits, axis=-1)
nr_spk = logits.get_shape()[1]
batch_size = logits.get_shape()[0]
if self.lossconf['activation'] == 'sigmoid':
logits = tf.sigmoid(logits)
else:
raise BaseException('Other activations not yet implemented')
if self.lossconf['av_time'] == 'True':
logits = tf.reduce_mean(logits, 2)
targets = tf.concat([tf.ones([batch_size, nr_act_spk]), tf.zeros([batch_size, nr_spk-nr_act_spk])], -1)
loss = tf.reduce_sum(tf.square(logits - targets))
norm = tf.to_float(batch_size * nr_spk)
return loss, norm
|
mit
| 8,943,644,888,906,579,000
| 32.727273
| 115
| 0.672699
| false
| 2.901676
| false
| false
| false
|
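The core of `MsLoss.__call__` above is the per-utterance sequence mask that zeroes logits past each utterance's length before averaging over time. A small NumPy illustration of that masking and averaging step; the shapes and values are made up:

```python
import numpy as np

# Illustrative mask construction, mirroring the tf.concat loop in MsLoss.__call__.
batch_size, nr_spk, max_len = 2, 4, 5
seq_length = np.array([5, 3])
logits = np.random.rand(batch_size, nr_spk, max_len)

mask = np.stack([
    np.concatenate([np.ones((nr_spk, l)), np.zeros((nr_spk, max_len - l))], axis=-1)
    for l in seq_length
])                                                   # shape (batch, nr_spk, max_len)
masked = logits * mask                               # zero out frames past seq_length
avg = masked.sum(axis=2) / seq_length[:, None]       # average over valid frames only
print(avg.shape)                                     # (2, 4): one score per speaker slot
```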
ikosenn/cray-cray
|
fummy.py
|
1
|
2626
|
"""
Author: ikosenn
This is a program to eliminate stale git branches.
It checks the last commit on each branch and, based on the staleness
threshold, eliminates all stale branches.
Another CLI command is provided to eliminate all available branches.
You can also remove all branches that have already been merged to
the main branch
"""
import os
from datetime import datetime
import click
from sarge import capture_stdout
import pytz
from dateutil.parser import parse
DEFAULT_BRANCH = 'master'
# helper functions
def get_time_difference(time):
"""
Computes the difference with todays time
"""
timezone = "Africa/Nairobi"
branch_time = parse(time)
current_time = datetime.now(pytz.timezone(timezone))
diff_days = (current_time - branch_time)
return diff_days.days
def cwd(path):
os.chdir(path)
@click.command()
@click.option(
'--threshold', '-t',
default=10,
prompt='What number of days should the threshold be? [10 days]')
@click.option(
'branches', '--branch', '-b', default=DEFAULT_BRANCH,
prompt='What branches should be excluded? [master]', multiple=True)
@click.option(
'--path', '-p', prompt='File path to the git repo?',
type=click.Path(exists=True))
def fummy(threshold, branches, path):
cwd(path)
all_branches = capture_stdout('git branch')
# remove spaces and any blank spaces
temp = all_branches.stdout.text.replace(
'*', '').replace(' ', '').split('\n')
for branch in temp:
if branch and branch not in branches:
click.echo('Processing branch: {}'.format(branch))
p = capture_stdout(
'git show {} --format="%cI" --no-patch'.format(branch))
diff_days = get_time_difference(p.stdout.text)
if diff_days > threshold:
click.echo('Deleting {}'.format(branch))
p = capture_stdout(
'git branch -D {}'.format(branch))
click.echo(p.stdout.text)
@click.command()
@click.option('--filename', type=click.Path(exists=True))
@click.option('--default', '-d', default=DEFAULT_BRANCH)
def kill_merged(filename, default):
"""
Start by checking out to the master branch and then finding out the
branches already merged to master and eliminating the buggage
"""
# git branch --merged master
pass
@click.group()
def cli():
"""
Command Line Interface tools loader for ``fummy``
These utilities help with deleting git branches older than the specified
period
"""
pass
cli.add_command(fummy)
if __name__ == '__main__':
cli()
|
mit
| -3,121,794,362,118,456,300
| 25
| 76
| 0.639756
| false
| 3.978788
| false
| false
| false
|
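Because every option in `fummy.py` above declares a prompt, the simplest way to drive it non-interactively is to pass the options explicitly; a hedged sketch using click's test runner (the repository path is a placeholder and must point at a real local checkout, since `--path` requires an existing path):

```python
# Hedged sketch: invoking the 'fummy' subcommand without interactive prompts.
from click.testing import CliRunner
from fummy import cli  # the module above

runner = CliRunner()
result = runner.invoke(cli, [
    'fummy',
    '--threshold', '30',        # delete branches with no commits in 30 days
    '--branch', 'master',       # branch(es) to exclude from deletion
    '--path', '/path/to/repo',  # placeholder; must be an existing git checkout
])
print(result.output)
```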
AutorestCI/azure-sdk-for-python
|
azure-mgmt-monitor/azure/mgmt/monitor/operations/action_groups_operations.py
|
1
|
19086
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class ActionGroupsOperations(object):
"""ActionGroupsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-04-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-04-01"
self.config = config
def create_or_update(
self, resource_group_name, action_group_name, action_group, custom_headers=None, raw=False, **operation_config):
"""Create a new action group or update an existing one.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group: The action group to create or use for the update.
:type action_group: ~azure.mgmt.monitor.models.ActionGroupResource
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ActionGroupResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.ActionGroupResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(action_group, 'ActionGroupResource')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ActionGroupResource', response)
if response.status_code == 201:
deserialized = self._deserialize('ActionGroupResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, action_group_name, custom_headers=None, raw=False, **operation_config):
"""Get an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ActionGroupResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.ActionGroupResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ActionGroupResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, action_group_name, custom_headers=None, raw=False, **operation_config):
"""Delete an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_by_subscription_id(
self, custom_headers=None, raw=False, **operation_config):
"""Get a list of all action groups in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ActionGroupResource
:rtype:
~azure.mgmt.monitor.models.ActionGroupResourcePaged[~azure.mgmt.monitor.models.ActionGroupResource]
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Get a list of all action groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ActionGroupResource
:rtype:
~azure.mgmt.monitor.models.ActionGroupResourcePaged[~azure.mgmt.monitor.models.ActionGroupResource]
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def enable_receiver(
self, resource_group_name, action_group_name, receiver_name, custom_headers=None, raw=False, **operation_config):
"""Enable a receiver in an action group. This changes the receiver's
status from Disabled to Enabled.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param receiver_name: The name of the receiver to resubscribe.
:type receiver_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
enable_request = models.EnableRequest(receiver_name=receiver_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(enable_request, 'EnableRequest')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 409]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
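# Hedged usage sketch (not part of the generated operations class): these
# operations are normally reached through the MonitorManagementClient rather
# than instantiated directly; the resource names below are illustrative only.
#
#     from azure.mgmt.monitor import MonitorManagementClient
#     client = MonitorManagementClient(credentials, subscription_id)
#     group = client.action_groups.get('my-resource-group', 'my-action-group')
#     for ag in client.action_groups.list_by_resource_group('my-resource-group'):
#         print(ag.name)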
|
mit
| -7,601,957,632,301,776,000
| 46.125926
| 152
| 0.648643
| false
| 4.537803
| true
| false
| false
|
molly/women-social-reformers-on-wikipedia
|
gather.py
|
1
|
2451
|
# Copyright (c) 2015–2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import requests
def load_list(filename):
"""Load the list of women from file."""
with open(filename, "r", encoding="utf-8") as f:
lines = f.readlines()
return format_names([line.strip() for line in lines])
def format_names(women):
"""Format the names for searching."""
formatted = []
for name in women:
split = name.split(",")
formatted.append(" ".join([split[1].strip(), split[0].strip()]) if len(split) == 2 else split[0].strip())
return formatted
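# Illustrative behaviour (assuming the "Last, First" convention used in women.txt):
#     format_names(["Addams, Jane", "Sojourner Truth"])
#     # -> ["Jane Addams", "Sojourner Truth"]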
def search(women):
"""Do the search on the list of women."""
for woman in women:
find_page(woman)
def find_page(search_term):
"""Attempt to find a matching Wikipedia article for the given woman."""
api_params = {"action": "opensearch",
"search": search_term,
"limit": 1,
"namespace": 0,
"format": "json"}
r = requests.get(api_url, params=api_params, headers=headers)
if r:
print(r.json())
else:
print(None)
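# The MediaWiki opensearch endpoint returns a JSON array of the form
# [search_term, [titles], [descriptions], [urls]]; with limit=1 at most one match
# is listed. Illustrative output only:
#     ['Jane Addams', ['Jane Addams'], [''], ['https://en.wikipedia.org/wiki/Jane_Addams']]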
def main():
women = load_list("women.txt")
search(women)
if __name__ == "__main__":
headers = {'user-agent': "women-social-reformers-on-wikipedia: https://github.com/molly/women-social-reformers-"
"on-wikipedia"}
api_url = "https://en.wikipedia.org/w/api.php"
main()
|
mit
| 6,424,813,447,681,501,000
| 36.692308
| 116
| 0.665578
| false
| 3.905901
| false
| false
| false
|
plin1112/pysimm
|
pysimm/cassandra.py
|
1
|
67253
|
# ******************************************************************************
# pysimm.cassandra module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2017 Alexander Demidov, Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from StringIO import StringIO
from subprocess import call, Popen, PIPE
import os
import re
import numpy as np
import random
import logging
import types
from collections import Iterable, OrderedDict
from pysimm import system
from string import ascii_uppercase
from pydoc import locate
DATA_PATH = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../dat/csndra_data'))
KCALMOL_2_K = 503.22271716452
CASSANDRA_EXEC = os.environ.get('CASSANDRA_EXEC')
# Creating a logger instance and sending its output to the console by default
logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S',
format='%(asctime)s [%(levelname)s]: %(message)s')
DEFAULT_PARAMS = {
'Temperature_Info': 300,
'Pressure_Info': 1,
'Rcutoff_Low': 0.1
}
class MCSimulation(object):
"""pysimm.cassandra.MCSimulation
Object containing the settings and the logic necessary to partially set-up an abstract Monte Carlo simulation
to be submitted to the CASSANDRA software. The object also will include the simulation results once the simulations
are finished.
Attributes:
mc_sst (:class:`~pysimm.cassandra.McSystem`) : describes all molecules to be inserted by CASSANDRA
init_sst (:class:`~pysimm.system.System`) : describes the optional initial fixed molecular configuration for MC
simulations (default: empty cubic box with 1 nm side length). If the particles in the system are not
attributed with the flag `is_fixed` all of them are considered to be fixed, and will be marked with this
flag, otherwise all particles with is_fixed=False will be removed.
Keyword Args:
out_folder (str) : the relative path of the simulation results (all .dat, .mcf, as well as .chk, ... files will
go there). If the folder does not exist it will be created with 0755 permissions.
props_file (str) : the name of the .inp file.
Note:
Other keyword arguments that are accepted are the GCMC simulation settings. The keywords of the settings
are the same as they are described in the CASSANDRA specification but without the # symbol.
**For example**: the keyword argument `Run_Name='my_simulation'` will set `#Run_Name` setting in CASSANDRA
input file to `my_simulation` value
Parameters:
props (dictionary) : include all simulation settings to be written to the CASSANDRA .inp file
input (str) : text stream that will be written to the CASSANDRA .inp file
tot_sst (:class:`~pysimm.system.System`) : object containing the results of CASSANDRA simulations
"""
def __init__(self, mc_sst=None, init_sst=None, **kwargs):
global DATA_PATH
# Initializing CASSANDRA input stream, empty at the beginning
self.input = ''
# Initializing dictionary that contains records that directly will be sent to the .inp file
self.props = OrderedDict()
self.logger = logging.getLogger('MC Simulation')
# Reading default properties of the GCMC simulations
def_dat = Cassandra(system.System()).read_input(os.path.join(DATA_PATH, 'mc_default.inp'))
tmp = kwargs.get('out_folder') # Folder for the results and temporary files
if tmp:
self.out_folder = tmp
if os.path.isabs(tmp):
self.out_folder = os.path.relpath(tmp)
else:
self.out_folder = os.getcwd()
if not os.path.exists(self.out_folder):
os.makedirs(self.out_folder, mode=0755)
prefix = kwargs.get('Run_Name', def_dat['Run_Name'])
self.props['Run_Name'] = InpSpec('Run_Name', os.path.join(self.out_folder, prefix), '')
self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', ''))
# Simple (one-value) dynamic properties
self.props['Temperature_Info'] = InpSpec('Temperature_Info',
kwargs.get('Temperature_Info'), DEFAULT_PARAMS['Temperature_Info'])
self.props['Pair_Energy'] = InpSpec('Pair_Energy', kwargs.get('Pair_Energy'), def_dat['Pair_Energy'])
self.props['Rcutoff_Low'] = InpSpec('Rcutoff_Low', kwargs.get('Rcutoff_Low'), def_dat['Rcutoff_Low'])
self.props['Mixing_Rule'] = InpSpec('Mixing_Rule', kwargs.get('Mixing_Rule'), def_dat['Mixing_Rule'])
self.props['Seed_Info'] = InpSpec('Seed_Info', kwargs.get('Seed_Info'),
[random.randint(int(1e+7), int(1e+8 - 1)),
random.randint(int(1e+7), int(1e+8 - 1))])
# Multiple-value one/many line dynamic properties
self.props['Run_Type'] = InpSpec('Run_Type', kwargs.get('Run_Type'), def_dat['Run_Type'])
self.props['Charge_Style'] = InpSpec('Charge_Style', kwargs.get('Charge_Style'), def_dat['Charge_Style'])
self.props['VDW_Style'] = InpSpec('VDW_Style', kwargs.get('VDW_Style'), def_dat['VDW_Style'])
self.props['Simulation_Length_Info'] = InpSpec('Simulation_Length_Info', kwargs.get('Simulation_Length_Info'),
def_dat['Simulation_Length_Info'],
**{'write_headers': True, 'new_line': True})
self.props['CBMC_Info'] = InpSpec('CBMC_Info', kwargs.get('CBMC_Info'), def_dat['CBMC_Info'],
**{'write_headers': True, 'new_line': True})
self.props['Box_Info'] = InpSpec('Box_Info', kwargs.get('Box_Info'), def_dat['Box_Info'], **{'new_line': True})
self.props['Property_Info 1'] = InpSpec('Property_Info 1', kwargs.get('Property_Info'), None, **{'new_line': True})
# Setting the simulation total system
if init_sst:
self.tot_sst = init_sst.copy()
self.tot_sst.center('box', [0, 0, 0], True) # the center of the calculation box should be at origin
else:
self.logger.warning('The frame generating system for Monte-Carlo simulations is not set. '
'Creating empty cubic box of 1 nm size')
self.tot_sst = system.System()
self.tot_sst.forcefield = 'trappe/amber'
self.tot_sst.dim = system.Dimension(dx=10, dy=10, dz=10)
# Molecule configuration files describing all species of the system.
# They are **absolutely** needed to start calculation
mol_files = OrderedDict()
# Some necessary verification of obtained system
# TODO: check the force field to be sure that it is class 1
if False:
self.logger.error('CASSANDRA supports only 1-st class force fields')
exit(1)
self.tot_sst.zero_charge() # the sum of the charges should necessary be 0
# Creating the system of fixed molecules
self.fxd_sst_mcfile = None
self.fxd_sst = kwargs.get('fixed_sst')
if self.tot_sst.particles:
tmp = self.tot_sst.copy()
for p in tmp.particles:
if not p.is_fixed:
tmp.particles.remove(p.tag)
tmp.remove_spare_bonding()
self.fxd_sst = tmp
self.fxd_sst_mcfile = os.path.join(self.out_folder, 'fixed_syst.mcf')
mol_files['file1'] = [self.fxd_sst_mcfile, 1]
# Setting up the Monte Carlo system
self.mc_sst = mc_sst
if mc_sst:
mc_sst.file_store = self.out_folder
mol_files = mc_sst.update_props(mol_files)
if kwargs.get('Molecule_Files'):
mol_files = OrderedDict(sorted(kwargs.get('Molecule_Files').items()))
# Raise an error and stop execution if no MCF information is provided in one way or another
if (mc_sst is None) and (not kwargs.get('Molecule_Files')):
self.logger.error('The molecular configuration files of gas molecules for simulation are not set. '
'Nothing to simulate. Exiting...')
exit(0)
self._n_spec = len(mol_files)
self.props['Nbr_Species'] = InpSpec('Nbr_Species', self._n_spec, self._n_spec)
self.props['Molecule_Files'] = InpSpec('Molecule_Files', mol_files, None, **{'new_line': True})
# Synchronzing "start type" .inp record
self.fxd_sst_xyz = ''
pops_list = [0] * self._n_spec
start_type = 'make_config'
if self.fxd_sst:
pops_list[0] = 1
self.fxd_sst_xyz = os.path.join(self.out_folder, 'fixed_syst.xyz')
start_type = 'read_config'
start_conf_dict = OrderedDict([('start_type', start_type), ('species', pops_list),
('file_name', self.fxd_sst_xyz)])
self.props['Start_Type'] = InpSpec('Start_Type', kwargs.get('Start_Type'), start_conf_dict)
# Synchronizing Fragment files:
frag_files = OrderedDict()
if mc_sst:
mc_sst.temperature = self.props['Temperature_Info'].value
frag_files = mc_sst.update_frag_record(frag_files)
if kwargs.get('Fragment_Files'):
frag_files = OrderedDict(sorted(kwargs.get('Fragment_Files').items()))
if (mc_sst is None) and (not kwargs.get('Fragment_Files')):
self.logger.error('Cannot set the fragment files of gas molecules for simulation')
exit(1)
self.props['Fragment_Files'] = InpSpec('Fragment_Files', frag_files, None, **{'new_line': True})
def write(self):
"""pysimm.cassandra.MCSimulation.write
Iterates through the :class:`~MCSimulation.props` dictionary creating the text for correct CASSANDRA input
"""
for key in self.props.keys():
if self.props[key].value is not None:
self.input += '{:}\n'.format(self.props[key].to_string())
self.input += '\nEND'
# Initializing output stream
self.logger.info('Writing CASSANDRA .inp file to "{:}"...'.format(self.props_file))
out_stream = open(self.props_file, 'w')
out_stream.write('{:}'.format(self.input))
out_stream.close()
self.logger.info('File: "{:}" was created successfully'.format(self.props_file))
def group_by_id(self, group_key='matrix'):
"""pysimm.cassandra.MCSimulation.group_by_id
Method groups the atoms of the system :class:`~MCSimulation.tot_sst` by a certain property. Will iterate through
all atoms in the system and return indexes of only those atoms that match the property. Currently supports 3
properties defined by the input keyword argument.
Keyword Args:
group_key (str): text constant defines the property to match. Possible keywords are:
(1) `matrix` -- (default) indexes of the atoms in :obj:`~MCSimulation.fxd_sst`
(2) `rigid` -- indexes of all atoms that have rigid atomic bonds. It is assumed here that rigid and
nonrigid atoms can interact only through intermolecular forces
(3) `nonrigid` -- opposite of previous, indexes of all atoms that have nonrigid atomic bonds
Returns:
str:
string in the format `a1:b1 a2:b2 ...` where all indexes inside `[ak, bk]` belong to the selected group
and array of the form `[[a1, b1], [a2, b2], ...]`
"""
fxd_sst_idxs = []
if self.fxd_sst:
fxd_sst_idxs = range(1, len(self.fxd_sst.particles) + 1)
# Behaviour depending on type of particles to check
check = lambda x: x
if group_key.lower() == 'nonrigid':
check = lambda x: not x.is_rigid
elif group_key.lower() == 'rigid':
check = lambda x: x.is_rigid
elif group_key.lower() == 'matrix':
check = lambda x: x.tag in fxd_sst_idxs
idx_array = [[-1, -1]]
for p in self.tot_sst.particles:
if check(p):
if idx_array[-1][0] > 0:
if abs(p.tag - idx_array[-1][1]) > 1:
idx_array.append([p.tag, p.tag])
else:
idx_array[-1][1] = p.tag
else:
idx_array[-1] = [p.tag, p.tag]
idx_string = ''
for t in idx_array:
if t[1] - t[0] > 1:
idx_string += str(t[0]) + ':' + str(t[1]) + ' '
return idx_string, idx_array
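# Illustrative return values (particle tags are examples): if tags 1-64 form the
# fixed matrix and tags 65-70 were inserted, group_by_id('matrix') gives roughly
# ('1:64 ', [[1, 64]]) and group_by_id('nonrigid') gives ('65:70 ', [[65, 70]]),
# assuming the inserted molecules are non-rigid.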
def upd_simulation(self):
"""pysimm.cassandra.MCSimulation.upd_simulation
Updates the :class:`~MCSimulation.tot_sst` field using the `MCSimulation.props['Run_Name'].chk` file. Will try
to parse the checkpoint file and read the coordinates of the molecules inserted by CASSANDRA. If none of the
molecules from the :class:`~MCSimulation.mc_sst` can be fit to the text that was read, the method will raise an
exception. The fitting method: :class:`~McSystem.make_system` assumes that different molecules inserted by
CASSANDRA have the same order of the atoms.
"""
fname = '{:}{:}'.format(self.props['Run_Name'].value, '.chk')
self.logger.info('Updating MC system from the CASSANDRA {:} file...'.format(fname))
if os.path.isfile(fname):
try:
with open(fname, 'r') as inp:
lines = inp.read()
# Define the starting index of the lines with inserted atoms
start_ind = lines.find('total number of molecules')
end_ind = start_ind + lines[start_ind:-1].find('****', 1)
count_info = lines[start_ind:end_ind].split('\n')
offset = 1
if self.fxd_sst:
tmp = count_info[1].split()
offset += int(tmp[1]) * len(self.fxd_sst.particles)
# Grab the lines with inserted atoms
start_ind = lines.find('coordinates for all the boxes')
all_coord_lines = lines[start_ind:-1].split('\n')
inp.close()
gas_lines = all_coord_lines[offset:]
if len(gas_lines) > 0:
if self.fxd_sst:
self.tot_sst = self.fxd_sst.copy()
self.tot_sst.add(self.mc_sst.make_system(gas_lines), change_dim=False)
self.logger.info('Simulation system successfully updated')
else:
self.logger.info('Final MC configuration has 0 new particles; the initial system remains the same')
except IndexError:
self.logger.error('Cannot fit the molecules from the CASSANDRA file to the PySIMM system')
else:
self.logger.error('Cannot find the CASSANDRA checkpoint file to update simulation. '
'Probably it cannot be written by CASSANDRA to the place you specified')
def __check_params__(self):
"""pysimm.cassandra.MCSimulation.__check_params__
Private method designed for update the fields of the simulation object to make them conformed with each other
"""
# Sync the simulation box parameters
dx, dy, dz = self.tot_sst.dim.size()
if (dx == dy) and (dy == dz):
box_type = 'cubic'
box_dims = str(dx)
else:
box_type = 'orthogonal'
box_dims = '{0:} {1:} {2:}'.format(dx, dy, dz)
upd_vals = OrderedDict([('box_count', 1),
('box_type', box_type),
('box_size', box_dims)])
if ('Box_Info' in self.props.keys()) and isinstance(self.props['Box_Info'], InpSpec):
self.props['Box_Info'] = InpSpec('Box_Info', upd_vals, None, **{'new_line': True})
else:
self.props['Box_Info'] = upd_vals
tmp = self.props['Box_Info'].value['box_size'].split()
if self.props['Box_Info'].value['box_type'] == 'cubic':
tmp = tmp + tmp + tmp
self.tot_sst.dim = system.Dimension(dx=float(tmp[0]), dy=float(tmp[1]), dz=float(tmp[2]))
# Sync of the volume change frequency in equilibration regime
if 'Prob_Volume' in self.props.keys():
if self.props['Prob_Volume'] is None:
self.props['Run_Type'].value['steps'] = self.props['Run_Type'].value['steps'][0]
def __write_chk__(self, out_file):
"""pysimm.cassandra.MCSimulation.__write_chk__
Creates the CASSANDRA checkpoint file basing on the information from the `~MCSimulation.tot_sst` field
"""
# Initializing output stream
if out_file == 'string':
out_stream = StringIO()
else:
out_stream = open(out_file, 'w+')
blk_separ = ' {:*^75}\n'
# Writing Translation/rotation/... info
out_stream.write(blk_separ.format('Translation,rotation, dihedral, angle distortion'))
tmplate = '{t[0]$$}{t[1]$$}{t[2]$$}{t[3]$$}{t[4]$$}\n'
molecules = self.props['Molecule_Files'].value
for m, i in zip(molecules, range(len(molecules))):
out_stream.write(tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0]))
out_stream.write(tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0]))
out_stream.write('{t[0]:>23.14E}{t[1]:>23.14E}{t[2]:>23.14E}\n'.format(t=[0, 0, 0]))
out_stream.write('{0:>12d}{1:>12d}\n'.format(0, 0))
# Small section with total # of MC trials -- it is 0 at the beginning
out_stream.write(blk_separ.format('# of MC steps'))
out_stream.write('{:>12d}\n'.format(0))
# Writing Box-info information
out_stream.write(blk_separ.format('Box info'))
tmp = self.props['Box_Info'].value['box_size']
x, y, z = 0, 0, 0
bx_type = None
if isinstance(tmp, types.ListType):
if len(tmp) > 3:
x, y, z = tmp[0], tmp[1], tmp[2]
elif isinstance(tmp, int) or isinstance(tmp, float):
x, y, z = tmp, tmp, tmp
else:
exit(0)
# The first 0 here corresponds to the # of trials
out_stream.write('{0:>12d}\n{1:<18.10f}\n{2:}\n'.format(0, x * y * z, self.props['Box_Info'].value['box_type']))
tmpl = '{t[0]&&}{t[1]&&}{t[2]&&}\n'
tmp = np.diag([x, y, z])
for lines in tmp:
out_stream.write((tmpl.replace('&&', ':^22.14f')).format(t=lines))
tmp = np.diag([1 / x, 1 / y, 1 / z])
for lines in tmp:
out_stream.write((tmpl.replace('&&', ':^22.8f')).format(t=lines))
out_stream.write('{:>18.12f}\n'.format(0))
# Creating seeds
out_stream.write(blk_separ.format('SEEDS'))
out_stream.write('{t[0]:>12d}{t[1]:>12d}{t[2]:>12d}\n{t[3]:>12d}{t[4]:>12d}\n'.format(
t=np.random.random_integers(int(1e+7), int(1e+8 - 1), 5)))
# Writing total number of molecules by species
out_stream.write(blk_separ.format('Info for total number of molecules'))
out_stream.write('{0:>11d}{1:>11d}\n'.format(1, 1))  # Currently only one polymer "molecule" in the simulation
for i in range(1, len(molecules)):
out_stream.write('{0:>11d}{1:>11d}\n'.format(i + 1, 0))
out_stream.write(blk_separ.format('Writing coordinates of all boxes'))
# Writing coordinates of atoms in all boxes
line_template = '{l[0]:<5}{l[1]:<25.15f}{l[2]:<25.15f}{l[3]:<25.15f}{l[4]:>10d}\n'
for parts in self.tot_sst.particles:
try:
out_stream.write(line_template.format(l=[parts.type.name, parts.x, parts.y, parts.z, 1]))
except:
continue
out_stream.close()
class GCMC(MCSimulation):
"""pysimm.cassandra.GCMC
Initiates the specific type of Monte Carlo simulations for CASSANDRA: simulations using Grand-Canonical ensemble of
particles (constant volume-temperature-chemical potential, muVT). See :class:`~pysimm.cassandra.MCSimulation`
for the detailed description of the properties.
"""
def __init__(self, mc_sst=None, init_sst=None, **kwargs):
MCSimulation.__init__(self, mc_sst, init_sst, **kwargs)
self.logger.name = 'GCMC'
self.props['Sim_Type'] = InpSpec('Sim_Type', 'GCMC', 'gcmc')
# Path for all intermediate Cassandra files and results
self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', 'gcmc_input.inp'))
add = 0
if self.fxd_sst and self.fxd_sst.particles.count:
add = 1
self.props['Chemical_Potential_Info'] = InpSpec('Chemical_Potential_Info', kwargs.get('chem_pot'),
-30 * (self._n_spec - add))
# Order of the next four items is IMPORTANT! Check the CASSANDRA spec file for further info
def_init_prob = 0.25
limits = [0.3] * self._n_spec
if self.fxd_sst:
limits[0] = 0
self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'),
OrderedDict([('tot_prob', def_init_prob),
('limit_vals', limits)]),
**{'new_line': True, 'indicator': 'start'})
tps = ['cbmc'] * self._n_spec
if self.fxd_sst:
tps[0] = 'none'
self.props['Prob_Insertion'] = InpProbSpec('Prob_Insertion', kwargs.get('Prob_Insertion'),
OrderedDict([('tot_prob', def_init_prob), ('types', tps)]),
**{'new_line': True})
self.props['Prob_Deletion'] = InpProbSpec('Prob_Deletion', kwargs.get('Prob_Deletion'), def_init_prob)
max_ang = [180] * self._n_spec
if self.fxd_sst:
max_ang[0] = 0
self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'),
OrderedDict([('tot_prob', def_init_prob), ('limit_vals', max_ang)]),
**{'new_line': True, 'indicator': 'end'})
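# Minimal GCMC construction sketch (values are illustrative): a single-specie task
# with an explicit chemical potential and a custom run name, e.g.
#     gcmc = GCMC(mc_sst, frame_system, chem_pot=-27.5, Run_Name='gcmc_co2')
# Settings that are not supplied fall back to the defaults read from mc_default.inp.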
class NVT(MCSimulation):
"""pysimm.cassandra.NVT
Initiates the specific type of Monte Carlo simulations for CASSANDRA: simulations using Canonical ensemble of
particles (constant volume-temperature-number of particles, NVT). See :class:`~pysimm.cassandra.MCSimulation`
for the detailed description of the properties.
"""
def __init__(self, mc_sst=None, init_sst=None, **kwargs):
MCSimulation.__init__(self, mc_sst, init_sst, **kwargs)
self.logger.name = 'NVT'
self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', 'nvt-mc_input.inp'))
self.props['Sim_Type'] = InpSpec('Sim_Type', 'nvt_mc', 'nvt_mc')
move_probs = [1, 1, 1]
limits = [0.3] * self._n_spec
if self.fxd_sst:
limits[0] = 0
self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'),
OrderedDict([('tot_prob', move_probs[0]),
('limit_vals', limits)]),
**{'new_line': True, 'indicator': 'start'})
sub_probs = [1] * self._n_spec
if self.fxd_sst:
sub_probs[0] = 0
sm = sum(sub_probs)
sub_probs = [s / sm for s in sub_probs]
self.props['Prob_Regrowth'] = InpProbSpec('Prob_Regrowth', kwargs.get('Prob_Regrowth'),
OrderedDict([('tot_prob', move_probs[1]), ('sub_probs', sub_probs)]),
**{'new_line': True})
max_ang = [180] * self._n_spec
if self.fxd_sst:
max_ang[0] = 0
self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'),
OrderedDict([('tot_prob', move_probs[2]), ('limit_vals', max_ang)]),
**{'new_line': True, 'indicator': 'end'})
class NPT(MCSimulation):
"""pysimm.cassandra.NPT
Initiates the specific type of Monte Carlo simulations for CASSANDRA: simulations using Isobaric-Isothermal ensemble
of particles (NPT). See :class:`~pysimm.cassandra.MCSimulation` for the detailed description of the properties.
"""
def __init__(self, mc_sst=None, init_sst=None, **kwargs):
MCSimulation.__init__(self, mc_sst, init_sst, **kwargs)
# Initialising object attributes
self.logger.name = 'NPT'
self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', 'npt-mc_input.inp'))
# Initialising simulation-specific props attribute
self.props['Sim_Type'] = InpSpec('Sim_Type', 'npt_mc', 'npt_mc')
self.props['Pressure_Info'] = InpSpec('Pressure_Info',
kwargs.get('Pressure_Info'), DEFAULT_PARAMS['Pressure_Info'])
move_probs = [.34, .02, .32, .32]
limits = [0.3] * self._n_spec
if self.fxd_sst:
limits[0] = 0
self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'),
OrderedDict([('tot_prob', move_probs[0]),
('limit_vals', limits)]),
**{'new_line': True, 'indicator': 'start'})
vol_margins = 0.1 * self.props['Box_Info'].value['box_size']
self.props['Prob_Volume'] = InpProbSpec('Prob_Volume', kwargs.get('Prob_Volume'),
OrderedDict([('tot_prob', move_probs[1]), ('types', vol_margins)]),
**{'new_line': True})
sub_probs = [1] * self._n_spec
if self.fxd_sst:
sub_probs[0] = 0
sm = sum(sub_probs)
sub_probs = [s / sm for s in sub_probs]
self.props['Prob_Regrowth'] = InpProbSpec('Prob_Regrowth', kwargs.get('Prob_Regrowth'),
OrderedDict([('tot_prob', move_probs[2]), ('sub_probs', sub_probs)]),
**{'new_line': True})
max_ang = [180] * self._n_spec
if self.fxd_sst:
max_ang[0] = 0
self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'),
OrderedDict([('tot_prob', move_probs[3]), ('limit_vals', max_ang)]),
**{'new_line': True, 'indicator': 'end'})
class InpSpec(object):
"""pysimm.cassandra.InpSpec
Represents the most common object used for carrying one logical unit of the CASSANDRA simulation options
Parameters:
key (str) : the keyword of the simulation option (literally the string that goes after the # sign in
CASSANDRA .inp file)
value (object) : numerical or text values of the particular simulation option structured in a certain way.
Here goes only the values that are wished to be changed (it might be just one field of a big dictionary)
default (object) : the most complete default description of the simulation option
Keyword Args:
write_headers (boolean): if the :obj:`~value` is dictionary defines whether the dictionary keys should be
written to the output
new_line (boolean): if the :obj:`~value` is iterable defines whether each new element will be written to
the new line
"""
def __init__(self, key, value, default, **kwargs):
self.key = key
self.write_headers = kwargs.get('write_headers')
self.is_new_line = kwargs.get('new_line')
self.value = value
if value:
if isinstance(default, types.DictType):
# Add from default structure all properties that were not defined by user
for ky in value.keys():
default[ky] = value[ky]
self.value = default
else:
self.value = value
elif value == []:
self.value = []
else:
# If nothing was passed write default
self.value = default
def to_string(self):
"""pysimm.cassandra.InpSpec.to_string
Creates the proper text representation of the property stored in the :obj:`~value` field
Returns:
str:
formatted text string
"""
if self.value is not None:
result = '# {:}\n'.format(self.key)
# Strings
if isinstance(self.value, types.StringTypes):
result += str(self.value)
# Dictionaries
elif isinstance(self.value, types.DictType):
for ks in list(self.value.keys()):
if self.write_headers:
result += ks + ' '
tmp = self.value[ks]
if (isinstance(tmp, Iterable)) & (not isinstance(tmp, types.StringTypes)):
result += ' '.join(str(p) for p in tmp)
else:
result += str(tmp)
if self.is_new_line:
result += '\n'
else:
result += ' '
result = result[:-1] # Remove the very last new line character
# Lists
elif isinstance(self.value, Iterable):
for elem in self.value:
if isinstance(elem, Iterable):
subresult = ''
for subelem in elem:
subresult = subresult + str(subelem) + ' '
else:
subresult = str(elem) + ' '
result += subresult
# Simple types
else:
result += str(self.value)
result += '\n!{:^^20}\n'.format('')
return result
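# Illustrative output of InpSpec.to_string() for a simple one-value setting:
#     InpSpec('Temperature_Info', 300, None).to_string()
# returns roughly:
#     # Temperature_Info
#     300
#     !^^^^^^^^^^^^^^^^^^^^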
class InpProbSpec(InpSpec):
"""pysimm.cassandra.InpSpec
Extension of the :class:`~InpSpec` class that takes into account special representation of the movement
probabilities in the CASSANDRA input file.
"""
def __init__(self, key, value, default, **kwargs):
super(InpProbSpec, self).__init__(key, value, default, **kwargs)
def to_string(self):
tmp = super(InpProbSpec, self).to_string()
if self.key == 'Prob_Translation':
tmp = '# Move_Probability_Info\n\n' + tmp
elif self.key == 'Prob_Rotation':
tmp += '\n# Done_Probability_Info\n'
return tmp
class McSystem(object):
"""pysimm.cassandra.McSystem
Wrapper around the list of :class:`~pysimm.system.System` objects. Each element in the list represents single
molecule of a different specie that will be used during MC simulations. Additionally, the object is responsible for
creating .dat and .mcf files needed for the simulation and reading back the CASSANDRA simulation results.
Attributes:
sst (list of :class:`~pysimm.system.System`) : items representing single molecules of different species to be
inserted by CASSANDRA. If the sst is a list (not a single value) it is assumed that all of the following
properties are synchronized with it by indexes.
chem_pot (list of int) : chemical potential for each specie [Joule/mol]
Keyword Args:
max_ins (list of int) : defines the highest possible number of molecules of corresponding specie.
Based on these values CASSANDRA allocates memory for simulations (default: 5000).
is_rigid (list of boolean): defines whether the atoms in the particular molecule should be marked as rigid
or not. **Important!** In current implementation the module doesn't support flexible molecule angles, so
the `is_rigid=False` is designed to be used exclusively for **single bead** molecules.
Parameters:
made_ins (list of int) : number of particles of each specie inserted by CASSANDRA.
mcf_file (list of str) : defines full relative names of molecule configuration files **(.mcf)** required by
CASSANDRA. Files will be created automatically.
frag_file (list of str) : defines full relative names of the fragment configuration files **(.dat)**
required by CASSANDRA. Files will be created automatically.
"""
def __init__(self, sst, **kwargs):
self.logger = logging.getLogger('MC_SYSTEM')
self.sst = make_iterable(sst)
for sst in self.sst:
# Checking that the force-field of the input system is of the class-1 as it is direct CASSANDRA restriction
if isinstance(sst, system.System):
sst.zero_charge()
sst.add_particle_bonding()
if sst.ff_class:
if not (sst.ff_class == '1'):
self.logger.error('Currently CASSANDRA supports only **Type-I** force fields. '
'The PYSIMM systems you provided are of a different type. '
'Exiting...')
exit(1)
else:
self.logger.info('The Force-Field type of the system is not defined. '
'Assuming it is **Type-1** force field')
sst.ff_class = '1'
if not all([pt.name for pt in sst.particle_types]):
self.logger.error('The name of at least one particle type in MC system is not defined. '
'Will not be able to map particles back after the CASSANDRA simulations. '
'\nPlease, setup the names for all particle types for your MC system')
exit(1)
# Decorating the system with bonds_fixed flag and angle_fixed flag
for bt in sst.bond_types:
bt.is_fixed = True
for at in sst.angle_types:
if at.k > 70:
at.is_fixed = True
self.file_store = os.getcwd()
self.max_ins = make_iterable(kwargs.get('max_ins', 5000))
self.is_rigid = make_iterable(kwargs.get('is_rigid', [True] * len(self.sst)))
self.made_ins = [0] * len(self.sst)
self.mcf_file = []
self.frag_file = []
self.temperature = None
def update_props(self, props):
"""pysimm.cassandra.McSystem.update_props
For each specie in the system creates the .mcf file required for CASSANDRA simulation.
Args:
props (dictionary) : contains the .mcf file names and maximally allowed number of molecules insertions.
The dictionary is to be assigned to 'Molecule_Files' property of the MC simulation
Returns:
props: updated input dictionary
"""
# Generate correct .mcf files
al_ind = 0
for (sstm, count) in zip(self.sst, range(len(self.sst))):
fullfile = os.path.join(self.file_store, '{:}{:}{:}'.format('particle', str(count + 1), '.mcf'))
for p_type in sstm.particle_types:
if p_type.elem and (not p_type.real_elem):
p_type.real_elem = p_type.elem
p_type.elem = ascii_uppercase[int(al_ind / 10)] + str(al_ind % 10)
al_ind += 1
McfWriter(sstm, fullfile).write()
self.mcf_file.append(fullfile)
# Make the files list to be returned
offset = len(props)
for (mcf, ins, count) in zip(self.mcf_file, self.max_ins, range(1 + offset, len(self.mcf_file) + 1 + offset)):
props['file' + str(count)] = [mcf, ins]
return props
def update_frag_record(self, frag_record):
"""pysimm.cassandra.McSystem.update_frag_record
For each specie in the system creates the single configuration .dat file required for CASSANDRA simulation.
Args:
frag_record: dictionary containing the .dat file names and their ids. The dictionary is to be assigned to
'Molecule_Files' property of the MC simulation
Returns:
dictionary:
updated dictionary
"""
# Generating the structure files
if self.temperature is None:
self.temperature = 300
for (sstm, count) in zip(self.sst, range(len(self.sst))):
fullfile = os.path.join(self.file_store, '{:}{:}{:}'.format('particle', str(count + 1), '.dat'))
with open(fullfile, 'w') as out:
frag_count = 1
out.write('{:>12d}\n'.format(frag_count))
out.write('{:>21f}{:>21f}\n'.format(self.temperature, 0))
tmplte = '{:<10}{:<24f}{:<24f}{:<24f}\n'
for prt in sstm.particles:
out.write(tmplte.format(prt.type.elem, prt.x, prt.y, prt.z))
self.frag_file.append(fullfile)
# Generating the files list
for (frags, count) in zip(self.frag_file, range(1, len(self.frag_file) + 1)):
frag_record['file' + str(count)] = [frags, count]
return frag_record
def make_system(self, text_output):
"""pysimm.cassandra.McSystem.make_system
Parses the checkpoint (.chk) file made by CASSANDRA and creates new molecules basing on the new coordinates
information. Assumes that all atoms of a certain molecule are listed in .chk file together (molecule
identifiers are not mixed).
Note:
The logic of comparison of the xyz-like text record from the .chk file with the
:class:`~pysimm.system.System` object is most straightforward: It is the consecutive comparison of particle
names and first letters (before the white space) in the text record. In this implementation order matters!
For example, for CO2, if in the system atoms are ordered as C-O-O and in the text they are ordered as
O-C-O fit will fail.
Args:
text_output (str): text stream from the CASSANDRA .chk file containing the coordinates of newly inserted
molecules
Returns:
:class:`~pysimm.system.System` : object containing all newly inserted molecules
"""
tmp_sst = None
count = 0 # counter of the lines in the input file
sys_idx = 0 # counter of the gas molecules to lookup
while count < len(text_output):
tmp = self.sst[sys_idx].copy()
dictn = text_output[count:(len(tmp.particles) + count)]
if self.__fit_atoms__(tmp, dictn):
for p in tmp.particles:
vals = dictn[p.tag - 1].split()
# Read the coordinates from the text output of the CASSANDRA simulation
p.x, p.y, p.z = map(float, vals[1:4])
# Force velocities of the particles to be 0
p.vx, p.vy, p.vz = 0.0, 0.0, 0.0
p.molecule.syst_tag = 0
if self.is_rigid[sys_idx]:
for p in tmp.particles:
p.is_rigid = True
if tmp_sst:
tmp_sst.add(tmp)
else:
tmp_sst = tmp.copy()
self.made_ins[sys_idx] += 1
count += len(tmp.particles)
sys_idx = 0
else:
sys_idx += 1
if sys_idx >= len(self.sst):
self.logger.error('Wasn\'t able to read CASSANDRA .chk file. '
'Please check either MC-simulation provided to PySIMM or the CASSANDRA '
'checkpoint file ')
exit(1)
if tmp_sst:
tmp_sst.update_tags()
tmp_sst.objectify()
return tmp_sst
def __fit_atoms__(self, molec, text_lines):
"""pysimm.cassandra.McSystem.__fit_atoms__
Implements simple logic of comparison of the xyz-like text record with the :class:`~pysimm.system.System`
object. The comparison is based on the consecutive comparison of particle names and first letters (before the
white space) in the text. In this implementation order matters! E.g. for CO2, if in the system atoms are
ordered as C-O-O and in the text they are ordered like O-C-O, the fit will return False.
Returns:
boolean: flag whether the text record fit the molecule or not
"""
flag = True
# Cannot map anything if the number of particles differs from the number of data lines
if len(molec.particles) != len(text_lines):
return False
# Check the sequence of element names
for p in molec.particles:
vals = text_lines[p.tag - 1].split()
if vals[0] != p.type.elem:
return False
return flag
class Cassandra(object):
"""pysimm.cassandra.Cassandra
Organizational object for running CASSANDRA simulation tasks. In current implementation it is able to run Canonical,
Grand Canonical, and Isothermal-Isobaric Monte Carlo simulations (:class:`~GCMC`, :class:`~NVT`, and :class:`~NPT`,
correspondingly).
Parameters:
system (:class:`~pysimm.system.System`) : molecular system updated during the simulations
run_queue (list) : the list of scheduled tasks
"""
def __init__(self, init_sst):
self.logger = logging.getLogger('CSNDRA')
# Assume all particles in initial system are fixed
self.system = init_sst
if init_sst.particles:
for p in init_sst.particles:
p.is_fixed = True
self.run_queue = []
def run(self):
"""pysimm.cassandra.Cassandra.run
Method that triggers the simulations. Does two consecutive steps: **(1)** tries to write all files necessary
for simulation (.dat, .inp, .mcf); **(2)** tries to invoke the CASSANDRA executable.
"""
global CASSANDRA_EXEC
if check_cs_exec():
for task in self.run_queue:
# Write .inp file
task.write()
# Write .xyz of the fixed system if provided
if task.fxd_sst:
if task.fxd_sst_mcfile is not None:
McfWriter(task.fxd_sst, task.fxd_sst_mcfile).write('atoms')
task.fxd_sst.write_xyz(task.fxd_sst_xyz)
try:
self.logger.info('Starting the Monte Carlo simulations with CASSANDRA')
print('{:.^60}'.format(''))
p = Popen([CASSANDRA_EXEC, task.props_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stout, sterr = p.communicate()
print(stout)
print(sterr)
task.upd_simulation()
self.system = task.tot_sst.copy()
except OSError as ose:
self.logger.error('There was a problem calling CASSANDRA executable')
exit(1)
except IOError as ioe:
if check_cs_exec():
self.logger.error('There was a problem running CASSANDRA. '
'The process started but did not finish')
exit(1)
else:
self.logger.error('There was a problem running CASSANDRA: seems it is not configured properly.\n'
'Please, be sure the CASSANDRA_EXEC environment variable is set to the correct '
'CASSANDRA executable path. The current path is set to:\n\n{}\n\n'.format(CASSANDRA_EXEC))
exit(1)
def add_simulation(self, ens_type, obj=None, **kwargs):
"""pysimm.cassandra.Cassandra.add_simulation
Method for adding new Monte Carlo simulation to the run queue.
Args:
ens_type: Type of the molecular ensemble for the Monte-Carlo simulations. The supported options are: `GCMC`
(Grand Canonical); `NVT` (canonical); `NPT` (isobaric-isothermal)
obj: the entity that should be added. Will be ignored if it is not of a type :class:`~MCSimulation`
Keyword Args:
is_new (boolean) : defines whether all previous simulations should be erased or not
species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to
:class:`~McSystem` constructor.
Note:
Other keyword arguments of this method will be redirected to the :class:`~McSystem` and
:class:`~MCSimulation` constructors. See their descriptions for the possible keyword options.
"""
new_job = None
# Reading the molecule ensemble type
simul = locate('pysimm.cassandra.' + ens_type)
if simul is None:
self.logger.error('Unsupported simulation ensemble option. Please use either GCMC, NPT, or '
'NVT in \'add_simulation\' ')
exit(1)
if isinstance(obj, MCSimulation):
new_job = obj
else:
specs = kwargs.get('species')
if specs:
mc_sst = McSystem(specs, **kwargs)
new_job = simul(mc_sst, self.system, **kwargs)
else:
self.logger.error('Incorrect ' + ens_type + ' initialization. Please provide either Cassandra.' +
ens_type + ' simulation object or the dictionary with initialization parameters '
'of that object')
exit(1)
# Clean the run queue if 'is_new' is set to True
if kwargs.get('is_new'):
self.run_queue[:] = []
if new_job:
new_job.__check_params__()
self.run_queue.append(new_job)
def add_gcmc(self, obj=None, **kwargs):
"""pysimm.cassandra.Cassandra.add_gcmc
Adds a new simulation in the grand-canonical ensemble to the run queue.
Args:
obj: the entity that should be added. Will be ignored if it is not of a type :class:`~GCMC`
Keyword Args:
is_new (boolean) : defines whether all previous simulations should be erased or not
species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to
:class:`~McSystem` constructor.
Note:
Other keyword arguments of this method will be redirected to the :class:`~McSystem`, :class:`~MCSimulation`,
and :class:`~GCMC` constructors. See their descriptions for the possible keyword options.
"""
new_job = None
if isinstance(obj, GCMC):
new_job = obj
else:
specs = kwargs.get('species')
if specs:
mc_sst = McSystem(specs, **kwargs)
new_job = GCMC(mc_sst, self.system, **kwargs)
else:
self.logger.error('Unknown GCMC initialization. Please provide either '
'the dictionary with GCMC parameters or Cassandra.GCMC simulation object')
exit(1)
if kwargs.get('is_new'):
self.run_queue[:] = []
if new_job:
new_job.__check_params__()
self.run_queue.append(new_job)
def add_npt_mc(self, obj=None, **kwargs):
"""pysimm.cassandra.Cassandra.add_npt_mc
Adds a new simulation in the isobaric-isothermal ensemble to the run queue.
Args:
obj: the entity that should be added. Will be ignored if it is not of a type :class:`~NPT`
Keyword Args:
is_new (boolean) : defines whether all previous simulations should be erased or not
species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to
:class:`~McSystem` constructor.
Note:
Other keyword arguments of this method will be redirected to the :class:`~McSystem`, :class:`~MCSimulation`,
and :class:`~NPT` constructors. See their descriptions for the possible keyword options.
"""
new_job = None
if isinstance(obj, NPT):
new_job = obj
else:
specs = kwargs.get('species')
if specs:
mc_sst = McSystem(specs, **kwargs)
new_job = NPT(mc_sst, self.system, **kwargs)
else:
self.logger.error('Unknown NPT initialization. Please provide either '
'the dictionary with NPT simulation parameters or Cassandra.NPT simulation object')
exit(1)
if kwargs.get('is_new'):
self.run_queue[:] = []
if new_job:
new_job.__check_params__()
self.run_queue.append(new_job)
def add_nvt(self, obj=None, **kwargs):
"""pysimm.cassandra.Cassandra.add_nvt
Adds a new simulation in the canonical ensemble to the run queue.
Args:
obj: the entity that should be added. Will be ignored if it is not of a type :class:`~NVT`
Keyword Args:
is_new (boolean) : defines whether all previous simulations should be erased or not
species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to
:class:`~McSystem` constructor.
Note:
Other keyword arguments of this method will be redirected to the :class:`~McSystem`, :class:`~MCSimulation`,
and :class:`~NVT` constructors. See their descriptions for the possible keyword options.
"""
new_job = None
if isinstance(obj, NVT):
new_job = obj
else:
specs = kwargs.get('species')
if specs:
mc_sst = McSystem(specs, **kwargs)
new_job = NVT(mc_sst, self.system, **kwargs)
else:
self.logger.error('Unknown NVT initialization. Please provide either '
'the dictionary with NVT simulation parameters or Cassandra.NVT simulation object')
exit(1)
if kwargs.get('is_new'):
self.run_queue[:] = []
if new_job:
new_job.__check_params__()
self.run_queue.append(new_job)
def read_input(self, inp_file):
"""pysimm.cassandra.Cassandra.read_input
The method parses the CASSANDRA instructions file (.inp), splits it into separate instructions, and analyses each
according to the instruction name.
Args:
inp_file (str) : the full relative path of the file to be read
Returns:
dictionary : read CASSANDRA properties in the format required by :class:`~GCMC`
"""
result = {}
if os.path.isfile(inp_file):
self.logger.info('Reading simulation parameters from {:} file'.format(inp_file))
# Reading the cassandra .inp file as one long string
inp_stream = open(inp_file, 'r')
lines = inp_stream.read()
raw_props = lines.split('#')
for prop in raw_props:
line = re.sub('\n!.*', '', prop) # Get rid of the CASSANDRA comments
line = re.sub('\n(e|E)(n|N)(d|D)', '', line)  # Get rid of the END at the end of the file
tmp = line.split()
if len(tmp) > 1:
result[tmp[0]] = self.__parse_value__(tmp)
# File seems fine; close the stream
inp_stream.close()
self.logger.info('Reading finished successfully')
else:
self.logger.error('Cannot find specified file: \"{:}\"'.format(inp_file))
return result
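# Illustrative shape of the returned dictionary (keys follow the .inp keywords and
# the values below are examples):
#     {'Run_Name': 'results/co2_ads', 'Temperature_Info': 300,
#      'Box_Info': OrderedDict([('box_count', 1), ('box_type', 'cubic'), ('box_size', 50.0)]),
#      ...}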
def __parse_value__(self, cells):
title = cells[0].lower()
if title == 'run_type':
return OrderedDict([('type', cells[1]), ('steps', map(int, cells[2:]))])
elif title == 'charge_style':
return OrderedDict([('type', cells[1]),
('sum_type', cells[2]),
('cut_val', float(cells[3])),
('accuracy', float(cells[4]))])
elif title == 'vdw_style':
return OrderedDict([('type', cells[1]),
('cut_type', cells[2]),
('cut_val', float(cells[3]))])
elif title == 'simulation_length_info':
tmp = OrderedDict([('units', cells[2]),
('prop_freq', int(cells[4])),
('coord_freq', int(cells[6])),
('run', int(cells[8]))])
if len(cells) > 10:
tmp['steps_per_sweep'] = int(cells[10])
if len(cells) > 12:
tmp['block_averages'] = int(cells[12])
return tmp
elif title == 'cbmc_info':
return OrderedDict([('kappa_ins', int(cells[2])),
('kappa_dih', int(cells[4])),
('rcut_cbmc', float(cells[6]))])
elif title == 'box_info':
size = float(cells[3])
if len(cells) > 6:
size = [float(cells[3]), float(cells[4]), float(cells[5])]
return OrderedDict([('box_count', int(cells[1])), ('box_type', cells[2]), ('box_size', size)])
elif title == 'prob_translation':
vals = []
for i in range(2, len(cells)):
vals.append(float(cells[i]))
return OrderedDict([('tot_prob', float(cells[1])),
('limit_vals', vals)])
elif title == 'prob_insertion':
vals = []
for i in range(2, len(cells)):
vals.append(cells[i])
return OrderedDict([('tot_prob', float(cells[1])),
('types', vals)])
elif title == 'prob_rotation':
vals = []
for i in range(2, len(cells)):
vals.append(float(cells[i]))
return OrderedDict([('tot_prob', float(cells[1])),
('limit_vals', vals)])
elif (title == 'molecule_files') or (title == 'fragment_files'):
tmp = OrderedDict()
for i, c in zip(range(1, len(cells) - 1, 2), range(1, 1 + len(cells) / 2)):
tmp['file' + str(c)] = [cells[i], int(cells[i + 1])]
return tmp
elif title == 'start_type':
if cells[1] == 'read_config':
specs = []
for i in range(2, len(cells) - 1):
specs.append(int(cells[i]))
return OrderedDict([('start_type', 'read_config'),
('species', specs),
('file_name', cells[-1])])
if cells[1] == 'make_config':
specs = []
for i in range(2, len(cells)):
specs.append(int(cells[i]))
return OrderedDict([('start_type', 'make_config'),
('species', specs),
('file_name', '')])
if cells[1] == 'add to config':
self.logger.error('Sorry, the \'add to config\' regime of the Start_Type option is not supported yet')
exit(1)
if cells[1] == 'checkpoint':
self.logger.error('Sorry, the \'checkpoint\' regime of the Start_Type option is not supported yet')
exit(1)
elif title == 'property_info':
if int(cells[1]) == 1:
tmp = OrderedDict()
for i in range(2, len(cells)):
tmp['prop' + str(i - 1)] = str.lower(cells[i])
return tmp
elif title == 'seed_info':
return [int(cells[1]), int(cells[2])]
elif (title == 'prob_deletion') or (title == 'rcutoff_low') or \
(title == 'bond_prob_cutoff') or (title == 'chemical_potential_info'):
return float(cells[1])
elif (title == 'average_info') or (title == 'nbr_species') or (title == 'temperature_info'):  # title was lower-cased above
return int(cells[1])
else:
return cells[1]
def unwrap_gas(self):
"""pysimm.cassandra.Cassandra.unwrap_gas
Ensures that all particles that are not fixed are unwrapped, otherwise CASSANDRA might not interpret
them correctly
"""
gas_system = self.system.copy()
for p in gas_system.particles:
if p.is_fixed:
gas_system.particles.remove(p.tag, update=False)
else:
self.system.particles.remove(p.tag, update=False)
for m in gas_system.molecules:
if any([t.is_fixed for t in m.particles]):
gas_system.molecules.remove(m.tag, update=False)
else:
self.system.molecules.remove(m.tag, update=False)
gas_system.remove_spare_bonding()
self.system.remove_spare_bonding()
gas_system.unwrap()
self.system.add(gas_system, change_dim=False)
class McfWriter(object):
"""pysimm.cassandra.McfWriter
Object responsible for creating the CASSANDRA Molecular Configuration file (.mcf).
Attributes:
syst (:class:`~pysimm.system.System`) : represents the molecule to be described
file_ref (str) : full relative path to the file that will be created
"""
# Section names in any .mcf file
mcf_tags = ['# Atom_Info', '# Bond_Info', '# Angle_Info', '# Dihedral_Info',
'# Improper_Info', '# Intra_Scaling', '# Fragment_Info', '# Fragment_Connectivity']
empty_line = '0'
def __init__(self, syst, file_ref):
self.syst = syst
self.file_ref = file_ref
self.logger = logging.getLogger('MCF Writer')
def write(self, typing='all'):
"""pysimm.cassandra.McfWriter.write
Method creates the .mcf file, writing only those sections of it that are marked to be written
Args:
            typing (list or str) : the list of sections to be written, or a text keyword. List items should be as
                they are defined in the :class:`~pysimm.cassandra.McfWriter.mcf_tags` field; default 'all'
"""
# Initializing output stream
with open(self.file_ref, 'w') as out_stream:
for (name, is_write) in zip(self.mcf_tags, self.__to_tags__(typing)):
if is_write:
try:
method = getattr(self, '__write_' + str.lower(name[2:]) + '__')
method(out_stream)
except AttributeError:
self.__write_empty__(out_stream, name)
else:
self.__write_empty__(out_stream, name)
out_stream.write('\nEND')
out_stream.close()
def __write_empty__(self, out, name):
out.write('{0:}\n{1:}\n\n'.format(name, self.empty_line))
def __write_atom_info__(self, out):
global KCALMOL_2_K
text_tag = '# Atom_Info'
if self.syst.particles.count > 0:
# writing section header
out.write('{:}\n'.format(text_tag))
# Verify and fix net system charge
self.syst.zero_charge()
# writing total number of particles
out.write('{0:<6}\n'.format(self.syst.particles.count))
count = 0
line_template = '{l[0]:<6}{l[1]:<7}{l[2]:<5}{l[3]:<8.3f}{l[4]:<10.6f}' \
'{l[5]:<6}{l[6]:<11.3f}{l[7]:<9.3f}\n'
warn_flag = False
for item in self.syst.particles:
line = [count + 1, '', '', 0, 0, 'LJ', 0, 0]
if item.charge:
line[4] = item.charge
if item.type:
line[1] = item.type.tag
line[2] = item.type.tag
if item.type.name:
line[1] = item.type.name
line[2] = item.type.elem
else:
warn_flag = True
if item.type.mass:
line[3] = item.type.mass
if item.type.epsilon:
line[6] = KCALMOL_2_K * item.type.epsilon
if item.type.sigma:
line[7] = item.type.sigma
else:
continue
out.write(line_template.format(l=line))
count += 1
if warn_flag:
self.logger.warning('Some particle type names (and/or element names) inside the system are not defined.'
' Will use type identifiers instead')
else:
self.__write_empty__(out, text_tag)
out.write('\n')
def __write_bond_info__(self, out):
text_tag = '# Bond_Info'
if self.syst.bonds.count > 0:
# writing section header
out.write('{:}\n'.format(text_tag))
# writing total number of bonds
out.write('{0:<6}\n'.format(self.syst.bonds.count))
line_template = '{l[0]:<6d}{l[1]:<6d}{l[2]:<6d}{l[3]:<9}{l[4]:<6.3f}\n'
count = 1
for bond in self.syst.bonds:
tmp = 'fixed' # Fixed bond is the only option for CASSANDRA V-1.2
line = [count, bond.a.tag, bond.b.tag, tmp, bond.type.r0]
count += 1
out.write(line_template.format(l=line))
out.write('\n')
else:
self.__write_empty__(out, text_tag)
def __write_angle_info__(self, out):
text_tag = '# Angle_Info'
if self.syst.angles.count > 0:
# writing section header
out.write('{:}\n'.format(text_tag))
# writing total number of angles
out.write('{0:<6}\n'.format(self.syst.angles.count))
count = 1
for angle in self.syst.angles:
line_template = '{l[0]:<6d}{l[1]:<6d}{l[2]:<6d}{l[3]:<6d}{l[4]:<10}{l[5]:<13.3f}'
line = [count, angle.a.tag, angle.b.tag, angle.c.tag]
if hasattr(angle.type, 'is_fixed') and angle.type.is_fixed:
addon = ['fixed', angle.type.theta0]
else:
addon = ['harmonic', KCALMOL_2_K * angle.type.k, angle.type.theta0]
line_template += '{l[6]:<13.3f}'
count += 1
out.write(line_template.format(l=line + addon) + '\n')
out.write('\n')
else:
self.__write_empty__(out, text_tag)
def __write_intra_scaling__(self, out):
format_line = '{:<6.2f}{:<6.2f}{:<6.2f}{:<6.2f}'
# writing section header
out.write('{:}\n'.format('# Intra_Scaling'))
# writing vdW scaling: 1-2 1-3 1-4 1-N
out.write(format_line.format(0, 0, 0, 0) + '\n')
# writing charge scaling: 1-2 1-3 1-4 1-N
out.write(format_line.format(0, 0, 0, 0) + '\n\n')
def __write_dihedral_info__(self, out):
text_tag = '# Dihedral_Info'
self.__write_empty__(out, text_tag)
def __write_improper_info__(self, out):
text_tag = '# Improper_Info'
self.__write_empty__(out, text_tag)
def __write_fragment_info__(self, out):
# writing section header
out.write('{:}\n'.format('# Fragment_Info'))
# writing indexing
out.write('{:}\n'.format(1))
n = len(self.syst.particles)
        out.write(' '.join('{}'.format(item) for item in [1, n] + list(range(1, n + 1))))
out.write('\n\n')
def __write_fragment_connectivity__(self, out):
text_tag = '# Fragment_Connectivity'
self.__write_empty__(out, text_tag)
def __to_tags__(self, inpt):
n = len(self.mcf_tags)
idxs = [True] * n
if inpt.lower() == 'atoms':
idxs = [False] * n
idxs[self.mcf_tags.index('# Atom_Info')] = True
idxs[self.mcf_tags.index('# Intra_Scaling')] = True
return idxs
def check_cs_exec():
"""pysimm.cassandra.check_cs_exec
    Validates that the absolute path to the CASSANDRA executable is set in the `CASSANDRA_EXEC` environment variable
of the OS. The validation is called once inside the :class:`~Cassandra.run` method.
"""
global CASSANDRA_EXEC
flag = True
if CASSANDRA_EXEC is None:
        print("Please specify the OS environment variable 'CASSANDRA_EXEC' that points to the "
              "CASSANDRA compiled binary file, which is by default cassandra_{compiler-name}[_openMP].exe")
flag = False
return flag
def make_iterable(obj):
"""pysimm.cassandra.make_iterable
    Utility function that forces the given object to be iterable (wraps it in a list if it consists of only one item)
"""
it_obj = obj
if not isinstance(obj, Iterable):
it_obj = [obj]
return it_obj
|
mit
| -1,943,103,293,411,142,400
| 45.063699
| 123
| 0.549686
| false
| 3.921
| false
| false
| false
|
chop-dbhi/varify-data-warehouse
|
vdw/samples/migrations/0008_force_migrate_default_cohort_and_project.py
|
1
|
23232
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from vdw.samples.models import DEFAULT_COHORT_NAME, DEFAULT_PROJECT_NAME
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
Project = orm['samples.Project']
Cohort = orm['samples.Cohort']
now = datetime.datetime.now()
# Create default project
try:
project = Project.objects.get(name=DEFAULT_PROJECT_NAME)
except Project.DoesNotExist:
project = Project(name=DEFAULT_PROJECT_NAME,
label=DEFAULT_PROJECT_NAME, created=now, modified=now)
project.save()
# Create default cohort
try:
cohort = Cohort.objects.get(name=DEFAULT_COHORT_NAME)
except Cohort.DoesNotExist:
cohort = Cohort(name=DEFAULT_COHORT_NAME, published=True,
autocreated=True, created=now, modified=now)
cohort.save()
def backwards(self, orm):
"Write your backwards methods here."
# There is not guarantee these objects did not already exist
# so these should not be deleted
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 27, 16, 57, 27, 697343)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 27, 16, 57, 27, 697128)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'avocado.datacontext': {
'Meta': {'object_name': 'DataContext'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'composite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'_count'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datacontext+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.chromosome': {
'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'})
},
'genome.genome': {
'Meta': {'object_name': 'Genome', 'db_table': "'genome'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'released': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.genotype': {
'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'literature.pubmed': {
'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"},
'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'phenotypes.phenotype': {
'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'})
},
'samples.batch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'Batch', 'db_table': "'batch'"},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'samples.cohort': {
'Meta': {'object_name': 'Cohort', 'db_table': "'cohort'"},
'autocreated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'context': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['avocado.DataContext']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Project']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'samples': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Sample']", 'through': "orm['samples.CohortSample']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'samples.cohortsample': {
'Meta': {'unique_together': "(('object_set', 'set_object'),)", 'object_name': 'CohortSample', 'db_table': "'cohort_sample'"},
'added': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Cohort']", 'db_column': "'cohort_id'"}),
'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'set_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']", 'db_column': "'sample_id'"})
},
'samples.cohortvariant': {
'Meta': {'unique_together': "(('variant', 'cohort'),)", 'object_name': 'CohortVariant', 'db_table': "'cohort_variant'"},
'af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}),
'cohort': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Cohort']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.person': {
'Meta': {'object_name': 'Person', 'db_table': "'person'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'samples.project': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'samples.relation': {
'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}),
'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'samples.result': {
'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"},
'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 'null': 'True', 'blank': 'True'}),
'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'haplotype_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']"}),
'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.sample': {
'Meta': {'unique_together': "(('batch', 'name'),)", 'object_name': 'Sample', 'db_table': "'sample'"},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}),
'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.IntegerField', [], {})
},
'samples.samplerun': {
'Meta': {'object_name': 'SampleRun', 'db_table': "'sample_run'"},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'genome': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genome']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']"})
},
'variants.variant': {
'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"},
'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}),
'pos': ('django.db.models.fields.IntegerField', [], {}),
'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'})
},
'variants.variantphenotype': {
'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"},
'hgmd_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'variants.varianttype': {
'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['samples']
|
bsd-2-clause
| -32,546,773,490,712,990
| 78.835052
| 192
| 0.544594
| false
| 3.663198
| false
| false
| false
|
kingtaurus/cs224d
|
assignment1/q3_word2vec_sol.py
|
1
|
7778
|
import numpy as np
import random
from q1_softmax_sol import softmax_sol as softmax
from q2_gradcheck import gradcheck_naive
from q2_sigmoid_sol import sigmoid_sol as sigmoid
from q2_sigmoid_sol import sigmoid_grad_sol as sigmoid_grad
def normalizeRows_sol(x):
""" Row normalization function """
# Implement a function that normalizes each row of a matrix to have unit length
### YOUR CODE HERE
N = x.shape[0]
x /= np.sqrt(np.sum(x**2, axis=1)).reshape((N,1)) + 1e-30
### END YOUR CODE
return x
def softmaxCostAndGradient_sol(predicted, target, outputVectors, dataset):
""" Softmax cost function for word2vec models """
# Implement the cost and gradients for one predicted word vector
# and one target word vector as a building block for word2vec
# models, assuming the softmax prediction function and cross
# entropy loss.
# Inputs:
# - predicted: numpy ndarray, predicted word vector (\hat{v} in
# the written component or \hat{r} in an earlier version)
# - target: integer, the index of the target word
# - outputVectors: "output" vectors (as rows) for all tokens
# - dataset: needed for negative sampling, unused here.
# Outputs:
# - cost: cross entropy cost for the softmax word prediction
# - gradPred: the gradient with respect to the predicted word
# vector
# - grad: the gradient with respect to all the other word
# vectors
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
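    # Sketch of the math implemented below, with y_hat = softmax(U v_hat),
    # y the one-hot vector for `target`, and U = outputVectors (rows u_w):
    #   cost     = -log y_hat[target]
    #   gradPred = U^T (y_hat - y)        (gradient w.r.t. the predicted vector)
    #   grad     = (y_hat - y) v_hat^T    (gradient w.r.t. all output vectors)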
### YOUR CODE HERE
probabilities = softmax(predicted.dot(outputVectors.T))
cost = -np.log(probabilities[target])
delta = probabilities
delta[target] -= 1
N = delta.shape[0]
D = predicted.shape[0]
grad = delta.reshape((N,1)) * predicted.reshape((1,D))
gradPred = (delta.reshape((1,N)).dot(outputVectors)).flatten()
### END YOUR CODE
return cost, gradPred, grad
def negSamplingCostAndGradient_sol(predicted, target, outputVectors, dataset,
K=10):
""" Negative sampling cost function for word2vec models """
# Implement the cost and gradients for one predicted word vector
# and one target word vector as a building block for word2vec
# models, using the negative sampling technique. K is the sample
# size. You might want to use dataset.sampleTokenIdx() to sample
# a random word index.
#
# Note: See test_word2vec below for dataset's initialization.
#
# Input/Output Specifications: same as softmaxCostAndGradient
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
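    # Sketch of the negative-sampling objective implemented below, with v_hat
    # the predicted vector, u_o the target output vector and u_k the K sampled
    # negative output vectors:
    #   cost = -log(sigmoid(u_o . v_hat)) - sum_k log(sigmoid(-u_k . v_hat))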
### YOUR CODE HERE
grad = np.zeros(outputVectors.shape)
gradPred = np.zeros(predicted.shape)
indices = [target]
for k in range(K):
newidx = dataset.sampleTokenIdx()
while newidx == target:
newidx = dataset.sampleTokenIdx()
indices += [newidx]
labels = np.array([1] + [-1 for k in range(K)])
vecs = outputVectors[indices,:]
t = sigmoid(vecs.dot(predicted) * labels)
cost = -np.sum(np.log(t))
delta = labels * (t - 1)
gradPred = delta.reshape((1,K+1)).dot(vecs).flatten()
gradtemp = delta.reshape((K+1,1)).dot(predicted.reshape(
(1,predicted.shape[0])))
for k in range(K+1):
grad[indices[k]] += gradtemp[k,:]
# t = sigmoid(predicted.dot(outputVectors[target,:]))
# cost = -np.log(t)
# delta = t - 1
# gradPred += delta * outputVectors[target, :]
# grad[target, :] += delta * predicted
# for k in range(K):
# idx = dataset.sampleTokenIdx()
# t = sigmoid(-predicted.dot(outputVectors[idx,:]))
# cost += -np.log(t)
# delta = 1 - t
# gradPred += delta * outputVectors[idx, :]
# grad[idx, :] += delta * predicted
### END YOUR CODE
return cost, gradPred, grad
def skipgram_sol(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient = softmaxCostAndGradient_sol):
""" Skip-gram model in word2vec """
# Implement the skip-gram model in this function.
# Inputs:
    # - currentWord: a string of the current center word
# - C: integer, context size
# - contextWords: list of no more than 2*C strings, the context words
# - tokens: a dictionary that maps words to their indices in
# the word vector list
# - inputVectors: "input" word vectors (as rows) for all tokens
# - outputVectors: "output" word vectors (as rows) for all tokens
# - word2vecCostAndGradient: the cost and gradient function for
# a prediction vector given the target word vectors,
# could be one of the two cost functions you
# implemented above
# Outputs:
# - cost: the cost function value for the skip-gram model
# - grad: the gradient with respect to the word vectors
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
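    # Skip-gram sums, over every word in the context window, the cost of
    # predicting that context word from the input vector of the center word;
    # gradients are accumulated into gradIn (center word row) and gradOut.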
### YOUR CODE HERE
currentI = tokens[currentWord]
predicted = inputVectors[currentI, :]
cost = 0.0
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
for cwd in contextWords:
idx = tokens[cwd]
cc, gp, gg = word2vecCostAndGradient(predicted, idx, outputVectors, dataset)
cost += cc
gradOut += gg
gradIn[currentI, :] += gp
### END YOUR CODE
return cost, gradIn, gradOut
def cbow_sol(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient = softmaxCostAndGradient_sol):
""" CBOW model in word2vec """
# Implement the continuous bag-of-words model in this function.
# Input/Output specifications: same as the skip-gram model
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
#################################################################
    # IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRITTEN #
# ASSIGNMENT ARE NOT! #
#################################################################
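    # CBOW forms the prediction vector by summing the input vectors of all
    # context words, predicts the center word from it, and then spreads the
    # predicted-vector gradient back onto each contributing context row.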
cost = 0
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
### YOUR CODE HERE
D = inputVectors.shape[1]
predicted = np.zeros((D,))
indices = [tokens[cwd] for cwd in contextWords]
for idx in indices:
predicted += inputVectors[idx, :]
cost, gp, gradOut = word2vecCostAndGradient(predicted, tokens[currentWord], outputVectors, dataset)
gradIn = np.zeros(inputVectors.shape)
for idx in indices:
gradIn[idx, :] += gp
### END YOUR CODE
return cost, gradIn, gradOut
|
mit
| 4,524,440,371,980,069,400
| 43.193182
| 120
| 0.576369
| false
| 4.250273
| false
| false
| false
|
taxpon/sverchok
|
ui/sv_icons.py
|
1
|
1302
|
import bpy
import os
import glob
import bpy.utils.previews
# custom icons dictionary
_icon_collection = {}
def custom_icon(name):
    load_custom_icons()  # load the custom icons in case they are not already loaded
custom_icons = _icon_collection["main"]
    default = lambda: None  # fallback whose icon_id (0) is returned when no icon with the given name exists
default.icon_id = 0
return custom_icons.get(name, default).icon_id
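# Illustrative use inside a node's draw code (the icon name is hypothetical):
#     layout.label(text="Sverchok", icon_value=custom_icon("SV_ALPHA"))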
def load_custom_icons():
if len(_icon_collection): # return if custom icons already loaded
return
custom_icons = bpy.utils.previews.new()
iconsDir = os.path.join(os.path.dirname(__file__), "icons")
iconPattern = "sv_*.png"
iconPath = os.path.join(iconsDir, iconPattern)
iconFiles = [os.path.basename(x) for x in glob.glob(iconPath)]
for iconFile in iconFiles:
iconName = os.path.splitext(iconFile)[0]
iconID = iconName.upper()
custom_icons.load(iconID, os.path.join(iconsDir, iconFile), "IMAGE")
_icon_collection["main"] = custom_icons
def remove_custom_icons():
for custom_icons in _icon_collection.values():
bpy.utils.previews.remove(custom_icons)
_icon_collection.clear()
def register():
load_custom_icons()
def unregister():
remove_custom_icons()
if __name__ == '__main__':
register()
|
gpl-3.0
| 1,090,800,781,951,556,500
| 23.111111
| 76
| 0.667435
| false
| 3.481283
| false
| false
| false
|
romeotestuser/glimsol_report
|
report/billing_statement.py
|
1
|
2348
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import netsvc
from openerp.netsvc import Service
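# Drop a previously registered report service with the same name (if any) so
# that reloading this module does not fail on duplicate registration.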
for x in ['report.glimsol.billing.statement']:
try:
del Service._services[x]
except:
pass
from openerp.report import report_sxw
class billing(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(billing, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_line':self._get_line,
'get_total_si_amount':self._get_total_si_amount,
'get_total_ticket_amount':self._get_total_ticket_amount,
'get_user_ref':self._get_user_ref,
})
def _get_line(self,obj):
res=[]
return res
def _get_total_si_amount(self,obj):
res=[]
return res
def _get_total_ticket_amount(self,obj):
res=[]
return res
def _get_user_ref(self,obj,trigger):
for target_trigger in ['sales executive','courier','customer']:
if target_trigger != trigger:
continue
res = []
return res
report_sxw.report_sxw('report.glimsol.billing.statement', 'account.billing', 'addons/glimsol_report/report/billing_statement.rml', parser=billing, header="external")
|
gpl-2.0
| 385,092,275,381,778,500
| 32.557143
| 165
| 0.57879
| false
| 4.185383
| false
| false
| false
|
nash-x/hws
|
neutron/plugins/l2_proxy/agent/clients.py
|
1
|
9765
|
# Copyright 2014, Huawei, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo.config import cfg
from neutron import context as n_context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import excutils
logger = logging.getLogger(__name__)
from neutron.plugins.l2_proxy.agent import neutron_proxy_context
from neutron.plugins.l2_proxy.agent import neutron_keystoneclient as hkc
from novaclient import client as novaclient
from novaclient import shell as novashell
from neutronclient.common import exceptions
try:
from swiftclient import client as swiftclient
except ImportError:
swiftclient = None
logger.info('swiftclient not available')
try:
from neutronclient.v2_0 import client as neutronclient
except ImportError:
neutronclient = None
logger.info('neutronclient not available')
try:
from cinderclient import client as cinderclient
except ImportError:
cinderclient = None
logger.info('cinderclient not available')
try:
from ceilometerclient.v2 import client as ceilometerclient
except ImportError:
ceilometerclient = None
logger.info('ceilometerclient not available')
cloud_opts = [
cfg.StrOpt('cloud_backend',
default=None,
help="Cloud module to use as a backend. Defaults to OpenStack.")
]
cfg.CONF.register_opts(cloud_opts)
CASCADING = 'cascading'
CASCADED = 'cascaded'
class OpenStackClients(object):
'''
Convenience class to create and cache client instances.
'''
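    # Illustrative usage (a sketch; `ctx` is a request context carrying credentials):
    #     clients = OpenStackClients(ctx)
    #     neutron = clients.neutron()
    #     nova = clients.nova()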
def __init__(self, context):
self.context = context
self._nova = {}
self._keystone = None
self._swift = None
self._neutron = None
self._cinder = None
self._ceilometer = None
@property
def auth_token(self):
# if there is no auth token in the context
# attempt to get one using the context username and password
return self.context.auth_token or self.keystone().auth_token
def keystone(self):
if self._keystone:
return self._keystone
self._keystone = hkc.KeystoneClient(self.context)
return self._keystone
def url_for(self, **kwargs):
return self.keystone().url_for(**kwargs)
def nova(self, service_type='compute'):
if service_type in self._nova:
return self._nova[service_type]
con = self.context
if self.auth_token is None:
logger.error("Nova connection failed, no auth_token!")
return None
computeshell = novashell.OpenStackComputeShell()
extensions = computeshell._discover_extensions("1.1")
args = {
'project_id': con.tenant_id,
'auth_url': con.auth_url,
'service_type': service_type,
'username': None,
'api_key': None,
'extensions': extensions
}
client = novaclient.Client(1.1, **args)
management_url = self.url_for(
service_type=service_type,
attr='region',
filter_value='RegionTwo')
client.client.auth_token = self.auth_token
client.client.management_url = management_url
self._nova[service_type] = client
return client
def swift(self):
if swiftclient is None:
return None
if self._swift:
return self._swift
con = self.context
if self.auth_token is None:
logger.error("Swift connection failed, no auth_token!")
return None
args = {
'auth_version': '2.0',
'tenant_name': con.tenant_id,
'user': con.username,
'key': None,
'authurl': None,
'preauthtoken': self.auth_token,
'preauthurl': self.url_for(service_type='object-store')
}
self._swift = swiftclient.Connection(**args)
return self._swift
def neutron(self):
if neutronclient is None:
return None
if self._neutron:
return self._neutron
con = self.context
if self.auth_token is None:
logger.error("Neutron connection failed, no auth_token!")
return None
if self.context.region_name is None:
management_url = self.url_for(service_type='network',
endpoint_type='publicURL')
else:
management_url = self.url_for(
service_type='network',
attr='region',
endpoint_type='publicURL',
filter_value=self.context.region_name)
args = {
'auth_url': con.auth_url,
'insecure': self.context.insecure,
'service_type': 'network',
'token': self.auth_token,
'endpoint_url': management_url
}
self._neutron = neutronclient.Client(**args)
return self._neutron
def cinder(self):
if cinderclient is None:
return self.nova('volume')
if self._cinder:
return self._cinder
con = self.context
if self.auth_token is None:
logger.error("Cinder connection failed, no auth_token!")
return None
args = {
'service_type': 'volume',
'auth_url': con.auth_url,
'project_id': con.tenant_id,
'username': None,
'api_key': None
}
self._cinder = cinderclient.Client('1', **args)
management_url = self.url_for(service_type='volume')
self._cinder.client.auth_token = self.auth_token
self._cinder.client.management_url = management_url
return self._cinder
def ceilometer(self):
if ceilometerclient is None:
return None
if self._ceilometer:
return self._ceilometer
if self.auth_token is None:
logger.error("Ceilometer connection failed, no auth_token!")
return None
con = self.context
args = {
'auth_url': con.auth_url,
'service_type': 'metering',
'project_id': con.tenant_id,
'token': lambda: self.auth_token,
'endpoint': self.url_for(service_type='metering'),
}
client = ceilometerclient.Client(**args)
self._ceilometer = client
return self._ceilometer
if cfg.CONF.cloud_backend:
cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend)
Clients = cloud_backend_module.Clients
else:
Clients = OpenStackClients
logger.debug('Using backend %s' % Clients)
def get_cascade_neutron_client(mode):
if mode == CASCADING:
region_name = cfg.CONF.AGENT.region_name
elif mode == CASCADED:
region_name = cfg.CONF.AGENT.neutron_region_name
    else:
        logger.error(_('Mode must be either cascading or cascaded.'))
        raise ValueError('mode must be cascading or cascaded')
context = n_context.get_admin_context_without_session()
neutron_admin_auth_url = cfg.CONF.AGENT.neutron_admin_auth_url
kwargs = {'auth_token': None,
'username': cfg.CONF.AGENT.neutron_admin_user,
'password': cfg.CONF.AGENT.admin_password,
'aws_creds': None,
'tenant': cfg.CONF.AGENT.neutron_admin_tenant_name,
'auth_url': neutron_admin_auth_url,
'insecure': cfg.CONF.AGENT.auth_insecure,
'roles': context.roles,
'is_admin': context.is_admin,
'region_name': region_name}
reqCon = neutron_proxy_context.RequestContext(**kwargs)
openStackClients = OpenStackClients(reqCon)
neutronClient = openStackClients.neutron()
return neutronClient
def check_neutron_client_valid(function):
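    # Decorator: retry the wrapped neutron call up to three times on
    # Unauthorized errors, rebuilding the cascade client before each retry.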
@functools.wraps(function)
def decorated_function(self, method_name, *args, **kwargs):
retry = 0
while(True):
try:
return function(self, method_name, *args, **kwargs)
except exceptions.Unauthorized:
retry = retry + 1
if(retry <= 3):
self.client = get_cascade_neutron_client(self.mode)
continue
else:
with excutils.save_and_reraise_exception():
logger.error(_('Try 3 times, Unauthorized.'))
return None
return decorated_function
class CascadeNeutronClient(object):
def __init__(self, mode):
#mode is cascading or cascaded
self.mode = mode
self.client = get_cascade_neutron_client(self.mode)
@check_neutron_client_valid
def __call__(self, method_name, *args, **kwargs):
method = getattr(self.client, method_name)
if method:
return method(*args, **kwargs)
else:
raise Exception('can not find the method')
@check_neutron_client_valid
def execute(self, method_name, *args, **kwargs):
method = getattr(self.client, method_name)
if method:
return method(*args, **kwargs)
else:
raise Exception('can not find the method')
|
apache-2.0
| 659,710,628,943,577,700
| 30.704545
| 79
| 0.603277
| false
| 4.198194
| false
| false
| false
|
sibskull/synaptiks
|
synaptiks/monitors/mouses.py
|
1
|
8959
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Sebastian Wiesner <lunaryorn@googlemail.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
synaptiks.monitors.mouses
=========================
Implementation of mouse monitoring.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@googlemail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from collections import namedtuple
from itertools import ifilter
import pyudev
from pyudev.pyqt4 import QUDevMonitorObserver
from PyQt4.QtCore import QObject, pyqtSignal
from synaptiks.monitors.power import create_resume_monitor
__all__ = ['MouseDevicesManager', 'MouseDevicesMonitor', 'MouseDevice']
def _is_mouse(device):
return (device.sys_name.startswith('event') and
device.get('ID_INPUT_MOUSE') == '1' and
not device.get('ID_INPUT_TOUCHPAD') == '1')
class MouseDevice(namedtuple('_MouseDevice', ['serial', 'name'])):
"""
A :func:`~collections.namedtuple()` representing a mouse device.
A mouse device currently has two attributes, the order corresponds to the
tuple index:
- :attr:`serial`
- :attr:`name`
"""
@classmethod
def from_udev(cls, device):
"""
Create a :class:`MouseDevice` tuple from a :class:`pyudev.Device`.
"""
# The name is available from the parent device of the actual event
# device. The parent represents the actual physical device. The name
# may be decorated with quotation marks, which are removed for the sake
# of a clean represenation
return cls(device['ID_SERIAL'], device.parent['NAME'].strip('"'))
class MouseDevicesMonitor(QObject):
"""
Watch for plugged or unplugged mouse devices.
"""
    #: Qt signal, which is emitted when a mouse is plugged. The slot gets a
#: single argument of :class:`MouseDevice`, which represents the plugged
#: mouse device
mousePlugged = pyqtSignal(MouseDevice)
    #: Qt signal, which is emitted when a mouse is unplugged. The slot gets a
#: single argument of type :class:`MouseDevice`, which represents the
#: unplugged mouse device
mouseUnplugged = pyqtSignal(MouseDevice)
def __init__(self, parent=None):
"""
Create a new monitor.
``parent`` is the parent :class:`~PyQt4.QtCore.QObject`.
"""
QObject.__init__(self, parent)
self._udev = pyudev.Context()
self._notifier = QUDevMonitorObserver(
pyudev.Monitor.from_netlink(self._udev), self)
self._notifier.deviceEvent.connect(self._handle_udev_event)
self._notifier.monitor.filter_by('input')
self._notifier.monitor.start()
self._event_signal_map = dict(
add=self.mousePlugged, remove=self.mouseUnplugged)
@property
def plugged_devices(self):
"""
An iterator over all currently plugged mouse devices as
:class:`MouseDevice` objects.
"""
devices = self._udev.list_devices(
subsystem='input', ID_INPUT_MOUSE=True)
for device in ifilter(_is_mouse, devices):
yield MouseDevice.from_udev(device)
def _handle_udev_event(self, evt, device):
signal = self._event_signal_map.get(unicode(evt))
if signal and _is_mouse(device):
signal.emit(MouseDevice.from_udev(device))
class MouseDevicesManager(MouseDevicesMonitor):
"""
Manage mouse devices.
This class derives from :class:`MouseDevicesMonitor` to provide more
advanced monitoring of mouse devices. In addition to the basic monitoring
provided by :class:`MouseDevicesMonitor` this class keeps a record of
currently plugged devices, and thus also informs about the *first* mouse
plugged, and the *last* mouse unplugged.
"""
    #: Qt signal, which is emitted if the first mouse is plugged. The slot
    #: gets a single argument, which is the plugged :class:`MouseDevice`.
firstMousePlugged = pyqtSignal(MouseDevice)
    #: Qt signal, which is emitted if the last mouse is unplugged. The slot
    #: gets a single argument, which is the unplugged :class:`MouseDevice`.
lastMouseUnplugged = pyqtSignal(MouseDevice)
def __init__(self, parent=None):
"""
Create a new manager.
``parent`` is the parent ``QObject``.
"""
MouseDevicesMonitor.__init__(self, parent)
self._resume_monitor = create_resume_monitor(self)
self._mouse_registry = set()
self._ignored_mouses = frozenset()
self.is_running = False
def start(self):
"""
Start to observe mouse devices.
Does nothing, if the manager is already running.
"""
if not self.is_running:
self.mousePlugged.connect(self._register_mouse)
self.mouseUnplugged.connect(self._unregister_mouse)
if self._resume_monitor:
self._resume_monitor.resuming.connect(self._reset_registry)
self._reset_registry()
self.is_running = True
def stop(self):
"""
Stop to observe mouse devices.
Does nothing, if the manager is not running.
"""
if self.is_running:
self.mousePlugged.disconnect(self._register_mouse)
self.mouseUnplugged.disconnect(self._unregister_mouse)
if self._resume_monitor:
self._resume_monitor.resuming.disconnect(self._reset_registry)
self._clear_registry()
self.is_running = False
def _unregister_mouse(self, device):
"""
Unregister the given mouse ``device``. If this is the last plugged
mouse, :attr:`lastMouseUnplugged` is emitted with the given ``device``.
"""
try:
self._mouse_registry.remove(device)
except KeyError:
pass
else:
if not self._mouse_registry:
self.lastMouseUnplugged.emit(device)
def _register_mouse(self, device):
"""
Register the given mouse ``device``. If this is the first plugged
mouse, :attr:`firstMousePlugged` is emitted with the given ``device``.
"""
if device.serial not in self._ignored_mouses:
if not self._mouse_registry:
self.firstMousePlugged.emit(device)
self._mouse_registry.add(device)
def _reset_registry(self):
"""
Re-register all plugged mouses.
"""
self._clear_registry()
for device in self.plugged_devices:
self._register_mouse(device)
def _clear_registry(self):
"""
Clear the registry of plugged mouse devices.
"""
for device in list(self._mouse_registry):
self._unregister_mouse(device)
@property
def ignored_mouses(self):
"""
The list of ignored mouses.
This property holds a list of serial numbers. Mouse devices with these
serial numbers are simply ignored when plugged or unplugged.
Modifying the returned list in place does not have any effect, assign
to this property to change the list of ignored devices. You may also
assign a list of :class:`~synaptiks.monitors.MouseDevice` objects.
"""
return list(self._ignored_mouses)
@ignored_mouses.setter
def ignored_mouses(self, devices):
devices = set(d if isinstance(d, basestring) else d.serial
for d in devices)
if self._ignored_mouses != devices:
self._ignored_mouses = devices
if self.is_running:
self._reset_registry()
|
bsd-2-clause
| -3,596,675,670,361,718,300
| 35.717213
| 79
| 0.650854
| false
| 4.227938
| false
| false
| false
|
snoopycrimecop/openmicroscopy
|
components/tools/OmeroPy/test/integration/gatewaytest/test_get_objects.py
|
1
|
45419
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gateway tests - Testing the gateway.getObject() and deleteObjects() methods
Copyright 2013-2015 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
- author_testimg_generated
- author_testimg_tiny
"""
from builtins import str
from builtins import range
from builtins import object
import omero
import uuid
import pytest
from omero.gateway.scripts import dbhelpers
from omero.rtypes import wrap, rlong
from omero.testlib import ITest
from omero.gateway import BlitzGateway, KNOWN_WRAPPERS, DatasetWrapper, \
ProjectWrapper, ImageWrapper, ScreenWrapper, PlateWrapper
from omero.model import DatasetI, \
ImageI, \
PlateI, \
ScreenI, \
WellI, \
WellSampleI
try:
    long
except Exception:
# Python 3
long = int
class TestDeleteObject (object):
def testDeleteAnnotation(self, author_testimg_generated):
image = author_testimg_generated
gateway = image._conn
# create Tag on Image and try to delete Tag
tag = omero.gateway.TagAnnotationWrapper(gateway)
ns_tag = "omero.gateway.test.get_objects.test_delete_annotation_tag"
tag.setNs(ns_tag)
tag.setValue("Test Delete Tag")
tag = image.linkAnnotation(tag)
tagId = tag.getId()
handle = gateway.deleteObjects("Annotation", [tagId])
gateway._waitOnCmd(handle)
assert gateway.getObject("Annotation", tagId) is None
def testDeleteImage(self, gatewaywrapper, author_testimg_generated):
image = author_testimg_generated
imageId = image.getId()
project = gatewaywrapper.getTestProject()
projectId = project.getId()
ns = "omero.gateway.test.get_objects.test_delete_image_comment"
ns_tag = "omero.gateway.test.get_objects.test_delete_image_tag"
# create Comment
ann = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway)
ann.setNs(ns)
ann.setValue("Test Comment")
ann = image.linkAnnotation(ann)
# create Tag
tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway)
tag.setNs(ns_tag)
tag.setValue("Test Tag")
tag = image.linkAnnotation(tag)
# check the Comment
assert gatewaywrapper.gateway.getObject(
"Annotation", ann.id) is not None
assert gatewaywrapper.gateway.getObject(
"Annotation", tag.id) is not None
# check Image, delete (wait) and check
assert gatewaywrapper.gateway.getObject("Image", imageId) is not None
handle = gatewaywrapper.gateway.deleteObjects("Image", [imageId])
gatewaywrapper.gateway._waitOnCmd(handle)
assert gatewaywrapper.gateway.getObject("Image", imageId) is None
# Comment should be deleted but not the Tag (becomes orphan)
assert gatewaywrapper.gateway.getObject("Annotation", ann.id) is None
assert gatewaywrapper.gateway.getObject(
"Annotation", tag.id) is not None
# Add the tag to project and delete (with Tags)
assert gatewaywrapper.gateway.getObject(
"Project", projectId) is not None
project.linkAnnotation(tag)
datasetIds = [d.getId() for d in project.listChildren()]
assert len(datasetIds) > 0
handle = gatewaywrapper.gateway.deleteObjects(
"Project", [projectId], deleteAnns=True, deleteChildren=True)
gatewaywrapper.gateway._waitOnCmd(handle)
assert gatewaywrapper.gateway.getObject("Project", projectId) is None
assert gatewaywrapper.gateway.getObject("Annotation", tag.id) is None
# Tag should be gone
# check datasets gone too
for dId in datasetIds:
assert gatewaywrapper.gateway.getObject("Dataset", dId) is None
class TestFindObject (object):
def testIllegalObjTypeInt(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
with pytest.raises(AttributeError):
gatewaywrapper.gateway.getObject(1, int(1))
def testObjTypeUnicode(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
a = gatewaywrapper.getTestProject()
b = gatewaywrapper.gateway.getObject(u'Project', a.getId())
assert a.getId() == b.getId()
def testObjTypeString(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
a = gatewaywrapper.getTestProject()
b = gatewaywrapper.gateway.getObject('Project', a.getId())
assert a.getId() == b.getId()
def testFindProject(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
project = gatewaywrapper.getTestProject()
pName = project.getName()
findProjects = list(gatewaywrapper.gateway.getObjects(
"Project", None, attributes={"name": pName}))
assert len(findProjects) > 0, "Did not find Project by name"
for p in findProjects:
assert p.getName() == pName, \
"All projects should have queried name"
def testFindExperimenter(self, gatewaywrapper, author_testimg_tiny):
omeName = author_testimg_tiny.getOwnerOmeName()
group = author_testimg_tiny.getDetails().getGroup()
groupName = group.getName()
gatewaywrapper.loginAsAdmin()
# findObjects
findAuthor = list(gatewaywrapper.gateway.getObjects(
"Experimenter", None, attributes={"omeName": omeName}))
assert len(findAuthor) == 1, "Did not find Experimenter by omeName"
assert findAuthor[0].omeName == omeName
# findObject
author = gatewaywrapper.gateway.getObject(
"Experimenter", None, attributes={"omeName": omeName})
assert author is not None
assert author.omeName == omeName
# find group
grp = gatewaywrapper.gateway.getObject(
"ExperimenterGroup", None, attributes={"name": groupName})
assert grp is not None
assert grp.getName() == groupName
def testFindAnnotation(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
# start by deleting any tag created by this method that may have been
# left behind
tag_value = "FindThisTag"
find_ns = "omero.gateway.test.test_find_annotations"
find_tag = gatewaywrapper.gateway.getObjects(
"Annotation", attributes={"textValue": tag_value, "ns": find_ns})
ids = [t._obj.id.val for t in find_tag]
if ids:
gatewaywrapper.gateway.deleteObjects("Annotation", ids, wait=True)
# create Tag
tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway)
tag.setNs(find_ns)
tag.setValue(tag_value)
tag.save()
tagId = tag.getId()
# findObject by name
find_tag = gatewaywrapper.gateway.getObject(
"Annotation", attributes={"textValue": tag_value})
assert find_tag is not None
assert find_tag.getValue() == tag_value
# find by namespace
find_tag = gatewaywrapper.gateway.getObject(
"Annotation", attributes={"ns": find_ns})
assert find_tag is not None
assert find_tag.getNs() == find_ns
# find by text value
find_tag = gatewaywrapper.gateway.getObject(
"TagAnnotation", attributes={"textValue": tag_value})
assert find_tag is not None
assert find_tag.getValue() == tag_value
# create some other annotations... (not linked!)
longAnn = omero.gateway.LongAnnotationWrapper(gatewaywrapper.gateway)
longAnn.setValue(12345)
longAnn.save()
longId = longAnn.getId()
boolAnn = omero.gateway.BooleanAnnotationWrapper(
gatewaywrapper.gateway)
boolAnn.setValue(True)
boolAnn.save()
boolId = boolAnn.getId()
commAnn = omero.gateway.CommentAnnotationWrapper(
gatewaywrapper.gateway)
commAnn.setValue("This is a blitz gatewaytest Comment.")
commAnn.save()
commId = commAnn.getId()
fileAnn = omero.gateway.FileAnnotationWrapper(gatewaywrapper.gateway)
# An original file object needs to be linked to the annotation or it
# will fail to be loaded on getObject(s).
fileObj = omero.model.OriginalFileI()
fileObj = omero.gateway.OriginalFileWrapper(
gatewaywrapper.gateway, fileObj)
fileObj.setName(omero.rtypes.rstring('a'))
fileObj.setPath(omero.rtypes.rstring('a'))
fileObj.setHash(omero.rtypes.rstring('a'))
fileObj.setSize(omero.rtypes.rlong(0))
fileObj.save()
fileAnn.setFile(fileObj)
fileAnn.save()
fileId = fileAnn.getId()
doubleAnn = omero.gateway.DoubleAnnotationWrapper(
gatewaywrapper.gateway)
doubleAnn.setValue(1.23456)
doubleAnn.save()
doubleId = doubleAnn.getId()
termAnn = omero.gateway.TermAnnotationWrapper(gatewaywrapper.gateway)
termAnn.setValue("Metaphase")
termAnn.save()
termId = termAnn.getId()
timeAnn = omero.gateway.TimestampAnnotationWrapper(
gatewaywrapper.gateway)
timeAnn.setValue(1000)
timeAnn.save()
timeId = timeAnn.getId()
# list annotations of various types - check they include ones from
# above
tags = list(gatewaywrapper.gateway.getObjects("TagAnnotation"))
for t in tags:
assert t.OMERO_TYPE == tag.OMERO_TYPE
assert tagId in [t.getId() for t in tags]
longs = list(gatewaywrapper.gateway.getObjects("LongAnnotation"))
for lng in longs:
assert lng.OMERO_TYPE == longAnn.OMERO_TYPE
assert longId in [lng.getId() for lng in longs]
bools = list(gatewaywrapper.gateway.getObjects("BooleanAnnotation"))
for b in bools:
assert b.OMERO_TYPE == boolAnn.OMERO_TYPE
assert boolId in [b.getId() for b in bools]
comms = list(gatewaywrapper.gateway.getObjects("CommentAnnotation"))
for c in comms:
assert c.OMERO_TYPE == commAnn.OMERO_TYPE
assert commId in [c.getId() for c in comms]
files = list(gatewaywrapper.gateway.getObjects("FileAnnotation"))
for f in files:
assert f.OMERO_TYPE == fileAnn.OMERO_TYPE
assert fileId in [f.getId() for f in files]
doubles = list(gatewaywrapper.gateway.getObjects("DoubleAnnotation"))
for d in doubles:
assert d.OMERO_TYPE == doubleAnn.OMERO_TYPE
assert doubleId in [d.getId() for d in doubles]
terms = list(gatewaywrapper.gateway.getObjects("TermAnnotation"))
for t in terms:
assert t.OMERO_TYPE == termAnn.OMERO_TYPE
assert termId in [t.getId() for t in terms]
times = list(gatewaywrapper.gateway.getObjects("TimestampAnnotation"))
for t in times:
assert t.OMERO_TYPE == timeAnn.OMERO_TYPE
assert timeId in [t.getId() for t in times]
# delete what we created
gatewaywrapper.gateway.deleteObjects(
"Annotation", [longId, boolId, fileId, commId, tagId], wait=True)
assert gatewaywrapper.gateway.getObject("Annotation", longId) is None
assert gatewaywrapper.gateway.getObject("Annotation", boolId) is None
assert gatewaywrapper.gateway.getObject("Annotation", fileId) is None
assert gatewaywrapper.gateway.getObject("Annotation", commId) is None
assert gatewaywrapper.gateway.getObject("Annotation", tagId) is None
class TestGetObject (ITest):
def testSearchObjects(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
# search for Projects
pros = list(gatewaywrapper.gateway.searchObjects(
["Project"], "weblitz"))
for p in pros:
# assert p.getId() in projectIds
assert p.OMERO_CLASS == "Project", "Should only return Projects"
# P/D/I is default objects to search
# pdis = list( gatewaywrapper.gateway.simpleSearch("weblitz") ) #
# method removed from blitz gateway
# pdis.sort(key=lambda r: "%s%s"%(r.OMERO_CLASS, r.getId()) )
pdiResult = list(gatewaywrapper.gateway.searchObjects(
None, "weblitz"))
pdiResult.sort(key=lambda r: "%s%s" % (r.OMERO_CLASS, r.getId()))
# can directly check that sorted lists are the same
# for r1, r2 in zip(pdis, pdiResult):
# assert r1.OMERO_CLASS == r2.OMERO_CLASS
# assert r1.getId() == r2.getId()
def testListProjects(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
# params limit query by owner
params = omero.sys.Parameters()
params.theFilter = omero.sys.Filter()
conn = gatewaywrapper.gateway
# should be no Projects owned by root (in the current group)
params.theFilter.ownerId = omero.rtypes.rlong(0) # owned by 'root'
pros = conn.getObjects("Project", None, params)
assert len(list(pros)) == 0, "Should be no Projects owned by root"
# Also filter by owner using opts dict
pros = conn.getObjects("Project", None, opts={'owner': 0})
assert len(list(pros)) == 0, "Should be no Projects owned by root"
# filter by current user should get same as above. # owned by 'author'
params.theFilter.ownerId = omero.rtypes.rlong(
conn.getEventContext().userId)
pros = list(conn.getObjects(
"Project", None, params))
projects = list(conn.listProjects())
# check unordered lists are the same length & ids
assert len(pros) == len(projects)
projectIds = [p.getId() for p in projects]
for p in pros:
assert p.getId() in projectIds
def testPagination(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
params = omero.sys.ParametersI()
# Only 3 images available
limit = 2
params.page(0, limit)
pros = list(gatewaywrapper.gateway.getObjects(
"Project", None, params))
assert len(pros) == limit
# Also using opts dict
pros = list(gatewaywrapper.gateway.getObjects(
"Project", None, opts={'offset': 0, 'limit': 2}))
assert len(pros) == limit
def testGetDatasetsByProject(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
allDs = list(gatewaywrapper.gateway.getObjects("Dataset"))
# Get Datasets by project.listChildren()...
project = gatewaywrapper.getTestProject()
dsIds = [d.id for d in project.listChildren()]
# Get Datasets, filtering by project
p = {'project': project.id}
datasets = list(gatewaywrapper.gateway.getObjects("Dataset", opts=p))
# Check that not all Datasets are in Project (or test is invalid)
assert len(allDs) > len(dsIds)
# Should get same result both methods
assert len(datasets) == len(dsIds)
for d in datasets:
assert d.id in dsIds
@pytest.mark.parametrize("load_gem", [True, False])
def testListExperimentersAndGroups(self, gatewaywrapper, load_gem):
gatewaywrapper.loginAsAuthor()
conn = gatewaywrapper.gateway
# experimenters - load_experimentergroups True by default
opts = {'limit': 10}
if not load_gem:
opts['load_experimentergroups'] = False
exps = conn.getObjects("Experimenter", opts=opts)
for e in exps:
# check iQuery has loaded at least one group
assert e._obj.groupExperimenterMapLoaded == load_gem
e.copyGroupExperimenterMap()
# groups. load_experimenters True by default
opts = {'limit': 10}
if not load_gem:
opts['load_experimenters'] = False
gps = conn.getObjects("ExperimenterGroup", opts=opts)
for grp in gps:
assert grp._obj.groupExperimenterMapLoaded == load_gem
grp.copyGroupExperimenterMap()
def testListColleagues(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
conn = gatewaywrapper.gateway
# uses gateway.getObjects("ExperimenterGroup") - check this doesn't
# throw
colleagues = conn.listColleagues()
for e in colleagues:
e.getOmeName()
def testFindExperimenterWithGroups(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
conn = gatewaywrapper.gateway
# check we can find some groups
exp = conn.getObject(
"Experimenter", attributes={'omeName': gatewaywrapper.USER.name})
for groupExpMap in exp.copyGroupExperimenterMap():
gName = groupExpMap.parent.name.val
gId = groupExpMap.parent.id.val
findG = gatewaywrapper.gateway.getObject(
"ExperimenterGroup", attributes={'name': gName})
assert gId == findG.id, "Check we found the same group"
@pytest.mark.parametrize("load", [True, False])
def testGetExperimentersByGroup(self, gatewaywrapper, load):
"""
Filter Groups by Experimenters and vice versa.
We test with and without loading experimenters/groups to check
that the query is built correctly in both cases
"""
gatewaywrapper.loginAsAdmin()
conn = gatewaywrapper.gateway
# Two users in the same group...
client, exp1 = self.new_client_and_user()
grp1_id = client.sf.getAdminService().getEventContext().groupId
exp2 = self.new_user(group=grp1_id)
# Another group with one user
grp2 = self.new_group(experimenters=[exp1])
# get Groups by Experimenters (in 1 or 2 groups + user group)
groups = list(conn.getObjects("ExperimenterGroup", opts={
"experimenter": exp2.id.val, 'load_experimenters': load}))
assert len(groups) == 2
assert grp1_id in [g.id for g in groups]
groups = list(conn.getObjects("ExperimenterGroup", opts={
"experimenter": exp1.id.val, 'load_experimenters': load}))
assert len(groups) == 3
# get Experimenters by Group (returns 1 or 2 exps)
exps = list(conn.getObjects("Experimenter", opts={
"experimentergroup": grp2.id.val,
"load_experimentergroups": load}))
assert len(exps) == 1
assert exps[0].id == exp1.id.val
exps = list(conn.getObjects("Experimenter", opts={
"experimentergroup": grp1_id,
"load_experimentergroups": load}))
assert len(exps) == 2
def testGetExperimenter(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
noExp = gatewaywrapper.gateway.getObject(
"Experimenter", attributes={'omeName': "Dummy Fake Name"})
assert noExp is None, "Should not find any matching experimenter"
findExp = gatewaywrapper.gateway.getObject(
"Experimenter", attributes={'omeName': gatewaywrapper.USER.name})
exp = gatewaywrapper.gateway.getObject(
"Experimenter", findExp.id)
assert exp.omeName == findExp.omeName
# check groupExperimenterMap loaded for exp
groupIds = []
for groupExpMap in exp.copyGroupExperimenterMap():
assert findExp.id == groupExpMap.child.id.val
groupIds.append(groupExpMap.parent.id.val)
# for groupExpMap in experimenter.copyGroupExperimenterMap():
# assert findExp.id == groupExpMap.child.id.val
groupGen = gatewaywrapper.gateway.getObjects(
"ExperimenterGroup", groupIds, opts={'load_experimenters': True})
groups = list(groupGen)
assert len(groups) == len(groupIds)
for g in groups:
assert g.getId() in groupIds
for m in g.copyGroupExperimenterMap(): # check exps are loaded
assert m.child
def testGetAnnotations(self, gatewaywrapper, author_testimg_tiny):
obj = author_testimg_tiny
dataset = gatewaywrapper.getTestDataset()
ns = "omero.gateway.test.get_objects.test_get_annotations_comment"
ns_tag = "omero.gateway.test.get_objects.test_get_annotations_tag"
# create Comment
ann = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway)
ann.setNs(ns)
ann.setValue("Test Comment")
ann = obj.linkAnnotation(ann)
# create Tag
tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway)
tag.setNs(ns_tag)
tag.setValue("Test Tag")
tag = obj.linkAnnotation(tag)
dataset.linkAnnotation(tag)
# get the Comment
annotation = gatewaywrapper.gateway.getObject(
"CommentAnnotation", ann.id)
assert "Test Comment" == annotation.textValue
assert ann.OMERO_TYPE == annotation.OMERO_TYPE
# test getObject throws exception if more than 1 returned
threw = True
try:
gatewaywrapper.gateway.getObject("Annotation")
threw = False
except Exception:
threw = True
assert threw, "getObject() didn't throw exception with >1 result"
# get the Comment and Tag
annGen = gatewaywrapper.gateway.getObjects(
"Annotation", [ann.id, tag.id])
anns = list(annGen)
assert len(anns) == 2
assert anns[0].ns in [ns, ns_tag]
assert anns[1].ns in [ns, ns_tag]
assert anns[0].OMERO_TYPE != anns[1].OMERO_TYPE
# get all available annotation links on the image
annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image")
for al in annLinks:
assert isinstance(al.getAnnotation(),
omero.gateway.AnnotationWrapper)
assert al.parent.__class__ == omero.model.ImageI
# get selected links - On image only
annLinks = gatewaywrapper.gateway.getAnnotationLinks(
"Image", parent_ids=[obj.getId()])
for al in annLinks:
assert obj.getId() == al.parent.id.val
assert al.parent.__class__ == omero.model.ImageI
# compare with getObjectsByAnnotations
annImages = list(gatewaywrapper.gateway.getObjectsByAnnotations(
'Image', [tag.getId()]))
assert obj.getId() in [i.getId() for i in annImages]
# params limit query by owner
params = omero.sys.Parameters()
params.theFilter = omero.sys.Filter()
# should be no links owned by root (in the current group)
params.theFilter.ownerId = omero.rtypes.rlong(0) # owned by 'root'
annLinks = gatewaywrapper.gateway.getAnnotationLinks(
"Image", parent_ids=[obj.getId()], params=params)
assert len(list(annLinks)) == 0, \
"No annotations on this image by root"
# links owned by author
eid = gatewaywrapper.gateway.getEventContext().userId
params.theFilter.ownerId = omero.rtypes.rlong(eid) # owned by 'author'
omeName = gatewaywrapper.gateway.getObject(
"Experimenter", eid).getName()
annLinks = gatewaywrapper.gateway.getAnnotationLinks(
"Image", parent_ids=[obj.getId()], params=params)
for al in annLinks:
assert al.getOwnerOmeName() == omeName
# all links on Image with specific ns
annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", ns=ns)
for al in annLinks:
assert al.getAnnotation().ns == ns
# get all uses of the Tag - have to check various types separately
annList = list(gatewaywrapper.gateway.getAnnotationLinks(
"Image", ann_ids=[tag.id]))
assert len(annList) == 1
for al in annList:
assert al.getAnnotation().id == tag.id
annList = list(gatewaywrapper.gateway.getAnnotationLinks(
"Dataset", ann_ids=[tag.id]))
assert len(annList) == 1
for al in annList:
assert al.getAnnotation().id == tag.id
# remove annotations
obj.removeAnnotations(ns)
dataset.unlinkAnnotations(ns_tag) # unlink tag
obj.removeAnnotations(ns_tag) # delete tag
def testGetImage(self, gatewaywrapper, author_testimg_tiny):
testImage = author_testimg_tiny
# This should return image wrapper
image = gatewaywrapper.gateway.getObject("Image", testImage.id)
# test a few methods that involve lazy loading, rendering etc.
assert image.getSizeZ() == testImage.getSizeZ()
assert image.getSizeY() == testImage.getSizeY()
image.isGreyscaleRenderingModel() # loads rendering engine
testImage.isGreyscaleRenderingModel()
assert image._re.getDefaultZ() == testImage._re.getDefaultZ()
assert image._re.getDefaultT() == testImage._re.getDefaultT()
        assert image.getOwnerOmeName() == testImage.getOwnerOmeName()
assert image.getThumbVersion() is not None
@pytest.mark.parametrize("load_pixels", [True, False])
@pytest.mark.parametrize("load_channels", [True, False])
def testGetImageLoadPixels(self, load_pixels, load_channels,
gatewaywrapper, author_testimg_tiny):
testImage = author_testimg_tiny
conn = gatewaywrapper.gateway
# By default (no opts), don't load pixels
image = conn.getObject("Image", testImage.id)
assert not image._obj.isPixelsLoaded()
# parametrized opts...
opts = {'load_pixels': load_pixels, 'load_channels': load_channels}
image = conn.getObject("Image", testImage.id, opts=opts)
# pixels are also loaded if load_channels
pix_loaded = load_pixels or load_channels
assert image._obj.isPixelsLoaded() == pix_loaded
if pix_loaded:
pixels = image._obj._pixelsSeq[0]
assert pixels.getPixelsType().isLoaded()
if load_channels:
assert pixels.isChannelsLoaded()
for c in pixels.copyChannels():
lc = c.getLogicalChannel()
assert lc.getPhotometricInterpretation().isLoaded()
else:
assert not pixels.isChannelsLoaded()
def testGetProject(self, gatewaywrapper):
gatewaywrapper.loginAsAuthor()
testProj = gatewaywrapper.getTestProject()
p = gatewaywrapper.gateway.getObject("Project", testProj.getId())
assert testProj.getName() == p.getName()
assert testProj.getDescription() == p.getDescription()
assert testProj.getId() == p.getId()
assert testProj.OMERO_CLASS == p.OMERO_CLASS
assert testProj.countChildren_cached() == p.countChildren_cached()
        assert testProj.getOwnerOmeName() == p.getOwnerOmeName()
def testTraversal(self, author_testimg_tiny):
image = author_testimg_tiny
# This should return image wrapper
pr = image.getProject()
ds = image.getParent()
assert image.listParents()[0] == image.getParent()
assert ds == image.getParent(withlinks=True)[0]
assert image.getParent(withlinks=True) == \
image.listParents(withlinks=True)[0]
assert ds.getParent() == pr
assert pr.getParent() is None
assert len(pr.listParents()) == 0
@pytest.mark.parametrize("orphaned", [True, False])
@pytest.mark.parametrize("load_pixels", [False, False])
def testListOrphans(self, orphaned, load_pixels, gatewaywrapper):
# We login as 'User', since they have no other orphaned images
gatewaywrapper.loginAsUser()
conn = gatewaywrapper.gateway
eid = conn.getUserId()
# Create 5 orphaned images
iids = []
for i in range(0, 5):
img = gatewaywrapper.createTestImage(imageName=str(uuid.uuid1()))
iids.append(img.id)
# Create image in Dataset, to check this isn't found
dataset = DatasetI()
dataset.name = wrap('testListOrphans')
image = ImageI()
image.name = wrap('testListOrphans')
dataset.linkImage(image)
dataset = conn.getUpdateService().saveAndReturnObject(dataset)
try:
# Only test listOrphans() if orphaned
if orphaned:
# Pagination
params = omero.sys.ParametersI()
params.page(1, 3)
findImagesInPage = list(conn.listOrphans("Image", eid=eid,
params=params))
assert len(findImagesInPage) == 3
# No pagination (all orphans)
findImages = list(conn.listOrphans("Image",
loadPixels=load_pixels))
assert len(findImages) == 5
for p in findImages:
assert p._obj.pixelsLoaded == load_pixels
# Test getObjects() with 'orphaned' option
opts = {'orphaned': orphaned, 'load_pixels': load_pixels}
getImages = list(conn.getObjects("Image", opts=opts))
assert orphaned == (len(getImages) == 5)
for p in getImages:
assert p._obj.pixelsLoaded == load_pixels
            # Simply check this doesn't fail. See https://github.com/
            # openmicroscopy/openmicroscopy/pull/4950#issuecomment-264142956
dsIds = [d.id for d in conn.listOrphans("Dataset")]
assert dataset.id.val in dsIds
finally:
# Cleanup - Delete what we created
conn.deleteObjects('Image', iids, deleteAnns=True, wait=True)
conn.deleteObjects('Dataset', [dataset.id.val],
deleteChildren=True, wait=True)
def testOrderById(self, gatewaywrapper):
gatewaywrapper.loginAsUser()
imageIds = list()
for i in range(0, 3):
iid = gatewaywrapper.createTestImage(
"%s-testOrderById" % i).getId()
imageIds.append(iid)
images = gatewaywrapper.gateway.getObjects(
"Image", imageIds, respect_order=True)
resultIds = [i.id for i in images]
assert imageIds == resultIds, "Images not ordered by ID"
imageIds.reverse()
reverseImages = gatewaywrapper.gateway.getObjects(
"Image", imageIds, respect_order=True)
reverseIds = [i.id for i in reverseImages]
assert imageIds == reverseIds, "Images not ordered by ID"
wrappedIds = [rlong(i) for i in imageIds]
reverseImages = gatewaywrapper.gateway.getObjects(
"Image", wrappedIds, respect_order=True)
reverseIds = [i.id for i in reverseImages]
assert imageIds == reverseIds, "fails when IDs is list of rlongs"
invalidIds = imageIds[:]
invalidIds[1] = 0
reverseImages = gatewaywrapper.gateway.getObjects(
"Image", invalidIds, respect_order=True)
reverseIds = [i.id for i in reverseImages]
assert len(imageIds) - 1 == len(reverseIds), \
"One image not found by ID: 0"
# Delete to clean up
handle = gatewaywrapper.gateway.deleteObjects(
'Image', imageIds, deleteAnns=True)
try:
gatewaywrapper.gateway._waitOnCmd(handle)
finally:
handle.close()
@pytest.mark.parametrize("datatype", ['Image', 'Dataset', 'Project',
'Screen', 'Plate'])
def testGetObjectsByMapAnnotations(self, datatype):
client, exp = self.new_client_and_user()
conn = BlitzGateway(client_obj=client)
def createTarget(datatype, name, key="", value="", ns=None):
""" Creates an object and attaches a map annotation to it """
if datatype == "Image":
tgt = ImageWrapper(conn, omero.model.ImageI())
tgt.setName(name)
tgt.save()
if datatype == "Dataset":
tgt = DatasetWrapper(conn, omero.model.DatasetI())
tgt.setName(name)
tgt.save()
if datatype == "Project":
tgt = ProjectWrapper(conn, omero.model.ProjectI())
tgt.setName(name)
tgt.save()
if datatype == "Screen":
tgt = ScreenWrapper(conn, omero.model.ScreenI())
tgt.setName(name)
tgt.save()
if datatype == "Plate":
tgt = PlateWrapper(conn, omero.model.PlateI())
tgt.setName(name)
tgt.save()
for _ in range(0, 2):
# Add two map annotations to check that each object
# is still just returned once.
map_ann = omero.gateway.MapAnnotationWrapper(conn)
map_ann.setValue([(key, value)])
if ns:
map_ann.setNs(ns)
map_ann.save()
tgt.linkAnnotation(map_ann)
return tgt
name = str(uuid.uuid4())
key = str(uuid.uuid4())
value = str(uuid.uuid4())
ns = str(uuid.uuid4())
kv = createTarget(datatype, name, key=key, value=value)
v = createTarget(datatype, name, key=str(uuid.uuid4()), value=value)
k = createTarget(datatype, name, key=key, value=str(uuid.uuid4()))
kvn = createTarget(datatype, name, key=key, value=value, ns=ns)
n = createTarget(datatype, name, key=str(uuid.uuid4()),
value=str(uuid.uuid4()), ns=ns)
# 3x key matches, 3x value matches, 2x key+value matches,
# 2x ns matches, 1x key+value+ns matches
# No match
results = list(conn.getObjectsByMapAnnotations(datatype,
key=str(uuid.uuid4())))
assert len(results) == 0
# Key match
results = list(conn.getObjectsByMapAnnotations(datatype, key=key))
assert len(results) == 3
ids = [r.getId() for r in results]
assert k.getId() in ids
assert kv.getId() in ids
assert kvn.getId() in ids
# Key wildcard match
wc = "*"+key[2:12]+"*"
results = list(conn.getObjectsByMapAnnotations(datatype, key=wc))
assert len(results) == 3
ids = [r.getId() for r in results]
assert k.getId() in ids
assert kv.getId() in ids
assert kvn.getId() in ids
# Value match
results = list(conn.getObjectsByMapAnnotations(datatype, value=value))
assert len(results) == 3
ids = [r.getId() for r in results]
assert v.getId() in ids
assert kv.getId() in ids
assert kvn.getId() in ids
# Key+Value match
results = list(conn.getObjectsByMapAnnotations(datatype, key=key,
value=value))
assert len(results) == 2
ids = [r.getId() for r in results]
assert kv.getId() in ids
assert kvn.getId() in ids
# Key+Value wildcard match
wc = "*"+value[2:12]+"*"
results = list(conn.getObjectsByMapAnnotations(datatype, key=key,
value=wc))
assert len(results) == 2
ids = [r.getId() for r in results]
assert kv.getId() in ids
assert kvn.getId() in ids
# Key+Value wildcard doesn't match
wc = value[2:12]+"*"
results = list(conn.getObjectsByMapAnnotations(datatype, key=key,
value=wc))
assert len(results) == 0
# NS match
results = list(conn.getObjectsByMapAnnotations(datatype, ns=ns))
assert len(results) == 2
ids = [r.getId() for r in results]
assert n.getId() in ids
assert kvn.getId() in ids
# Key+Value+NS match
results = list(conn.getObjectsByMapAnnotations(datatype, key=key,
value=value, ns=ns))
assert len(results) == 1
assert kvn.getId() == results[0].getId()
# Test limit
results = list(conn.getObjectsByMapAnnotations(datatype))
assert len(results) == 5
results = list(conn.getObjectsByMapAnnotations(datatype,
opts={"limit": 4}))
assert len(results) == 4
class TestLeaderAndMemberOfGroup(object):
@pytest.fixture(autouse=True)
def setUp(self):
""" Create a group with owner & member"""
dbhelpers.USERS['group_owner'] = dbhelpers.UserEntry(
'group_owner', 'ome',
firstname='Group',
lastname='Owner',
groupname="ownership_test",
groupperms='rwr---',
groupowner=True)
dbhelpers.USERS['group_member'] = dbhelpers.UserEntry(
'group_member', 'ome',
firstname='Group',
lastname='Member',
groupname="ownership_test",
groupperms='rwr---',
groupowner=False)
dbhelpers.bootstrap(onlyUsers=True)
def testGetGroupsLeaderOfAsLeader(self, gatewaywrapper):
gatewaywrapper.doLogin(dbhelpers.USERS['group_owner'])
assert gatewaywrapper.gateway.isLeader()
grs = [g.id for g in gatewaywrapper.gateway.getGroupsLeaderOf()]
assert len(grs) > 0
exp = gatewaywrapper.gateway.getObject(
"Experimenter", attributes={'omeName': 'group_owner'})
assert exp.sizeOfGroupExperimenterMap() > 0
filter_system_groups = [gatewaywrapper.gateway.getAdminService()
.getSecurityRoles().userGroupId]
leaderOf = list()
for groupExpMap in exp.copyGroupExperimenterMap():
gId = groupExpMap.parent.id.val
if groupExpMap.owner.val and gId not in filter_system_groups:
leaderOf.append(gId)
        assert leaderOf == grs
def testGetGroupsLeaderOfAsMember(self, gatewaywrapper):
gatewaywrapper.doLogin(dbhelpers.USERS['group_member'])
assert not gatewaywrapper.gateway.isLeader()
with pytest.raises(StopIteration):
next(gatewaywrapper.gateway.getGroupsLeaderOf())
def testGetGroupsMemberOf(self, gatewaywrapper):
gatewaywrapper.doLogin(dbhelpers.USERS['group_member'])
assert not gatewaywrapper.gateway.isLeader()
grs = [g.id for g in gatewaywrapper.gateway.getGroupsMemberOf()]
assert len(grs) > 0
exp = gatewaywrapper.gateway.getObject(
"Experimenter", attributes={'omeName': "group_member"})
assert exp.sizeOfGroupExperimenterMap() > 0
filter_system_groups = [gatewaywrapper.gateway.getAdminService()
.getSecurityRoles().userGroupId]
memberOf = list()
for groupExpMap in exp.copyGroupExperimenterMap():
gId = groupExpMap.parent.id.val
if not groupExpMap.owner.val and gId not in filter_system_groups:
memberOf.append(gId)
assert memberOf == grs
def testGroupSummaryAsOwner(self, gatewaywrapper):
"""Test groupSummary() when Group loaded without experimenters."""
gatewaywrapper.doLogin(dbhelpers.USERS['group_owner'])
expGr = gatewaywrapper.gateway.getObject(
"ExperimenterGroup", attributes={'name': 'ownership_test'})
leaders, colleagues = expGr.groupSummary()
assert len(leaders) == 1
assert len(colleagues) == 1
assert leaders[0].omeName == "group_owner"
assert colleagues[0].omeName == "group_member"
leaders, colleagues = expGr.groupSummary(exclude_self=True)
assert len(leaders) == 0
assert len(colleagues) == 1
assert colleagues[0].omeName == "group_member"
def testGroupSummaryAsMember(self, gatewaywrapper):
gatewaywrapper.doLogin(dbhelpers.USERS['group_member'])
expGr = gatewaywrapper.gateway.getObject(
"ExperimenterGroup", attributes={'name': 'ownership_test'})
leaders, colleagues = expGr.groupSummary()
assert len(leaders) == 1
assert len(colleagues) == 1
assert leaders[0].omeName == "group_owner"
assert colleagues[0].omeName == "group_member"
leaders, colleagues = expGr.groupSummary(exclude_self=True)
assert len(leaders) == 1
assert leaders[0].omeName == "group_owner"
assert len(colleagues) == 0
def testGroupSummaryAsOwnerDeprecated(self, gatewaywrapper):
gatewaywrapper.doLogin(dbhelpers.USERS['group_owner'])
summary = gatewaywrapper.gateway.groupSummary()
assert len(summary["leaders"]) == 1
assert len(summary["colleagues"]) == 1
assert summary["leaders"][0].omeName == "group_owner"
assert summary["colleagues"][0].omeName == "group_member"
summary = gatewaywrapper.gateway.groupSummary(exclude_self=True)
assert len(summary["leaders"]) == 0
assert len(summary["colleagues"]) == 1
assert summary["colleagues"][0].omeName == "group_member"
def testGroupSummaryAsMemberDeprecated(self, gatewaywrapper):
gatewaywrapper.doLogin(dbhelpers.USERS['group_member'])
summary = gatewaywrapper.gateway.groupSummary()
assert len(summary["leaders"]) == 1
assert len(summary["colleagues"]) == 1
assert summary["leaders"][0].omeName == "group_owner"
assert summary["colleagues"][0].omeName == "group_member"
summary = gatewaywrapper.gateway.groupSummary(exclude_self=True)
assert len(summary["leaders"]) == 1
assert summary["leaders"][0].omeName == "group_owner"
assert len(summary["colleagues"]) == 0
class TestListParents(ITest):
def testSupportedObjects(self):
"""
Check that we are testing all objects where listParents() is supported.
If this test fails, need to update tested_wrappers and add
corresponding tests below
"""
tested_wrappers = ['plate', 'image', 'dataset', 'experimenter', 'well']
for key, wrapper in list(KNOWN_WRAPPERS.items()):
if (hasattr(wrapper, 'PARENT_WRAPPER_CLASS') and
wrapper.PARENT_WRAPPER_CLASS is not None):
assert key in tested_wrappers
def testListParentsPDI(self):
"""Test listParents() for Image in Dataset"""
# Set up PDI
client, exp = self.new_client_and_user()
p = self.make_project(name="ListParents Test", client=client)
d = self.make_dataset(name="ListParents Test", client=client)
i = self.make_image(name="ListParents Test", client=client)
self.link(p, d, client=client)
self.link(d, i, client=client)
conn = BlitzGateway(client_obj=client)
image = conn.getObject("Image", i.id.val)
# Traverse from Image -> Project
dataset = image.listParents()[0]
assert dataset.id == d.id.val
project = dataset.listParents()[0]
assert project.id == p.id.val
# Project has no parent
assert len(project.listParents()) == 0
def testListParentsSPW(self):
"""Test listParents() for Image in WellSample"""
client, exp = self.new_client_and_user()
conn = BlitzGateway(client_obj=client)
# setup SPW-WS-Img...
s = ScreenI()
s.name = wrap('ScreenA')
p = PlateI()
p.name = wrap('PlateA')
s.linkPlate(p)
w = WellI()
w.column = wrap(0)
w.row = wrap(0)
p.addWell(w)
s = client.sf.getUpdateService().saveAndReturnObject(s)
p = s.linkedPlateList()[0]
w = p.copyWells()[0]
i = self.make_image(name="SPW listParents", client=client)
ws = WellSampleI()
ws.image = i
ws.well = WellI(w.id.val, False)
w.addWellSample(ws)
ws = client.sf.getUpdateService().saveAndReturnObject(ws)
# Traverse from Image -> Screen
image = conn.getObject("Image", i.id.val)
wellSample = image.listParents()[0]
well = wellSample.listParents()[0]
assert well.id == w.id.val
plate = well.listParents()[0]
assert plate.id == p.id.val
screen = plate.listParents()[0]
assert screen.id == s.id.val
# Screen has no parent
assert len(screen.listParents()) == 0
def testExperimenterListParents(self):
"""Test listParents() for Experimenter in ExperimenterGroup."""
client, exp = self.new_client_and_user()
conn = BlitzGateway(client_obj=client)
userGroupId = conn.getAdminService().getSecurityRoles().userGroupId
exp = conn.getUser()
groups = exp.listParents()
assert len(groups) == 2
gIds = [g.id for g in groups]
assert userGroupId in gIds
# ExperimenterGroup has no parent
assert len(groups[0].listParents()) == 0
|
gpl-2.0
| -8,658,934,620,634,377,000
| 39.918018
| 79
| 0.615381
| false
| 4.025793
| true
| false
| false
|
WA4OSH/Learn_Python
|
oldLady.py
|
1
|
2383
|
#-------------------------------------------------------------------------------
# Name: oldLady.py
# Purpose: Demo of program control, loops, branches, etc.
#
# Author:      Konrad Roeder, adapted from the nursery rhyme
# There was an Old Lady song from the
# Secret History of Nursery Rhymes Book
# www.rhymes.uk/there_was_an_old_lady.htm
#
# Created: 04/16/2014
# Copyright: (cc) Konrad Roeder 2014
# Licence: CC by 4.0
#-------------------------------------------------------------------------------
#There are seven animals in this song, one for each verse
animalName = ['fly','spider','bird','cat','dog','cow','horse']
#Each verse in the song starts with this section, printing this line
def printSectionA(verse):
print("There was an old lady who swallowed a",animalName[verse-1])
#In section B, the line is different for each verse
def printSectionB(verse):
#if (verse == 1): Do nothing
if (verse == 2):
print("That wriggled and wiggled and tickled inside her")
elif (verse == 3):
print("How absurd to swallow a bird")
elif (verse == 4):
print("Fancy that to swallow a cat")
elif (verse == 5):
print("What a hog to swallow a dog")
elif (verse == 6):
print("I don't know how she swallowed a cow")
elif (verse == 7):
print("She's dead, of course!")
def printSectionC(verse):
#This section only has lines in the middle five verses
if (verse < 7):
#The for loop drops through on the first verse
#In verses 2-6, it prints one line less than the verse number
for line in range(verse-1, 0, -1):
print("She swallowed the",animalName[line],
"to catch the", animalName[line-1])
def printSectionD(verse):
    #This section exists only in the first six verses
if (verse < 7):
print("I don't know why she swallowed a fly - Perhaps she will die!")
print("")
def song():
#Print the title
print("There was an Old Lady song")
print("")
#Print each of the seven verses
for verse in range(1,8):
#Each verse has four sections
printSectionA(verse)
printSectionB(verse)
printSectionC(verse)
printSectionD(verse)
#Print the song's coda (ending)
print("")
print("There was an Old Lady song")
song()
|
cc0-1.0
| 2,651,610,528,392,757,000
| 33.536232
| 80
| 0.579522
| false
| 3.621581
| false
| false
| false
|
linsalrob/EdwardsLab
|
patric/parse_gto.py
|
1
|
2770
|
"""
Parse a GTO object
"""
import os
import sys
import argparse
from roblib import bcolors
import json
def list_keys(gto, verbose=False):
"""
List the primary keys in the patric file
:param gto: the json gto
:param verbose: more output
:return:
"""
print("{}".format("\n".join(gto.keys())))
def dump_json(gto, k, verbose=False):
"""
Print out the json representation of some data
:param gto: the json gto
:param k: the key to dump (none for everything)
:param verbose: more output
:return:
"""
if k:
if k in gto:
print(json.dumps(gto[k], indent=4))
else:
sys.stderr.write(f"{bcolors.RED}ERROR: {k} not found.{bcolors.ENDC}\n")
else:
print(json.dumps(gto, indent=4))
def feature_tbl(gto, verbose=False):
"""
Print a tab separated feature table
:param gto: the json gto
:param verbose: more output
:return:
"""
for peg in gto['features']:
if 'location' not in peg:
sys.stderr.write(f"{bcolors.RED}Error: no location found\n{bcolors.PINK}{peg}{bcolors.ENDC}\n")
continue
locs = []
for l in peg['location']:
start = int(l[1])
if l[2] == '+':
stop = (start + int(l[3])) - 1
elif l[2] == '-':
start = (start - int(l[3])) + 1
stop = int(l[1])
else:
sys.stderr.write(f"{bcolors.RED}Error: Don't know location l[2]\n{bcolors.ENDC}")
continue
locs.append(f"{l[0]} {start} - {stop} ({l[2]})")
data = [
peg['id'],
peg['function'],
"; ".join(locs)
]
print("\t".join(data))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Parse a PATRIC GTO (genome typed object) file")
parser.add_argument('-f', help='gto file', required=True)
parser.add_argument('-l', help='list the primary keys and exit', action='store_true')
parser.add_argument('-d', help='dump some part of the json object', action='store_true')
parser.add_argument('-p', help='print protein feature table', action='store_true')
parser.add_argument('-k', help='json primary key (e.g. for dumping, etc)')
parser.add_argument('-o', help='output file')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
gto = json.load(open(args.f, 'r'))
if args.l:
list_keys(gto, args.v)
sys.exit(0)
if args.d:
dump_json(gto, args.k, args.v)
sys.exit(0)
if args.p:
feature_tbl(gto, args.v)
sys.exit(0)
sys.stderr.write(f"{bcolors.RED}ERROR: You did not specify a command to run{bcolors.ENDC}\n")
|
mit
| -2,918,537,910,248,930,000
| 27.556701
| 107
| 0.558123
| false
| 3.349456
| false
| false
| false
|
BenKettlewell/Livestreamer-Command-Line-Generator
|
livestreamerCLG.py
|
1
|
2267
|
''' Parses crunchyroll URLs and provides a string command line argument to
download them.
Utilizing youtube-dl to split sub and video files but livestreamer functionality
can be added with minimal effort
-h, --help Output this help document
-u, --url Provide a single url
-f, --file Provide location of csv file
File format (do not include headers)
crunchyroll_url,subtitle_url,season#
#subtitle_url not implemented yet
-c, Use cookie file located at $COOKIES instead of password auth
--cookie-auth
'''
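# A minimal sketch of the expected inputs, for illustration only; the URLs and
# the file name "shows.csv" below are hypothetical, not taken from this project:
#
#   shows.csv (crunchyroll_url,subtitle_url,season#):
#     http://www.crunchyroll.com/some-show/episode-1,,1
#     http://www.crunchyroll.com/some-show/episode-2,,1
#
#   python livestreamerCLG.py --file shows.csv --cookie-auth
#   python livestreamerCLG.py --url http://www.crunchyroll.com/some-show/episode-1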
from urlparse import urlparse
import sys # Command Line Arguments
import getopt # Parse CLI Args
import re # Regular Expressions
from CrunchyCSV import CrunchyCSV
from Anime import Anime
from crunchyroll import outputer
from shell import downloader
def main (argv):
''' This program has 3 distinct stages.
1. Request a set of urls from the user and store them
2. Parse and formulate the compiled Livestreamer command
3. Return the string to the user
'''
urls = ''
file_csv = ''
auth_method = 'password'
# parse command line options
try:
opts, args = getopt.getopt(argv, "hu:f:c", ["help","url=","file=","cookie-auth"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(1)
if o in ("-u", "--url"):
urls = a
print'urls are :', a
if o in ("-f", "--file"):
file_csv = a
print'csv_file :', a
if o in ("-c","--cookie-auth"):
auth_method = 'cookies'
print'using cookies'
# process arguments
for arg in args:
process(arg) # process() is defined elsewhere
if file_csv != '':
crunchyCSV = CrunchyCSV(file_csv)
print outputer.youtube_dl_string_for_CrunchyCSV(crunchyCSV, auth_method)
print outputer.list_of_anime_filenames(crunchyCSV)
else:
anime = Anime(urls, '', '')
print outputer.youtube_dl_string_for_Anime(anime, auth_method)
print downloader.sub_call()
if __name__ == "__main__":
main(sys.argv[1:])
|
gpl-2.0
| -2,636,595,250,218,402,000
| 31.385714
| 89
| 0.613586
| false
| 3.704248
| false
| false
| false
|
root-mirror/root
|
tutorials/roofit/rf604_constraints.py
|
11
|
2705
|
## \file
## \ingroup tutorial_roofit
## \notebook -nodraw
## Likelihood and minimization: fitting with constraints
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
from __future__ import print_function
import ROOT
# Create model and dataset
# ----------------------------------------------
# Construct a Gaussian pdf
x = ROOT.RooRealVar("x", "x", -10, 10)
m = ROOT.RooRealVar("m", "m", 0, -10, 10)
s = ROOT.RooRealVar("s", "s", 2, 0.1, 10)
gauss = ROOT.RooGaussian("gauss", "gauss(x,m,s)", x, m, s)
# Construct a flat pdf (polynomial of 0th order)
poly = ROOT.RooPolynomial("poly", "poly(x)", x)
# model = f*gauss + (1-f)*poly
f = ROOT.RooRealVar("f", "f", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model",
"model",
ROOT.RooArgList(
gauss,
poly),
ROOT.RooArgList(f))
# Generate small dataset for use in fitting below
d = model.generate(ROOT.RooArgSet(x), 50)
# Create constraint pdf
# -----------------------------------------
# Construct Gaussian constraint pdf on parameter f at 0.8 with
# resolution of 0.1
fconstraint = ROOT.RooGaussian(
"fconstraint",
"fconstraint",
f,
ROOT.RooFit.RooConst(0.8),
ROOT.RooFit.RooConst(0.1))
# Method 1 - add internal constraint to model
# -------------------------------------------------------------------------------------
# Multiply constraint term with regular pdf using ROOT.RooProdPdf
# Specify in fitTo() that internal constraints on parameter f should be
# used
# Multiply constraint with pdf
modelc = ROOT.RooProdPdf(
"modelc", "model with constraint", ROOT.RooArgList(model, fconstraint))
# Fit model (without use of constraint term)
r1 = model.fitTo(d, ROOT.RooFit.Save())
# Fit modelc with constraint term on parameter f
r2 = modelc.fitTo(
d,
ROOT.RooFit.Constrain(
ROOT.RooArgSet(f)),
ROOT.RooFit.Save())
# Method 2 - specify external constraint when fitting
# ------------------------------------------------------------------------------------------
# Construct another Gaussian constraint pdf on parameter f at 0.8 with
# resolution of 0.1
fconstext = ROOT.RooGaussian("fconstext", "fconstext", f, ROOT.RooFit.RooConst(
0.2), ROOT.RooFit.RooConst(0.1))
# Fit with external constraint
r3 = model.fitTo(d, ROOT.RooFit.ExternalConstraints(
ROOT.RooArgSet(fconstext)), ROOT.RooFit.Save())
# Print the fit results
print("fit result without constraint (data generated at f=0.5)")
r1.Print("v")
print("fit result with internal constraint (data generated at f=0.5, is f=0.8+/-0.2)")
r2.Print("v")
print("fit result with (another) external constraint (data generated at f=0.5, is f=0.2+/-0.1)")
r3.Print("v")
|
lgpl-2.1
| 5,271,455,174,840,602,000
| 28.402174
| 96
| 0.621072
| false
| 3.174883
| false
| false
| false
|
valdur55/py3status
|
py3status/modules/keyboard_locks.py
|
1
|
2349
|
r"""
Display NumLock, CapsLock, and ScrLock keys.
Configuration parameters:
cache_timeout: refresh interval for this module (default 1)
format: display format for this module
*(default '[\?if=num_lock&color=good NUM|\?color=bad NUM] '
'[\?if=caps_lock&color=good CAPS|\?color=bad CAPS] '
'[\?if=scroll_lock&color=good SCR|\?color=bad SCR]')*
Control placeholders:
{num_lock} a boolean based on xset data
{caps_lock} a boolean based on xset data
{scroll_lock} a boolean based on xset data
Color options:
color_good: Lock on
color_bad: Lock off
Examples:
```
# hide NUM, CAPS, SCR
keyboard_locks {
format = '\?color=good [\?if=num_lock NUM][\?soft ]'
format += '[\?if=caps_lock CAPS][\?soft ][\?if=scroll_lock SCR]'
}
```
@author lasers
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': 'NUM CAPS SCR'}
no_locks
{'color': '#FF0000', 'full_text': 'NUM CAPS SCR'}
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 1
format = (
r"[\?if=num_lock&color=good NUM|\?color=bad NUM] "
r"[\?if=caps_lock&color=good CAPS|\?color=bad CAPS] "
r"[\?if=scroll_lock&color=good SCR|\?color=bad SCR]"
)
def post_config_hook(self):
items = [
"icon_num_on",
"icon_num_off",
"icon_caps_on",
"icon_caps_off",
"icon_scr_on",
"icon_scr_off",
]
if self.py3.format_contains(self.format, ["caps", "num", "scr"]) or (
any(getattr(self, v, None) is not None for v in items)
):
raise Exception("please update the config for this module")
# end deprecation
self.locks = {}
self.keyring = {"num_lock": "Num", "caps_lock": "Caps", "scroll_lock": "Scroll"}
def keyboard_locks(self):
xset_data = self.py3.command_output("xset q")
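        # The parse below relies on the "XKB indicators" line of `xset q`;
        # an illustrative sample (exact output varies by X server/version):
        #   00: Caps Lock:   off    01: Num Lock:    on     02: Scroll Lock: off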
for k, v in self.keyring.items():
self.locks[k] = "on" in xset_data.split("%s Lock:" % v)[1][0:6]
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, self.locks),
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
bsd-3-clause
| -3,356,702,791,891,390,500
| 26.635294
| 88
| 0.57301
| false
| 3.182927
| false
| false
| false
|
MaxIV-KitsControls/netspot
|
netspot/ts_lib.py
|
1
|
5698
|
#!/usr/bin/python -tt
"""Junos Interface Troubleshooting library."""
import re
import warnings
import helpers
from napalm import get_network_driver
from jnpr.junos.exception import ConnectRefusedError, ConnectAuthError
# JUNOS MAC table RE
RE_VLAN = r'\s+([\w\d-]+)\s+'
RE_MAC = r'\s?([*\w\d:]+)\s+'
RE_TYPE = r'\s?([\w]+) '
RE_AGE = r'\s+([-\d:]+)'
RE_INTERFACE = r'\s+([-.\w\d/]+)'
RE_SWITCHING_TABLE = RE_VLAN + RE_MAC + RE_TYPE + RE_AGE + RE_INTERFACE
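# RE_SWITCHING_TABLE is intended to match one entry of Junos
# "show ethernet-switching table" output; an illustrative line (values made up):
#   v100              00:11:22:33:44:55   Learn          0   ge-0/0/1.0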
class TroubleshootDevice(object):
"""Class to help troubleshoot device."""
def __init__(self, asset, loopback, ssh_key, interface_name):
self.asset = asset
self.loopback = loopback
self.ssh_key = ssh_key
self.interface_name = interface_name
self.mac_address = None
self.dhcp_logs = None
self.dhcp_error = None
self.log_entries = None
self.interface = None
self.error_message = None
self.macs = {}
self.lldp = {}
def run(self):
"""Run troubleshooter."""
try:
# Connect to asset
driver = get_network_driver('junos')
device = driver(self.loopback,
'automation',
'',
optional_args={'key_file': self.ssh_key})
device.open()
with warnings.catch_warnings(record=True) as warning:
warnings.filterwarnings('ignore')
# Check interface
cmd = 'show interfaces {0} detail'.format(self.interface_name)
show_interface = device.cli([cmd])
self.interface = Interface(show_interface[cmd])
if self.interface.link_state == 'Up':
# Get LLDP neighbor
cmd = 'show lldp neighbors interface {0}'.format(self.interface_name)
lldp_neighbor = device.cli([cmd])
self.lldp = LLDP(lldp_neighbor[cmd])
# Check MAC table
cmd = 'show ethernet-switching table interface {0}'.format(self.interface_name)
mac_table = device.cli([cmd])
self.macs = MACTable(mac_table[cmd])
# Search DHCP logs if MAC is specified
if self.macs:
self.mac_address = self.macs.mac_entries[0]['mac']
dhcp_server = helpers.get_dhcp_server(asset=self.asset)
self.dhcp_logs, self.dhcp_error = helpers.search_dhcp_log(self.mac_address, dhcp_server)
# Check log file
cmd = 'show log messages'
show_log = device.cli([cmd])
show_log = show_log[cmd]
self.log_entries = re.findall(r'\n([\[\]\s\w\d:.-]+{0}[\s\w\d:.-]+)\n'.format(self.interface_name),
show_log)
device.close()
except ConnectAuthError:
self.error_message = 'Autentication failed to %s.' % self.loopback
except ConnectRefusedError:
self.error_message = 'Connection refused to %s.' % self.loopback
except ValueError:
self.error_message = 'No switch found.'
class Interface(object):
"""Class to represent a JUNOS interface."""
def __init__(self, output):
self.output = output
self.link_state = ''
self.speed = ''
self.duplex = ''
self.flapped = ''
self.auto_neg = ''
# Analyze output
self.analyze_output()
def analyze_output(self):
"""Anlyze the output from show interfaces X."""
# Link down
match = re.search(r'Physical link is ([\w]+)', self.output)
if match:
self.link_state = match.groups()[0]
# Speed
match = re.search(r'Speed: ([\w\d]+),', self.output)
if match:
self.speed = match.groups()[0]
# Duplex
match = re.search(r'Duplex: ([\w-]+),', self.output)
if match:
self.duplex = match.groups()[0]
# Last flapped
match = re.search(r'Last flapped : ([\w\d ():-]+)\n', self.output)
if match:
self.flapped = match.groups()[0]
# Auto negotiation
match = re.search(r'Auto-negotiation: ([\w]+),', self.output)
if match:
self.auto_neg = match.groups()[0]
class LLDP(object):
"""Parse and represent a LLDP neighbor."""
def __init__(self, output):
self.output = output
self.empty = True
self.remote_chassis_id = ''
self.remote_port_description = ''
self.remote_system = ''
# Analyze output
self.analyze_output()
if self.remote_chassis_id:
self.empty = False
def analyze_output(self):
"""Parse JUNOS show lldp neighboir interface X command."""
# Remote chassis ID
match = re.search(r'Chassis ID\s+: ([\w\d:-]+)', self.output)
if match:
self.remote_chassis_id = match.groups()[0]
# Remote port description
match = re.search(r'Port description\s+: ([\w\d\/:-]+)', self.output)
if match:
self.remote_port_description = match.groups()[0]
# Remote port system
match = re.search(r'System name\s+: ([\w\d\/:-]+)', self.output)
if match:
self.remote_system = match.groups()[0]
class MACTable(object):
"""Parse and save MAC entries from a JUNOS device."""
def __init__(self, output):
self.output = output
self.mac_entries = []
# Analyze output
self.analyze_output()
def analyze_output(self):
"""Parse JUNOS show ethernet-switching interface X command."""
# Remote chassis ID
match = re.findall(RE_SWITCHING_TABLE, self.output)
for entry in match:
if entry[1] != '*':
mac_entry = {'vlan': entry[0],
'mac': entry[1],
'type': entry[2],
'age': entry[3],
'interface': entry[4]}
self.mac_entries.append(mac_entry)
def __str__(self):
if self.mac_entries:
return self.mac_entries[0]['mac']
return None
def main():
"""Main."""
pass
if __name__ == '__main__':
main()
|
mit
| -7,438,312,396,324,293,000
| 26.931373
| 107
| 0.586697
| false
| 3.493562
| false
| false
| false
|
ndaniels/Ammolite
|
scripts/figure-generators/smsdIsoCompare.py
|
1
|
1534
|
import matplotlib.pyplot as plt
from pylab import polyfit, poly1d, show, savefig
import sys
def isNumber( s):
try:
float(s)
return True
except ValueError:
return False
def makeGraph(X,Y, xName, yName, name="NoName"):
fig = plt.figure()
ax = fig.add_subplot(111)
superName = "Comparison of {} and {}".format(xName,yName)
outname = "{} from {}.png".format(superName,name)
fig.suptitle(superName)
ax.scatter(X,Y)
fit = polyfit(X,Y,1)
fit_fn = poly1d(fit) # fit_fn is now a function which takes in x and returns an estimate for y
ax.plot(X,Y, 'yo', X, fit_fn(X), '--k')
ax.set_xlabel('Size of MCS found by {}'.format(xName))
ax.set_ylabel('Size of MCS found by {}'.format(yName))
ax.text(1, 1, "y = {}*x + {}".format(fit[0], fit[1]))
fig.savefig(outname)
def buildIsoSMSDComparison( filename, outname="SMSD-IsoRank-comparison"):
X, Y, xName, yName = [], [], "", ""
with open( filename) as f:
inComparison = False
nameLine = False
for line in f:
if line.split()[0] == "COMPARISON_DELIMITER":
if inComparison:
makeGraph( X, Y, xName, yName, filename)
inComparison = True
nameLine = True
X, Y = [], []
elif inComparison:
l = line.split()
if nameLine:
xName, yName = l[0], l[1]
nameLine = False
else:
X.append( float( l[0]))
Y.append( float( l[1]))
makeGraph( X, Y, xName, yName, filename)
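# buildIsoSMSDComparison() expects a plain-text file shaped roughly as below
# (an illustrative sketch only; tool names and MCS sizes are made up):
#   COMPARISON_DELIMITER
#   SMSD IsoRank
#   12 11
#   8 8
#   COMPARISON_DELIMITER
#   ...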
if __name__ == "__main__":
args = sys.argv
if(len(args) == 2):
buildIsoSMSDComparison(args[1])
else:
buildIsoSMSDComparison(args[1], args[2])
|
gpl-2.0
| 7,310,710,760,138,721,000
| 22.96875
| 95
| 0.634289
| false
| 2.635739
| false
| false
| false
|
Frencil/box-python-sdk
|
test/unit/object/test_events.py
|
1
|
8316
|
# coding: utf-8
from __future__ import unicode_literals
from itertools import chain
import json
from mock import Mock
import pytest
from requests.exceptions import Timeout
from six.moves import map # pylint:disable=redefined-builtin
from six.moves.urllib.parse import urlencode, urlunsplit # pylint:disable=import-error,no-name-in-module
from boxsdk.network.default_network import DefaultNetworkResponse
from boxsdk.object.events import Events, EventsStreamType, UserEventsStreamType
from boxsdk.session.box_session import BoxResponse
from boxsdk.util.ordered_dict import OrderedDict
@pytest.fixture()
def test_events(mock_box_session):
return Events(mock_box_session)
@pytest.fixture()
def final_stream_position():
return 1348790499820
@pytest.fixture()
def initial_stream_position():
return 1348790499819
# pylint:disable=no-member
# pylint isn't currently smart enough to recognize the class member that was
# added by the metaclass, when the metaclass was added by @add_metaclass() /
# with_metaclass().
STREAM_TYPES_AS_ENUM_INSTANCES = list(EventsStreamType.__members__.values())
# pylint:enable=no-member
STREAM_TYPES_AS_STRINGS = list(map(str, STREAM_TYPES_AS_ENUM_INSTANCES))
def test_events_stream_type_extended_enum_class_has_expected_members():
assert len(STREAM_TYPES_AS_ENUM_INSTANCES) >= 4
assert len(STREAM_TYPES_AS_STRINGS) >= 4
assert 'all' in STREAM_TYPES_AS_STRINGS
assert 'changes' in STREAM_TYPES_AS_STRINGS
assert 'sync' in STREAM_TYPES_AS_STRINGS
assert 'admin_logs' in STREAM_TYPES_AS_STRINGS
@pytest.fixture(
scope='session',
params=list(chain(
[None], # Default behavior of not passing any stream_type
STREAM_TYPES_AS_ENUM_INSTANCES, # Passing an enum instance
STREAM_TYPES_AS_STRINGS, # Passing an enum value
# For forwards compatibility, make sure that it works to pass a string
# value that is not a member of the enum.
['future_stream_type'],
)),
)
def stream_type_param(request):
"""The value to pass as an Event method's stream_type parameter.
:return:
The parameter value, or `None` if no value should be passed.
:rtype:
:enum:`EventsStreamType` or `unicode` or `None`
"""
return request.param
@pytest.fixture()
def expected_stream_type(stream_type_param):
"""The stream type we expect to use.
:rtype:
`unicode`
"""
if stream_type_param is None:
return UserEventsStreamType.ALL
return stream_type_param
@pytest.fixture()
def stream_type_kwargs(stream_type_param):
"""The kwargs for stream_type to pass when invoking a method on `Events`.
:rtype:
`dict`
"""
if stream_type_param:
return {'stream_type': stream_type_param}
return {}
@pytest.fixture()
def expected_stream_type_params(expected_stream_type):
"""The stream_type-related params that we expect to pass to request methods.
:rtype:
:class:`OrderedDict`
"""
return OrderedDict(stream_type=expected_stream_type)
@pytest.fixture()
def empty_events_response(final_stream_position):
# pylint:disable=redefined-outer-name
mock_box_response = Mock(BoxResponse)
mock_network_response = Mock(DefaultNetworkResponse)
mock_box_response.network_response = mock_network_response
mock_box_response.json.return_value = mock_json = {'next_stream_position': final_stream_position, 'entries': []}
mock_box_response.content = json.dumps(mock_json).encode()
mock_box_response.status_code = 200
mock_box_response.ok = True
return mock_box_response
@pytest.fixture()
def long_poll_url(test_url, expected_stream_type_params):
return urlunsplit(('', '', test_url, urlencode(expected_stream_type_params), ''))
@pytest.fixture()
def retry_timeout():
return 610
@pytest.fixture()
def options_response_entry(long_poll_url, retry_timeout):
return {'url': long_poll_url, 'retry_timeout': retry_timeout}
@pytest.fixture()
def options_response(options_response_entry, make_mock_box_request):
# pylint:disable=redefined-outer-name
mock_box_response, _ = make_mock_box_request(
response={'entries': [options_response_entry]},
)
return mock_box_response
@pytest.fixture()
def new_change_long_poll_response(make_mock_box_request):
mock_box_response, _ = make_mock_box_request(
response={'message': 'new_change'},
)
return mock_box_response
@pytest.fixture()
def reconnect_long_poll_response(make_mock_box_request):
mock_box_response, _ = make_mock_box_request(
response={'message': 'reconnect'},
)
return mock_box_response
@pytest.fixture()
def max_retries_long_poll_response(make_mock_box_request):
mock_box_response, _ = make_mock_box_request(
response={'message': 'max_retries'},
)
return mock_box_response
@pytest.fixture()
def mock_event():
return {
"type": "event",
"event_id": "f82c3ba03e41f7e8a7608363cc6c0390183c3f83",
"source": {
"type": "folder",
"id": "11446498",
}
}
@pytest.fixture()
def events_response(initial_stream_position, mock_event, make_mock_box_request):
# pylint:disable=redefined-outer-name
mock_box_response, _ = make_mock_box_request(
response={"next_stream_position": initial_stream_position, "entries": [mock_event]},
)
return mock_box_response
def test_get_events(
test_events,
mock_box_session,
events_response,
stream_type_kwargs,
expected_stream_type_params,
):
# pylint:disable=redefined-outer-name
expected_url = test_events.get_url()
mock_box_session.get.return_value = events_response
events = test_events.get_events(**stream_type_kwargs)
assert 'next_stream_position' in events
mock_box_session.get.assert_any_call(
expected_url,
params=dict(limit=100, stream_position=0, **expected_stream_type_params),
)
def test_get_long_poll_options(
mock_box_session,
test_events,
stream_type_kwargs,
expected_stream_type_params,
options_response,
options_response_entry,
):
expected_url = test_events.get_url()
mock_box_session.options.return_value = options_response
long_poll_options = test_events.get_long_poll_options(**stream_type_kwargs)
mock_box_session.options.assert_called_with(expected_url, params=expected_stream_type_params)
assert long_poll_options == options_response_entry
def test_generate_events_with_long_polling(
test_events,
mock_box_session,
events_response,
empty_events_response,
initial_stream_position,
long_poll_url,
retry_timeout,
options_response,
new_change_long_poll_response,
reconnect_long_poll_response,
max_retries_long_poll_response,
mock_event,
stream_type_kwargs,
expected_stream_type,
expected_stream_type_params,
):
# pylint:disable=redefined-outer-name
expected_url = test_events.get_url()
mock_box_session.options.return_value = options_response
mock_box_session.get.side_effect = [
events_response, # initial call to get now stream position
Timeout,
reconnect_long_poll_response,
max_retries_long_poll_response,
new_change_long_poll_response,
events_response,
new_change_long_poll_response,
empty_events_response,
]
events = test_events.generate_events_with_long_polling(**stream_type_kwargs)
assert next(events) == mock_event
with pytest.raises(StopIteration):
next(events)
events.close()
mock_box_session.options.assert_called_with(expected_url, params=expected_stream_type_params)
mock_box_session.get.assert_any_call(expected_url, params={'stream_position': 'now', 'limit': 0, 'stream_type': expected_stream_type})
assert '/events' in expected_url
mock_box_session.get.assert_any_call(
expected_url,
params=dict(limit=100, stream_position=initial_stream_position, **expected_stream_type_params),
)
mock_box_session.get.assert_any_call(
long_poll_url,
timeout=retry_timeout,
params={'stream_position': initial_stream_position},
)
|
apache-2.0
| -5,183,375,154,726,981,000
| 29.686347
| 138
| 0.684343
| false
| 3.584483
| true
| false
| false
|
alexisVallet/anime-bgrm
|
objectSegmentation.py
|
1
|
3095
|
import disjointSetForest as dsj
import cv2
import numpy as np
def toRowMajor(cols, i, j):
return i * cols + j
def fromRowMajor(cols, idx):
return (idx / cols, idx % cols)
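# Worked example of the row-major mapping (purely illustrative): with cols=4,
# toRowMajor(4, 2, 1) == 2 * 4 + 1 == 9 and fromRowMajor(4, 9) == (2, 1).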
class ObjectsSegmentation:
""" Disjoint set forest, with the additional semantic element of an image to segment
into background and objects (foreground).
"""
def __init__(self, image):
rows, cols = image.shape[0:2]
self.image = image
self.segmentation = dsj.DisjointSetForest(rows * cols)
self.background = None
self.largest = None
def find(self, i, j):
""" Finds the root pixel of the segment containing pixel (i,j).
"""
rows, cols = self.image.shape[0:2]
return fromRowMajor(cols,
self.segmentation.find(toRowMajor(cols, i, j)))
def unsafeUnion(self, i, j, k, l):
""" Fuses the segments containing pixels (i,j) and (k,l) into a single segment.
Doesn't check if either segment is the background.
"""
rows, cols = self.image.shape[0:2]
newRoot = self.segmentation.union(toRowMajor(cols,i,j),
toRowMajor(cols,k,l))
return fromRowMajor(cols, newRoot)
def union(self, i, j, k, l):
""" Fuses the segments containing pixels (i,j) and (k,l) into a single segment.
        Neither segment should be the background.
"""
rows, cols = self.image.shape[0:2]
fstRoot = self.find(i,j)
sndRoot = self.find(k,l)
if fstRoot == self.background or sndRoot == self.background:
raise ValueError("Cannot perform union of background pixels!")
else:
newRoot = self.segmentation.union(toRowMajor(cols,i,j),
toRowMajor(cols,k,l))
newRootPixel = fromRowMajor(cols,newRoot)
# keep track of the largest object
            if self.largest is None:
self.largest = newRootPixel
else:
(li, lj) = self.largest
largestSize = self.segmentation.compSize[toRowMajor(cols,li,lj)]
if self.segmentation.compSize[newRoot] > largestSize:
self.largest = newRootPixel
def setBackground(self, i, j):
""" Marks the (i,j) pixel as a background pixel.
"""
        if self.background is None:
self.background = (i,j)
else:
(k,l) = self.background
self.background = self.unsafeUnion(k, l, i, j)
def getLargestObject(self):
        return (0, 0) if self.largest is None else self.largest
def foregroundMask(self, fgVal=0, bgVal=255):
rows, cols = self.image.shape[0:2]
mask = np.empty([rows, cols], dtype=np.uint8)
for i in range(0,rows):
for j in range(0,cols):
root = self.find(i,j)
if root == self.background:
mask[i,j] = bgVal
else:
mask[i,j] = fgVal
return mask
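# Minimal usage sketch, assuming a small dummy image; illustrates union,
# setBackground, getLargestObject and foregroundMask on a hypothetical
# 3x3 input.
if __name__ == '__main__':
    demo = np.zeros((3, 3, 3), dtype=np.uint8)
    seg = ObjectsSegmentation(demo)
    seg.union(0, 0, 0, 1)          # fuse pixels (0,0) and (0,1) into one object
    seg.setBackground(2, 2)        # mark (2,2) as background
    print(seg.getLargestObject())  # root pixel of the largest object
    print(seg.foregroundMask())    # objects -> 0, background -> 255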
|
gpl-2.0
| -6,634,703,475,703,437,000
| 35.845238
| 88
| 0.555412
| false
| 3.84472
| false
| false
| false
|
crypto101/arthur
|
arthur/test/test_util.py
|
1
|
4581
|
from arthur.util import MultiDeferred
from twisted.internet import defer
from twisted.trial import unittest
class MultiDeferredTests(unittest.SynchronousTestCase):
"""
Tests for L{defer.MultiDeferred}, except now in Arthur.
See tm.tl/6365.
"""
def setUp(self):
self.multiDeferred = MultiDeferred()
def test_callback(self):
"""
Any produced L{defer.Deferred}s have their callbacks called when the
L{defer.MultiDeferred} does.
"""
a, b, c = [self.multiDeferred.tee() for _ in xrange(3)]
self.assertNoResult(a)
self.assertNoResult(b)
self.assertNoResult(c)
result = object()
self.multiDeferred.callback(result)
self.assertIdentical(self.successResultOf(a), result)
self.assertIdentical(self.successResultOf(b), result)
self.assertIdentical(self.successResultOf(c), result)
def test_errback(self):
"""
Any produced L{defer.Deferred}s have their errbacks called when the
L{defer.MultiDeferred} does.
"""
a, b, c = [self.multiDeferred.tee() for _ in xrange(3)]
self.assertNoResult(a)
self.assertNoResult(b)
self.assertNoResult(c)
error = RuntimeError()
self.multiDeferred.errback(error)
self.assertIdentical(self.failureResultOf(a, RuntimeError).value, error)
self.assertIdentical(self.failureResultOf(b, RuntimeError).value, error)
self.assertIdentical(self.failureResultOf(c, RuntimeError).value, error)
def test_callbackAfterCallback(self):
"""
Calling C{callback} twice raises L{defer.AlreadyCalledError}.
"""
self.multiDeferred.callback(None)
self.assertRaises(defer.AlreadyCalledError,
self.multiDeferred.callback, None)
def test_callbackAfterErrback(self):
"""
Calling C{callback} after C{errback} raises L{defer.AlreadyCalledError}.
"""
self.multiDeferred.errback(RuntimeError())
self.assertRaises(defer.AlreadyCalledError,
self.multiDeferred.callback, None)
def test_errbackAfterCallback(self):
"""
Calling C{errback} after C{callback} raises L{defer.AlreadyCalledError}.
"""
self.multiDeferred.callback(None)
self.assertRaises(defer.AlreadyCalledError,
self.multiDeferred.errback, RuntimeError())
def test_errbackAfterErrback(self):
"""
Calling C{errback} after C{errback} raises L{defer.AlreadyCalledError}.
"""
self.multiDeferred.errback(RuntimeError())
self.assertRaises(defer.AlreadyCalledError,
self.multiDeferred.errback, RuntimeError())
def test_synchronousCallbacks(self):
"""
All callbacks are called sequentially, synchronously, and in the order
they were produced. If one or more of the L{defer.Deferred}s produced
by L{defer.MultiDeferred.tee} is waiting on a deferred that will never
fire, all the other deferreds produced by that method are still fired.
"""
called = []
result = object()
def callback(r, i):
"""
            Checks this is the correct result, adds this deferred's index to the list
of called deferreds, and then returns a deferred that will never
fire.
"""
self.assertIdentical(r, result)
called.append(i)
return defer.Deferred()
for i in range(10):
self.multiDeferred.tee().addCallback(callback, i=i)
self.assertEqual(called, [])
self.multiDeferred.callback(result)
self.assertEqual(called, range(10))
def test_alreadyFiredWithResult(self):
"""
If the C{MultiDeferred} already fired, C{tee} produces a
C{Deferred} that has already been fired.
"""
result = object()
self.multiDeferred.callback(result)
d = self.multiDeferred.tee()
self.assertIdentical(self.successResultOf(d), result)
def test_alreadyFiredWithError(self):
"""
If the C{MultiDeferred} already fired with a failure, C{tee}
produces a C{Deferred} that has already been fired with the
failure.
"""
error = RuntimeError()
self.multiDeferred.errback(error)
d = self.multiDeferred.tee()
failure = self.failureResultOf(d, RuntimeError)
self.assertIdentical(failure.value, error)
|
isc
| -7,686,488,387,015,034,000
| 31.956835
| 84
| 0.629993
| false
| 4.379541
| true
| false
| false
|
OffenesJena/JenLoRa
|
LoPy/LoAirRohr01/lib/DHT22RinusW.py
|
1
|
2355
|
from machine import enable_irq, disable_irq
import time
# this onewire protocol code tested with Pycom LoPy device and AM2302/DHT22 sensor
def getval(pin):
ms = [1]*700 # needs long sample size to grab all the bits from the DHT
time.sleep(1)
pin(0)
time.sleep_us(10000)
pin(1)
irqf = disable_irq()
for i in range(len(ms)):
ms[i] = pin() ## sample input and store value
enable_irq(irqf)
#for i in range(len(ms)): #print debug for checking raw data
# print (ms[i])
return ms
def decode(inp):
res= [0]*5
bits=[]
ix = 0
try:
#if inp[0] == 1 : ix = inp.index(0, ix) ## skip to first 0 # ignore first '1' as probably sample of start signal. *But* code seems to be missing the start signal, so jump this line to ensure response signal is identified in next two lines.
ix = inp.index(1,ix) ## skip first 0's to next 1 # ignore first '10' bits as probably the response signal.
ix = inp.index(0,ix) ## skip first 1's to next 0
while len(bits) < len(res)*8 : ##need 5 * 8 bits :
ix = inp.index(1,ix) ## index of next 1
ie = inp.index(0,ix) ## nr of 1's = ie-ix
# print ('ie-ix:',ie-ix)
bits.append(ie-ix)
ix = ie
except:
print('6: decode error')
# print('length:')
# print(len(inp), len(bits))
return([0xff,0xff,0xff,0xff])
# print('bits:', bits)
for i in range(len(res)):
for v in bits[i*8:(i+1)*8]: #process next 8 bit
res[i] = res[i]<<1 ##shift byte one place to left
            if v > 7: # a run of 7 or fewer '1' samples is a zero bit, more than 7 is a one bit
res[i] = res[i]+1 ##and add 1 if lsb is 1
# print ('res', i, res[i])
    if (res[0]+res[1]+res[2]+res[3])&0xff != res[4] : ##checksum error!
print("Checksum Error")
print (res[0:4])
res= [0xff,0xff,0xff,0xff]
# print ('res:', res[0:4])
return(res[0:4])
def DHT11(pin):
res = decode(getval(pin))
temp = 10 * res[0] + res[1]
hum = 10 * res[2] + res[3]
return temp, hum
def DHT22(pin):
res = decode(getval(pin))
hum = res[0] * 256 + res[1]
temp = res[2] * 256 + res[3]
if (temp > 0x7fff):
temp = 0x8000 - temp
return temp, hum
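# Minimal usage sketch, assuming the sensor data line is wired to the
# hypothetical LoPy pin 'P23'; an open-drain pin lets the same object drive
# the start signal and sample the sensor response in getval().
if __name__ == '__main__':
    from machine import Pin
    dht_pin = Pin('P23', mode=Pin.OPEN_DRAIN)
    temperature, humidity = DHT22(dht_pin)
    print('temperature: %.1f C' % (temperature / 10.0))
    print('humidity: %.1f %%' % (humidity / 10.0))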
|
apache-2.0
| 6,041,025,592,553,690,000
| 33.632353
| 248
| 0.546072
| false
| 3.026992
| false
| false
| false
|
Ilias95/lib389
|
lib389/tests/dseldif_test.py
|
1
|
4107
|
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import logging
import os
import pytest
from lib389._constants import *
from lib389.dseldif import DSEldif
from lib389.topologies import topology_st as topo
DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
@pytest.mark.parametrize("entry_dn", (DN_CONFIG,
DN_CONFIG_LDBM))
def test_get_singlevalue(topo, entry_dn):
"""Check that we can get an attribute value under different suffixes"""
dse_ldif = DSEldif(topo.standalone)
log.info("Get 'cn' attr from {}".format(entry_dn))
attr_values = dse_ldif.get(entry_dn, "cn")
assert attr_values == ["config"]
log.info("Get 'nonexistent' attr from {}".format(entry_dn))
attr_values = dse_ldif.get(entry_dn, "nonexistent")
assert not attr_values
def test_get_multivalue(topo):
"""Check that we can get attribute values"""
dse_ldif = DSEldif(topo.standalone)
log.info("Get objectClass from {}".format(DN_CONFIG))
attr_values = dse_ldif.get(DN_CONFIG, "objectClass")
assert len(attr_values) == 3
assert "top" in attr_values
assert "extensibleObject" in attr_values
assert "nsslapdConfig" in attr_values
@pytest.mark.parametrize("fake_attr_value", ("fake value",
"fakevalue"))
def test_add(topo, fake_attr_value):
"""Check that we can add an attribute to a given suffix"""
dse_ldif = DSEldif(topo.standalone)
fake_attr = "fakeAttr"
log.info("Add {} to {}".format(fake_attr, DN_CONFIG))
dse_ldif.add(DN_CONFIG, fake_attr, fake_attr_value)
attr_values = dse_ldif.get(DN_CONFIG, fake_attr)
assert attr_values == [fake_attr_value]
log.info("Clean up")
dse_ldif.delete(DN_CONFIG, fake_attr)
assert not dse_ldif.get(DN_CONFIG, fake_attr)
def test_replace(topo):
"""Check that we can replace an attribute to a given suffix"""
dse_ldif = DSEldif(topo.standalone)
port_attr = "nsslapd-port"
port_value = "390"
log.info("Get default value of {}".format(port_attr))
default_value = dse_ldif.get(DN_CONFIG, port_attr)[0]
log.info("Replace {} with {}".format(port_attr, port_value))
dse_ldif.replace(DN_CONFIG, port_attr, port_value)
attr_values = dse_ldif.get(DN_CONFIG, port_attr)
assert attr_values == [port_value]
log.info("Restore default value")
dse_ldif.replace(DN_CONFIG, port_attr, default_value)
def test_delete_singlevalue(topo):
"""Check that we can delete an attribute from a given suffix"""
dse_ldif = DSEldif(topo.standalone)
fake_attr = "fakeAttr"
fake_attr_values = ["fake1", "fake2", "fake3"]
log.info("Add multivalued {} to {}".format(fake_attr, DN_CONFIG))
for value in fake_attr_values:
dse_ldif.add(DN_CONFIG, fake_attr, value)
log.info("Delete {}".format(fake_attr_values[0]))
dse_ldif.delete(DN_CONFIG, fake_attr, fake_attr_values[0])
attr_values = dse_ldif.get(DN_CONFIG, fake_attr)
assert len(attr_values) == 2
assert fake_attr_values[0] not in attr_values
assert fake_attr_values[1] in attr_values
assert fake_attr_values[2] in attr_values
log.info("Clean up")
dse_ldif.delete(DN_CONFIG, fake_attr)
assert not dse_ldif.get(DN_CONFIG, fake_attr)
def test_delete_multivalue(topo):
"""Check that we can delete attributes from a given suffix"""
dse_ldif = DSEldif(topo.standalone)
fake_attr = "fakeAttr"
fake_attr_values = ["fake1", "fake2", "fake3"]
log.info("Add multivalued {} to {}".format(fake_attr, DN_CONFIG))
for value in fake_attr_values:
dse_ldif.add(DN_CONFIG, fake_attr, value)
log.info("Delete all values of {}".format(fake_attr))
dse_ldif.delete(DN_CONFIG, fake_attr)
assert not dse_ldif.get(DN_CONFIG, fake_attr)
|
gpl-3.0
| 5,447,008,069,091,832,000
| 30.592308
| 75
| 0.661066
| false
| 3.186191
| true
| false
| false
|
darbaga/simple_compiler
|
virtual_machine.py
|
1
|
2139
|
class VirtualMachine:
def __init__(self, ram_size=512, executing=True):
self.data = {i: None for i in range(ram_size)}
self.stack = []
self.executing = executing
self.pc = 0
self.devices_start = 256
def push(self, value):
"""Push something onto the stack."""
self.stack += [value]
def pop(self):
"""Pop something from the stack. Crash if empty."""
return self.stack.pop()
def read_memory(self, index):
"""Read from memory, crashing if index is out of bounds."""
if isinstance(self.data[index], DeviceProxy):
return self.data[index].read(index)
else:
return self.data[index]
def write_memory(self, index, value):
"""Write to memory. Crash if index is out of bounds."""
if isinstance(self.data[index], DeviceProxy):
self.data[index].write(index, value)
else:
self.data[index] = value
def register_device(self, device, needed_addresses):
"""Given an instantiated device and the number of required addresses, registers it in memory"""
# If not enough addresses, just error out
if self.devices_start+needed_addresses > len(self.data):
raise Exception('Not enough addresses to allocate')
proxyed_device = DeviceProxy(device, self.devices_start)
for i in range(self.devices_start, self.devices_start+needed_addresses):
self.data[i] = proxyed_device
self.devices_start += needed_addresses
def run(self, bytecodes):
self.bytecodes = bytecodes
while self.executing:
increment = self.bytecodes[self.pc].autoincrement
self.bytecodes[self.pc].execute(self)
if increment:
self.pc += 1
class DeviceProxy:
"""Manages address translation between devices"""
def __init__(self, device, pos):
self.device = device
self.pos = pos
def read(self, index):
return self.device.read(self.pos-index)
def write(self, index, value):
self.device.write(self.pos-index, value)
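# Minimal usage sketch with a hypothetical memory-mapped device: any object
# exposing read(offset) and write(offset, value) can be registered, and the
# proxy translates absolute RAM addresses into device-relative offsets.
if __name__ == '__main__':
    class EchoDevice:
        """Toy device that remembers the last value written at each offset."""
        def __init__(self):
            self.cells = {}
        def read(self, offset):
            return self.cells.get(offset)
        def write(self, offset, value):
            self.cells[offset] = value
    vm = VirtualMachine(executing=False)    # no bytecodes needed for this demo
    vm.register_device(EchoDevice(), 4)     # claims addresses 256..259
    vm.write_memory(258, 42)                # routed to the device, not plain RAM
    print(vm.read_memory(258))              # -> 42
    print(vm.read_memory(0))                # ordinary RAM cell, still None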
|
bsd-3-clause
| 2,304,611,489,600,544,300
| 34.65
| 103
| 0.610566
| false
| 4.113462
| false
| false
| false
|
gladgod/zhiliao
|
zhiliao/twitter/defaults.py
|
1
|
2296
|
"""
Default settings for the ``mezzanine.twitter`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from zhiliao.conf import register_setting
from zhiliao.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_SEARCH
register_setting(
name="TWITTER_DEFAULT_QUERY_TYPE",
label=_("Default Twitter Query Type"),
description=_("Type of query that will be used to retrieve tweets for "
"the default Twitter feed."),
editable=True,
default=QUERY_TYPE_SEARCH,
choices=QUERY_TYPE_CHOICES,
)
register_setting(
name="TWITTER_DEFAULT_QUERY",
label=_("Default Twitter Query"),
description=_("Twitter query to use for the default query type. "
"\n\n*Note:* Once you change this from the default, you'll need to "
"configure each of the oAuth consumer/access key/secret settings. "
"Please refer to http://dev.twitter.com for more information "
"on creating an application and acquiring these settings."),
editable=True,
default="from:stephen_mcd mezzanine",
)
register_setting(
name="TWITTER_DEFAULT_NUM_TWEETS",
label=_("Default Number of Tweets"),
description=_("Number of tweets to display in the default Twitter feed."),
editable=True,
default=3,
)
register_setting(
name="TWITTER_CONSUMER_KEY",
label=_("Twitter OAuth consumer key"),
editable=True,
default='',
)
register_setting(
name="TWITTER_CONSUMER_SECRET",
label=_("Twitter OAuth consumer secret"),
editable=True,
default='',
)
register_setting(
name="TWITTER_ACCESS_TOKEN_KEY",
label=_("Twitter OAuth access token"),
editable=True,
default='',
)
register_setting(
name="TWITTER_ACCESS_TOKEN_SECRET",
label=_("Twitter OAuth access token secret"),
editable=True,
default='',
)
|
bsd-3-clause
| 6,396,643,095,361,620,000
| 29.613333
| 78
| 0.708624
| false
| 3.965458
| false
| false
| false
|
nmmmnu/MessageQueue
|
protocols/memcachedhandler.py
|
1
|
4321
|
#
# Memcached protocol implementation
# Nikolay Mihaylov nmmm@nmmm.nu
#
# For Memcached telnet protocol see:
# http://blog.elijaa.org/?post/2010/05/21/Memcached-telnet-command-summary
import asynchat
import time
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class MemcachedHandler(asynchat.async_chat):
commands_with_data = ['set', 'add', "sismember"]
def __init__(self, sock, addr, processor):
#
# Constructs new Memcached protocol handler
#
# @param sock : socket from asyncore
# @param addr : address from asyncore
# @param processor : processor class
#
asynchat.async_chat.__init__(self, sock=sock)
self.addr = addr
self.started = time.time()
self.lastping = time.time()
self.head = ""
self.data = ""
self.processor = processor
self.state_change("read_header")
def state_change(self, state, size = 0):
self.io = StringIO()
if state == "read_header":
self.state = state
self.set_terminator("\r\n")
return True
if state == "read_data":
# size == 0 is an error, but we will ignore it.
if size < 0:
return False
self.state = state
self.set_terminator(size + len("\r\n") )
return True
# Unknown state ?
return False
def cmd_parse_head(self):
m2 = self.head.split(" ")
# clean up empty arguments.
m = []
for x in m2:
x = x.strip()
if x != "":
m.append(x)
# for easy access, put some blanks at the end.
while len(m) < 10:
m.append("")
return m
def cmd_parse(self):
self.lastping = time.time()
args = self.cmd_parse_head()
command = args[0].lower()
if command == "get":
key = args[1]
x = self.processor.get(key)
if x is None:
self.push("END\r\n")
return
msg = "VALUE %s 0 %d\r\n%s\r\nEND\r\n" % (key, len(x), x)
self.push(msg)
return
if command == "delete":
key = args[1]
x = self.processor.delete(key)
if x:
self.push("DELETED\r\n")
return
self.push("NOT_FOUND\r\n")
return
if command == "set":
# It is protocol responsibility to check the size.
try:
size = int(args[4])
if len(self.data) > size:
self.data = self.data[:size]
except:
pass
key = args[1]
x = self.processor.set(key, self.data)
if x:
self.push("STORED\r\n")
return
self.push("NOT_STORED\r\n")
return
if command == "add":
# It is protocol responsibility to check the size.
try:
size = int(args[4])
if len(self.data) > size:
self.data = self.data[:size]
except:
pass
key = args[1]
x = self.processor.add(key, self.data)
if x:
self.push("STORED\r\n")
return
self.push("NOT_STORED\r\n")
return
# Non standard command
if command == "scard":
key = args[1]
x = self.processor.len(key)
if x is None:
x = "0"
msg = "VALUE %s 0 %d\r\n%s\r\nEND\r\n" % (key, len(x), x)
self.push(msg)
return
# Non standard command
if command == "sismember":
# It is protocol responsibility to check the size.
try:
size = int(args[4])
if len(self.data) > size:
self.data = self.data[:size]
except:
pass
key = args[1]
x = self.processor.contains(key, self.data)
if x:
self.push("MEMBER\r\n")
return
self.push("NOT_MEMBER\r\n")
return
if command == "quit":
self.push("QUIT\r\n")
self.close()
return
# error, not implemented
self.push("ERROR\r\n")
return
def state_read_header(self):
self.head = self.io.getvalue()
m = self.cmd_parse_head()
if m[0] in self.commands_with_data:
try:
size = int(m[4])
except:
size = 0
self.state_change("read_data", size)
return
self.state_change("read_header")
self.cmd_parse()
def state_read_data(self):
self.data = self.io.getvalue()
self.state_change("read_header")
self.cmd_parse()
def found_terminator(self):
if self.state == "read_header":
return self.state_read_header()
if self.state == "read_data":
return self.state_read_data()
# Unknown state ?
return False
def collect_incoming_data(self, data):
self.io.write(data)
|
gpl-3.0
| 3,332,516,228,514,705,400
| 15.123134
| 74
| 0.585744
| false
| 2.837163
| false
| false
| false
|
pandysong/dxf2kicad_mod
|
dxf2kicad_mod.py
|
1
|
4880
|
# refer to http://pythonhosted.org/dxfgrabber/#
# Note that there must not be any overlapping lines or shapes
import sys
import math
import functools
from itertools import groupby
import dxfgrabber
import kicad_mod_format as kf
def _arc_point(center, radius, angle_degree):
'''
point defined by arc center,radius, and angel in degree
'''
return (center[0] + radius * math.cos(angle_degree/180*math.pi),
center[1] + radius * math.sin(angle_degree/180*math.pi))
def _endpoints(entity):
'''
return a tuple of start and end points of the entity
'''
if "LINE" == entity.dxftype:
return (entity.start, entity.end)
elif "ARC" == entity.dxftype:
return (_arc_point(entity.center, entity.radius, entity.start_angle),
_arc_point(entity.center, entity.radius, entity.end_angle))
else:
raise TypeError(
"[Error]: Unexpceted dxftype {}".format(entity.dxftype))
def _touched(p1, p2):
distance_error = 1e-2
return ((math.fabs(p1[0]-p2[0]) < distance_error) and
(math.fabs(p1[1]-p2[1]) < distance_error))
def _points_in_entity(ety):
if 'LINE' == ety.dxftype:
return [ety.start, ety.end]
elif 'ARC' == ety.dxftype:
if (ety.start_angle > ety.end_angle):
ety.end_angle += 360
def angles(start_angle, end_angle, radius):
'''
            yields discrete angles with step length defined by radius
'''
step = 1.0/ety.radius # larger radius indicates small steps
angle = start_angle
while True:
yield angle
if (angle + step > ety.end_angle):
yield end_angle
break
else:
angle += step
return [_arc_point(ety.center, ety.radius, a) for a in
angles(ety.start_angle, ety.end_angle, ety.radius)]
else:
raise TypeError(
"[Error]: Unexpceted dxftype {}".format(ety.dxftype))
def fp_polys(layer, entities):
'''
yields fp_poly cmd in the layer of `entities`
'''
entities = list(entities)
def _points_next_to(next_start):
for e in entities:
start, end = _endpoints(e)
pts = _points_in_entity(e)
if _touched(next_start, start):
return pts, e
elif _touched(next_start, end):
pts.reverse()
return pts, e
return None, None
def poly(e):
start, next_start = _endpoints(e)
yield [start] # yield start points
while True:
pts, pts_e = _points_next_to(next_start)
if pts:
entities.remove(pts_e) # remove from the set
yield pts # yield a list of points
next_start = pts[-1] # new start
else:
if _touched(next_start, start):
return
else:
raise ValueError('Unclosed shape at {}'.format(next_start))
def polys():
while True:
if not entities:
return
e = entities.pop() # pick up one
yield poly(e) # yield an iterator which will yields points
for p in polys():
poly_points = functools.reduce(lambda x, y: x+y, p)
        # we may use *point, but since there might be more than 2 values in one
        # point (e.g. a z coordinate), we unpack it manually
yield kf.fp_poly(children=(kf.pts(children=(kf.xy(point[0], point[1])
for point in poly_points)),
kf.layer(layer),
kf.width(0.001)))
def _layer_entities(entities):
seq = list(entities)
seq.sort(key=lambda e: e.layer)
groups = groupby(seq, lambda e: e.layer)
return groups
def cmds_from_entities(entities):
'''
get all cmd (in kicad_mod_format) from entities which is the entities
on all layers.
'''
return functools.reduce(lambda x, y: x+y,
(list(fp_polys(layer, entities))
for (layer, entities) in
                             _layer_entities(entities)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print('usage:\n'
' save to a file: python {} '
'inputfile.dxf > outputfile.kicad_mod\n'
' print to stdout: python {} inputfile.dxf'.format(
sys.argv[0], sys.argv[0]))
else:
dxf = dxfgrabber.readfile(sys.argv[1])
print(str(kf.Module('autogenerated',
children=cmds_from_entities(dxf.entities))))
|
gpl-3.0
| 8,028,408,619,376,183,000
| 31.197279
| 79
| 0.518648
| false
| 3.92283
| false
| false
| false
|
cidles/poio-api
|
src/poioapi/io/graf.py
|
1
|
18028
|
# -*- coding: utf-8 -*-
#
# Poio Tools for Linguists
#
# Copyright (C) 2009-2013 Poio Project
# Author: António Lopes <alopes@cidles.eu>
# URL: <http://media.cidles.eu/poio/>
# For license information, see LICENSE.TXT
""" This document contain the responsible
methods to write and parse the GrAF files.
The parser use the ContentHandler from
SAX Xml module.
"""
from __future__ import absolute_import, unicode_literals
import abc
import codecs
import os
from xml.etree.ElementTree import tostring
from xml.dom import minidom
import graf
# GrAF ID's separator
GRAFSEPARATOR = ".."
(TEXT, AUDIO, VIDEO, NONE) = ("text", "audio", "video", "none")
class Tier:
"""A list of tiers.
The name is the tier unique identification.
"""
__slots__ = ['name', 'annotation_space']
def __init__(self, name, annotation_space=None):
self.name = name
self.annotation_space = annotation_space
class Annotation:
"""A list of annotations.
The id is the annotation identification, the
value the annotation value and the features are
a dict type of values containing the annotation
features.
"""
__slots__ = ['id', 'value', 'features']
def __init__(self, id, value, features=None):
self.value = value
self.id = id
self.features = features
class NodeId:
"""A list of nodes using a specific format.
The prefix is the node type and the index
the identification number.
"""
__slots__ = ['prefix', 'index']
def __init__(self, prefix, index):
self.prefix = prefix
self.index = str(index)
def to_str(self):
return "{0}{1}n{2}".format(self.prefix, GRAFSEPARATOR, self.index)
def str_edge(self):
return "e{0}".format(self.index)
def str_region(self):
return "{0}{1}r{2}".format(self.prefix, GRAFSEPARATOR, self.index)
class PrimaryData:
"""This class represents the primary data of an AnnotationGraph object.
"""
def __init__(self):
self.type = None
self.external_link = None
self.filename = None
self.content = None
class BaseParser(object):
"""This class is a base class to the
parser classes in order to create
GrAF objects.
This class contains some methods that must be
implemented other wise it will be raise a
exception error.
Although the methods that should be implemented
with properly code are the get_root_tiers,
get_child_tiers_for_tier and get_annotations_for_tier.
The method tier_has_regions and region_for_annotation
could simply return None or pass.
Raises
------
NotImplementedError
Method must be implemented.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_root_tiers(self):
"""Method to get the root tiers. The root tiers
are defined by the parser when the method is
implemented.
Returns
-------
list : array-like
List of tiers type.
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def get_child_tiers_for_tier(self, tier):
"""Method that get the child tiers of a specific tier.
Parameters
----------
tier : object
Tier object.
Returns
-------
list : array-like
List of tiers type.
See also
--------
Tier
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def get_annotations_for_tier(self, tier, annotation_parent=None):
"""Method that get all the annotations for a specific tier.
The annotations can be filtered using an annotation parent.
Parameters
----------
tier : object
Tier object.
annotation_parent : object
Annotation object.
Returns
-------
list : array-like
List of annotations type.
See also
--------
Tier, Annotation
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def tier_has_regions(self, tier):
"""Method to verify if a tier has regions.
Parameters
----------
tier : object
Tier object.
Returns
-------
has_region : bool
A true or false variable.
See also
--------
Tier
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def region_for_annotation(self, annotation):
"""Method to get the regions values of a specific
annotation.
Parameters
----------
annotation : object
Annotation object.
Returns
-------
regions : tuple
A tuple with the two regions.
See also
--------
Annotation
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def get_primary_data(self):
"""Method to get the primary data of the GrAF file.
Returns
-------
primaryData : object
Object type of PrimaryData class.
See also
--------
PrimaryData
"""
raise NotImplementedError("Method must be implemented")
class BaseWriter(object):
"""This class is a base class to the
writer classes in order to create
files from GrAF objects.
This class contains some methods that must be
implemented other wise it will be raise a
exception error.
Raises
------
NotImplementedError
Method must be implemented.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def write(self, outputfile, converter):
"""Method that will write the GrAF object into
a specific format.
Parameters
----------
outputfile : str
The filename of the output file. The filename should be the header
file for GrAF with the extension ".hdr".
converter : Converter or AnnotationGraph
            A converter object. The converter object contains the data that
            will be used for output. All writers need at least a GrAF graph
and the tier hierarchy, some will also need the primary data object.
"""
raise NotImplementedError("Method must be implemented")
class GrAFConverter:
"""This class handles the conversion of different file formats into GrAF
objects and back again. It uses a sub-class of BaseParser to get the
annotations and the tier hierarchies. A sub-class of BaseWriter is used
to write back the files. Please be aware that meta-data might get lost
if you write to a file format from another one. This depends on whether the
output file format can store all meta-data from the input file format.
    In any case all the data and annotations will be stored.
"""
def __init__(self, parser, writer=None):
self.parser = parser
self.writer = writer
self.graf = graf.Graph()
self.tier_hierarchies = []
self.meta_information = None
self.primary_data = None
self.original_file = None
def write(self, outputfile):
if self.writer:
self.writer.write(outputfile, self)
def parse(self):
"""This method will be the responsible to transform
the parser into a GrAF object. This method also
retrieves the tiers hierarchies.
"""
self._tiers_parent_list = []
self.root_tiers = []
tiers_hierarchy_map = {}
for tier in self.parser.get_root_tiers():
self.root_tiers.append(tier.name)
self._convert_tier(tier, None, None)
i = 0
for t in self._tiers_parent_list:
if t[1] is None:
i += 1
tiers_hierarchy_map[str(i)] = [t[0]]
else:
self._append_tier_to_hierarchy(tiers_hierarchy_map[str(i)],
t[1], t[0])
for i, hierarchy in tiers_hierarchy_map.items():
self.tier_hierarchies.append(hierarchy)
if hasattr(self.parser, 'meta_information'):
self.meta_information = self.parser.meta_information
self.primary_data = self.parser.get_primary_data()
if hasattr(self.parser, 'filepath') and \
isinstance(self.parser.filepath, str):
self.original_file = os.path.abspath(self.parser.filepath)
def _convert_tier(self, tier, parent_node, parent_annotation,
parent_prefix=None):
child_tiers = self.parser.get_child_tiers_for_tier(tier)
if tier.annotation_space is None:
prefix = tier.name
annotation_name = prefix
else:
annotation_name = tier.annotation_space.replace(' ', '_')
prefix = "{0}{1}{2}".format(annotation_name, GRAFSEPARATOR,
tier.name)
has_regions = False
if self.parser.tier_has_regions(tier):
has_regions = True
self._add_tier_in_hierarchy_list(prefix, parent_prefix)
annotations = self.parser.get_annotations_for_tier(tier,
parent_annotation)
for annotation in annotations:
regions = None
if has_regions:
regions = self.parser.region_for_annotation(annotation)
node_id = NodeId(prefix, annotation.id)
self._add_node(node_id, annotation, annotation_name, regions,
parent_node)
self._add_root_nodes(prefix, node_id)
if child_tiers:
for t in child_tiers:
self._convert_tier(t, node_id, annotation, prefix)
if annotations == [] and child_tiers:
for t in child_tiers:
self._convert_tier(t, None, None, prefix)
def _add_tier_in_hierarchy_list(self, prefix, parent_prefix):
if not (prefix, parent_prefix) in self._tiers_parent_list:
self._tiers_parent_list.append((prefix, parent_prefix))
def _append_tier_to_hierarchy(self, tiers_list, parent_tier, tier):
for t in tiers_list:
if isinstance(t, list):
self._append_tier_to_hierarchy(t, parent_tier, tier)
else:
if t == parent_tier:
tiers_list.append([tier])
def _add_node(self, node_id, annotation, annotation_name, regions,
from_node_id):
self._add_node_to_graph(node_id, regions, from_node_id)
self._add_graf_annotation(annotation_name, annotation.id, node_id,
annotation.value, annotation.features)
def _add_root_nodes(self, prefix, node_id):
if prefix in self.root_tiers:
self.graf.header.roots.append(node_id.to_str())
def _add_graf_annotation(self, annotation_name, annotation_id,
annotation_ref, annotation_value, annotation_features=None):
annotation = graf.Annotation(annotation_name, annotation_features,
annotation_id)
if annotation_value is not None:
annotation.features['annotation_value'] = annotation_value
self.graf.nodes[annotation_ref.to_str()].annotations.add(annotation)
if annotation_name in self.graf.annotation_spaces:
#if annotation not in self.graf.annotation_spaces[annotation_name]:
self.graf.annotation_spaces[annotation_name].add(annotation)
else:
annotation_space = graf.AnnotationSpace(annotation_name)
annotation_space.add(annotation)
self.graf.annotation_spaces.add(annotation_space)
def _add_node_to_graph(self, node_id, regions=None,
from_node_id=None):
node = graf.Node(node_id.to_str())
if from_node_id is not None:
edge_id = node_id.str_edge()
self.graf.create_edge(self.graf.nodes[from_node_id.to_str()], node,
edge_id)
if regions is not None:
region_id = node_id.str_region()
region = graf.Region(region_id, *regions)
node.add_region(region)
self.graf.regions.add(region)
self.graf.nodes.add(node)
class Writer(BaseWriter):
def __init__(self, **kwargs):
self.tier_hierarchies = None
self.meta_information = None
self.standoffheader = graf.StandoffHeader(**kwargs)
def _flatten_hierarchy_elements(self, elements):
"""Flat the elements appended to a new list of elements.
Parameters
----------
elements : array_like
An array of string values.
Returns
-------
flat_elements : array_like
An array of flattened `elements`.
"""
flat_elements = []
for e in elements:
if type(e) is list:
flat_elements.extend(self._flatten_hierarchy_elements(e))
else:
flat_elements.append(e)
return flat_elements
def write(self, outputfile, ag):
"""Writes an AnnotationGraph object as GrAF files.
Parameters
----------
outputfile : str
The filename of the output file. The filename should be the header
file for GrAF with the extension ".hdr".
ag : poioapi.annotationgraph.AnnotationGraph
            An AnnotationGraph object. The AG object contains the data that
            will be used for output.
"""
(basedirname, _) = os.path.splitext(outputfile)
self._get_parents(ag.tier_hierarchies)
standoffrenderer = graf.StandoffHeaderRenderer("{0}.hdr".format(
basedirname))
for tier_name in self._flatten_hierarchy_elements(
ag.tier_hierarchies):
annotation_space = tier_name.split(GRAFSEPARATOR)[0]
out_graf = graf.Graph()
renderer = graf.GrafRenderer("{0}-{1}.xml".format(
basedirname, annotation_space
))
out_graf.nodes = [n for n in ag.graf.nodes
if n.id.startswith(tier_name)]
out_graf.edges = [e for e in ag.graf.edges
if e.to_node.id.startswith(tier_name)]
out_graf.regions = [r for r in ag.graf.regions
if r.id.startswith(tier_name)]
out_graf.annotation_spaces.add(graf.AnnotationSpace(
annotation_space))
out_graf.header.add_dependency(self._parent[tier_name])
out_graf = self._add_root_nodes(ag.graf, annotation_space,
out_graf)
renderer.render(out_graf)
basename = os.path.basename(basedirname)
self.standoffheader.datadesc.add_annotation(
"{0}-{1}.xml".format(basename, annotation_space),
annotation_space)
self._add_primary_data(ag.primary_data, basedirname)
standoffrenderer.render(self.standoffheader)
self._generate_metafile(basedirname, ag.meta_information)
def _add_root_nodes(self, graph, annotation_space, out_graf):
for root in graph.header.roots:
if annotation_space in root:
out_graf.header.roots.append(root)
return out_graf
def _get_parents(self, tier_hierarchies):
self._parent = {}
for h in tier_hierarchies:
self._get_hierarchy_parents(h, None)
def _get_hierarchy_parents(self, hierarchy, parent):
for i, h in enumerate(hierarchy):
if isinstance(h, list):
self._get_hierarchy_parents(h, parent)
else:
self._parent[h] = parent
                if i == 0:
parent = h.split(GRAFSEPARATOR)[0]
def _add_primary_data(self, primary_data, basedirname):
if primary_data.external_link:
loc = primary_data.external_link
elif primary_data.content:
loc = self._create_raw_txt_file(primary_data.content, basedirname)
elif primary_data.filename:
loc = primary_data.filename
self.standoffheader.datadesc.primaryData = {'loc': loc,
'f.id': primary_data.type}
def _create_raw_txt_file(self, content, basedirname):
filename = "{0}.txt".format(os.path.splitext(basedirname)[0])
file = os.path.abspath(filename)
f = codecs.open(file, 'w', 'utf-8')
f.write(content)
f.close()
return os.path.basename(filename)
def _generate_metafile(self, basedirname, meta_information=None):
"""Generate a metafile with all the extra information
extracted from a file when it is parsed.
Parameters
----------
basedirname : str
            Base name of the input file.
meta_information: ElementTree
ElementTree with the extra information.
"""
if meta_information is not None:
out = open("{0}-extinfo.xml".format(basedirname), "wb")
doc = minidom.parseString(tostring(meta_information,
encoding="utf-8"))
out.write(doc.toprettyxml(encoding='utf-8'))
out.close()
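# Minimal parser sketch (hypothetical data): a two-tier utterance/word
# structure implemented against BaseParser, showing which methods
# GrAFConverter.parse() actually calls.
class ToyParser(BaseParser):
    """Parses a hard-coded utterance with two word children."""
    _words = {"u1": [("w1", "hello"), ("w2", "world")]}
    def get_root_tiers(self):
        return [Tier("utterance")]
    def get_child_tiers_for_tier(self, tier):
        if tier.name == "utterance":
            return [Tier("word")]
        return []
    def get_annotations_for_tier(self, tier, annotation_parent=None):
        if tier.name == "utterance":
            return [Annotation("u1", "hello world")]
        if annotation_parent is not None:
            return [Annotation(i, v)
                    for i, v in self._words[annotation_parent.id]]
        return []
    def tier_has_regions(self, tier):
        return False
    def region_for_annotation(self, annotation):
        return None
    def get_primary_data(self):
        primary_data = PrimaryData()
        primary_data.type = TEXT
        primary_data.content = "hello world"
        return primary_data
# GrAFConverter(ToyParser()).parse() then builds the GrAF graph with the
# tier hierarchy [['utterance', ['word']]].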
|
apache-2.0
| -3,918,818,425,620,900,400
| 28.818803
| 80
| 0.568758
| false
| 4.361723
| false
| false
| false
|
brain-research/mirage-rl-qprop
|
sandbox/rocky/tf/q_functions/continuous_mlp_q_function.py
|
1
|
6100
|
from sandbox.rocky.tf.q_functions.base import QFunction
import numpy as np
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.layers import batch_norm
from sandbox.rocky.tf.policies.base import StochasticPolicy
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
import sandbox.rocky.tf.core.layers as L
class ContinuousMLPQFunction(QFunction, LayersPowered, Serializable):
def __init__(
self,
env_spec,
name='qnet',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
action_merge_layer=-2,
output_nonlinearity=None,
eqf_use_full_qf=False,
eqf_sample_size=1,
mqprop=False,
bn=False):
Serializable.quick_init(self, locals())
assert not env_spec.action_space.is_discrete
self._env_spec = env_spec
with tf.variable_scope(name):
l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
n_layers = len(hidden_sizes) + 1
if n_layers > 1:
action_merge_layer = \
(action_merge_layer % n_layers + n_layers) % n_layers
else:
action_merge_layer = 1
l_hidden = l_obs
for idx, size in enumerate(hidden_sizes):
if bn:
l_hidden = batch_norm(l_hidden)
if idx == action_merge_layer:
l_hidden = L.ConcatLayer([l_hidden, l_action])
l_hidden = L.DenseLayer(
l_hidden,
num_units=size,
nonlinearity=hidden_nonlinearity,
name="h%d" % (idx + 1)
)
if action_merge_layer == n_layers:
l_hidden = L.ConcatLayer([l_hidden, l_action])
l_output = L.DenseLayer(
l_hidden,
num_units=1,
nonlinearity=output_nonlinearity,
name="output"
)
output_var = L.get_output(l_output, deterministic=True)
output_var = tf.reshape(output_var, (-1,))
self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], output_var)
self._output_layer = l_output
self._obs_layer = l_obs
self._action_layer = l_action
self._output_nonlinearity = output_nonlinearity
self.eqf_use_full_qf=eqf_use_full_qf
self.eqf_sample_size=eqf_sample_size
self.mqprop=mqprop
LayersPowered.__init__(self, [l_output])
def get_qval(self, observations, actions):
return self._f_qval(observations, actions)
def get_qval_sym(self, obs_var, action_var, **kwargs):
qvals = L.get_output(
self._output_layer,
{self._obs_layer: obs_var, self._action_layer: action_var},
**kwargs
)
return tf.reshape(qvals, (-1,))
def get_e_qval(self, observations, policy):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info(observations)
means, log_stds = agent_info['mean'], agent_info['log_std']
if self.eqf_use_full_qf and self.eqf_sample_size > 1:
observations = np.repeat(observations, self.eqf_sample_size, axis=0)
means = np.repeat(means, self.eqf_sample_size, axis=0)
stds = np.repeat(np.exp(log_stds), self.eqf_sample_size, axis=0)
                randoms = np.random.randn(*means.shape)
actions = means + stds * randoms
all_qvals = self.get_qval(observations, actions)
qvals = np.mean(all_qvals.reshape((-1,self.eqf_sample_size)),axis=1)
else:
qvals = self.get_qval(observations, means)
else:
actions, _ = policy.get_actions(observations)
qvals = self.get_qval(observations, actions)
return qvals
def _get_e_qval_sym(self, obs_var, policy, **kwargs):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info_sym(obs_var)
mean_var, log_std_var = agent_info['mean'], agent_info['log_std']
if self.eqf_use_full_qf:
assert self.eqf_sample_size > 0
if self.eqf_sample_size == 1:
action_var = tf.random_normal(shape=tf.shape(mean_var))*tf.exp(log_std_var) + mean_var
return self.get_qval_sym(obs_var, action_var, **kwargs), action_var
else: raise NotImplementedError
else:
return self.get_qval_sym(obs_var, mean_var, **kwargs), mean_var
else:
action_var = policy.get_action_sym(obs_var)
return self.get_qval_sym(obs_var, action_var, **kwargs), action_var
def get_e_qval_sym(self, obs_var, policy, **kwargs):
return self._get_e_qval_sym(obs_var, policy, **kwargs)[0]
def get_cv_sym(self, obs_var, action_var, policy, **kwargs):
if self.eqf_use_full_qf:
qvals = self.get_qval_sym(obs_var, action_var, deterministic=True, **kwargs)
e_qvals = self.get_e_qval_sym(obs_var, policy, deterministic=True, **kwargs)
return qvals - e_qvals
else:
if self.mqprop:
# Just use zero-order Taylor expansion (aka just the constant qvals)
qvals, action0 = self._get_e_qval_sym(obs_var, policy, deterministic=True, **kwargs)
return qvals
else:
qvals, action0 = self._get_e_qval_sym(obs_var, policy, deterministic=True, **kwargs)
# Use first-order Taylor expansion
qprimes = tf.gradients(qvals, action0)[0]
deltas = action_var - action0
return tf.reduce_sum(deltas * qprimes, 1)
|
mit
| -6,248,511,351,676,138,000
| 40.216216
| 107
| 0.562787
| false
| 3.605201
| false
| false
| false
|
blockstack/blockstack-server
|
integration_tests/blockstack_integration_tests/scenarios/name_import_expire_pre_reg_expire_pay2ns_multi.py
|
1
|
8928
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
# activate F-day 2017
"""
TEST ENV BLOCKSTACK_EPOCH_1_END_BLOCK 682
TEST ENV BLOCKSTACK_EPOCH_2_END_BLOCK 683
TEST ENV BLOCKSTACK_EPOCH_2_NAMESPACE_LIFETIME_MULTIPLIER 1
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_LIFETIME_MULTIPLIER 1
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_LIFETIME_GRACE_PERIOD 0
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_RECEIVE_FEES_PERIOD 22
"""
import json
import testlib
import virtualchain
import blockstack
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 )
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
def scenario( wallets, **kw ):
testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 3, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey, version_bits=2)
testlib.next_block( **kw )
resp = testlib.blockstack_name_import( "foo.test", wallets[3].addr, "11" * 20, wallets[1].privkey )
if 'error' in resp:
print json.dumps( resp, indent=4 )
return False
testlib.next_block( **kw )
testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
testlib.next_block( **kw )
namespace_rec = testlib.blockstack_cli_get_namespace_blockchain_record("test")
if 'error' in namespace_rec:
print namespace_rec
return False
namespace_balance = testlib.get_balance(namespace_rec['address'])
burn_balance = testlib.get_balance(blockstack.lib.config.BLOCKSTACK_BURN_ADDRESS)
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw ) # expired
res = testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr ) # +name_cost
if 'error' in res:
print res
return False
testlib.next_block( **kw )
res = testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr )
if 'error' in res:
print res
return False
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw ) # expired
res = testlib.blockstack_name_preorder( "foo.test", wallets[3].privkey, wallets[4].addr ) # +name_cost
if 'error' in res:
print res
return False
testlib.next_block( **kw )
res = testlib.blockstack_name_register( "foo.test", wallets[3].privkey, wallets[4].addr )
if 'error' in res:
print res
return False
testlib.next_block( **kw )
testlib.next_block( **kw )
res = testlib.blockstack_name_renew("foo.test", wallets[4].privkey) # +name_cost
if 'error' in res:
print res
return False
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw )
testlib.next_block( **kw ) # expired
res = testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr ) # +name_cost
if 'error' in res:
print res
return False
testlib.next_block( **kw )
res = testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr )
if 'error' in res:
print res
return False
testlib.next_block( **kw )
new_namespace_balance = testlib.get_balance(namespace_rec['address'])
name_rec = testlib.get_name_blockchain_record('foo.test')
name_cost = name_rec['op_fee']
testlib.next_block( **kw )
testlib.next_block( **kw ) # stop fee collection
testlib.next_block( **kw )
testlib.next_block( **kw ) # expired
if new_namespace_balance - namespace_balance != 4*name_cost:
print 'address {} did not get credited'.format(namespace_rec['address'])
print '{} != {} + 4*{}'.format(new_namespace_balance, namespace_balance, name_cost)
return False
# preorder should send to the null burn address now.
res = testlib.blockstack_name_preorder( "foo2.test", wallets[4].privkey, wallets[0].addr ) # does not pay to namespace
if 'error' in res:
print res
return False
# try forcing it to the namespace burn address, to verify that it fails
res = testlib.blockstack_name_preorder( "foo_fail.test", wallets[4].privkey, wallets[0].addr, burn_addr=namespace_rec['address'], expect_fail=True ) # does not pay to namespace (command fails)
if 'error' not in res:
print res
return False
res = testlib.blockstack_name_preorder( "foo_fail.test", wallets[4].privkey, wallets[0].addr, burn_addr=namespace_rec['address'], price={'units': 'BTC', 'amount': name_cost}, safety_checks=False, tx_fee=10000*5 ) # +name_cost
if 'error' in res:
print res
return False
testlib.next_block( **kw )
testlib.expect_snv_fail_at('foo_fail.test', testlib.get_current_block(**kw))
# should be accepted
res = testlib.blockstack_name_register( "foo2.test", wallets[4].privkey, wallets[0].addr )
if 'error' in res:
print res
return False
# should be rejected
res = testlib.blockstack_name_register( "foo_fail.test", wallets[4].privkey, wallets[0].addr, safety_checks=False )
if 'error' in res:
print res
return False
testlib.next_block( **kw )
testlib.expect_snv_fail_at('foo_fail.test', testlib.get_current_block(**kw))
# should have been rejected due to wrong burn address
whois = testlib.blockstack_cli_whois('foo_fail.test')
if 'error' not in whois:
print whois
return False
new_burn_balance = testlib.get_balance(blockstack.lib.config.BLOCKSTACK_BURN_ADDRESS)
new_namespace_balance = testlib.get_balance(namespace_rec['address'])
name_rec_2 = testlib.get_name_blockchain_record('foo2.test')
name_cost_2 = name_rec_2['op_fee']
# namespace should NOT have gotten the fee for foo_fail. It should only have gotten it for foo.test
if new_namespace_balance - namespace_balance < 5*name_cost or new_namespace_balance - namespace_balance > 6*name_cost:
print 'address {} got credited after fee capture period'.format(namespace_rec['address'])
print '{} != {} + 5*{}'.format(new_namespace_balance, namespace_balance, name_cost)
return False
# burn address should have received the fee for the second name
if new_burn_balance - name_cost_2 != burn_balance:
print 'null burn address did not get credited'
print '{} != {} + {}'.format(new_burn_balance, burn_balance, name_cost_2)
return False
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
print "namespace reveal exists"
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
print "no namespace"
return False
if ns['namespace_id'] != 'test':
print "wrong namespace"
return False
for name in ['foo2.test']:
# not preordered
preorder = state_engine.get_name_preorder( name, virtualchain.make_payment_script(wallets[4].addr), wallets[0].addr )
if preorder is not None:
print "preorder exists"
return False
# registered
name_rec = state_engine.get_name( name )
if name_rec is None:
print "name does not exist"
return False
# owned by
if name_rec['address'] != wallets[0].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[0].addr):
print "sender is wrong"
return False
return True
|
gpl-3.0
| -6,916,285,018,192,869,000
| 35.292683
| 230
| 0.660506
| false
| 3.173836
| true
| false
| false
|
diefenbach/django-cba
|
cba/layouts.py
|
1
|
1576
|
from . base import Component
class Grid(Component):
"""A CSS grid layout.
    A grid consists of arbitrary rows and 16 columns per row. See
    http://semantic-ui.com/collections/grid.html for more.
"""
template = "cba/layouts/grid.html"
class Column(Component):
"""A column of a grid.
width
        The width of the column. Valid values are 1-16. A row consists of
        at most 16 columns but can be ended explicitly.
"""
template = "cba/layouts/column.html"
    # TODO: This needs to be moved out of Python, to be independent of the
    # UI system in use
WIDTH = ["love", "one", "two", "three", "four", "five", "six", "seven",
"eight", "nine", "ten", "eleven", "twelve", "thirteen",
"fourteen", "fifteen", "sixteen"]
def __init__(self, id=None, width=16, *args, **kwargs):
super(Column, self).__init__(id, *args, **kwargs)
self.width = self.WIDTH[width]
class Row(Component):
"""A row of a grid.
It can be used to end a row explicitly.
"""
template = "cba/layouts/row.html"
class Split(Component):
"""Splits the screen in two or more panels.
All direct sub components are splitted into an own panel. Split components
can be nested.
direction
The direction of the splitting. One of ``vertical`` or ``horizontal``.
"""
template = "cba/layouts/split.html"
def __init__(self, id=None, direction="vertical", *args, **kwargs):
super(Split, self).__init__(id, *args, **kwargs)
self.direction = direction
|
bsd-3-clause
| 7,044,278,349,596,012,000
| 28.185185
| 107
| 0.607234
| false
| 3.734597
| false
| false
| false
|
lekston/ardupilot
|
Tools/autotest/autotest.py
|
1
|
18930
|
#!/usr/bin/env python
"""
APM automatic test suite
Andrew Tridgell, October 2011
"""
from __future__ import print_function
import atexit
import fnmatch
import glob
import optparse
import os
import shutil
import signal
import sys
import time
import traceback
from apmrover2 import *
from arducopter import *
from quadplane import *
from arduplane import *
from ardusub import *
from pysim import util
from pymavlink import mavutil
from pymavlink.generator import mavtemplate
def buildlogs_dirpath():
return os.getenv("BUILDLOGS", util.reltopdir("../buildlogs"))
def buildlogs_path(path):
'''return a string representing path in the buildlogs directory'''
bits = [buildlogs_dirpath()]
if isinstance(path, list):
bits.extend(path)
else:
bits.append(path)
return os.path.join(*bits)
def get_default_params(atype, binary):
"""Get default parameters."""
# use rover simulator so SITL is not starved of input
HOME = mavutil.location(40.071374969556928, -105.22978898137808, 1583.702759, 246)
if "plane" in binary or "rover" in binary:
frame = "rover"
else:
frame = "+"
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sitl = util.start_SITL(binary, wipe=True, model=frame, home=home, speedup=10, unhide_parameters=True)
mavproxy = util.start_MAVProxy_SITL(atype)
print("Dumping defaults")
idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)'])
if idx == 0:
# we need to restart it after eeprom erase
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
sitl = util.start_SITL(binary, model=frame, home=home, speedup=10)
mavproxy = util.start_MAVProxy_SITL(atype)
idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)')
parmfile = mavproxy.match.group(1)
dest = buildlogs_path('%s-defaults.parm' % atype)
shutil.copy(parmfile, dest)
util.pexpect_close(mavproxy)
util.pexpect_close(sitl)
print("Saved defaults for %s to %s" % (atype, dest))
return True
def build_all():
"""Run the build_all.sh script."""
print("Running build_all.sh")
if util.run_cmd(util.reltopdir('Tools/scripts/build_all.sh'), directory=util.reltopdir('.')) != 0:
print("Failed build_all.sh")
return False
return True
def build_binaries():
"""Run the build_binaries.py script."""
print("Running build_binaries.py")
# copy the script as it changes git branch, which can change the script while running
orig = util.reltopdir('Tools/scripts/build_binaries.py')
copy = util.reltopdir('./build_binaries.py')
shutil.copy2(orig, copy)
# also copy generate_manifest library:
orig_gm = util.reltopdir('Tools/scripts/generate_manifest.py')
copy_gm = util.reltopdir('./generate_manifest.py')
shutil.copy2(orig_gm, copy_gm)
if util.run_cmd(copy, directory=util.reltopdir('.')) != 0:
print("Failed build_binaries.py")
return False
return True
def build_devrelease():
"""Run the build_devrelease.sh script."""
print("Running build_devrelease.sh")
# copy the script as it changes git branch, which can change the script while running
orig = util.reltopdir('Tools/scripts/build_devrelease.sh')
copy = util.reltopdir('./build_devrelease.sh')
shutil.copy2(orig, copy)
if util.run_cmd(copy, directory=util.reltopdir('.')) != 0:
print("Failed build_devrelease.sh")
return False
return True
def build_examples():
"""Build examples."""
for target in 'px4-v2', 'navio':
print("Running build.examples for %s" % target)
try:
util.build_examples(target)
except Exception as e:
print("Failed build_examples on board=%s" % target)
print(str(e))
return False
return True
def build_parameters():
"""Run the param_parse.py script."""
print("Running param_parse.py")
for vehicle in 'ArduPlane', 'ArduCopter', 'ArduSub', 'APMrover2', 'AntennaTracker':
if util.run_cmd([util.reltopdir('Tools/autotest/param_metadata/param_parse.py'), '--vehicle', vehicle], directory=util.reltopdir('.')) != 0:
print("Failed param_parse.py (%s)" % vehicle)
return False
return True
def convert_gpx():
"""Convert any tlog files to GPX and KML."""
mavlog = glob.glob(buildlogs_path("*.tlog"))
passed = True
for m in mavlog:
util.run_cmd(util.reltopdir("modules/mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + m)
gpx = m + '.gpx'
kml = m + '.kml'
try:
util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx, kml))
except CalledProcessError as e:
passed = False
try:
util.run_cmd('zip %s.kmz %s.kml' % (m, m))
except CalledProcessError as e:
passed = False
util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (m, m))
return passed
def test_prerequisites():
"""Check we have the right directories and tools to run tests."""
print("Testing prerequisites")
util.mkdir_p(buildlogs_dirpath())
return True
def alarm_handler(signum, frame):
"""Handle test timeout."""
global results, opts
try:
results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
util.pexpect_close_all()
convert_gpx()
write_fullresults()
os.killpg(0, signal.SIGKILL)
except Exception:
pass
sys.exit(1)
def should_run_step(step):
"""See if a step should be skipped."""
for skip in skipsteps:
if fnmatch.fnmatch(step.lower(), skip.lower()):
return False
return True
__bin_names = {
"ArduCopter" : "arducopter",
"ArduPlane" : "arduplane",
"APMrover2" : "ardurover",
"AntennaTracker" : "antennatracker",
"CopterAVC" : "arducopter-heli",
"QuadPlane" : "arduplane",
"ArduSub" : "ardusub"
}
def binary_path(step, debug=False):
try:
vehicle = step.split(".")[1]
except Exception:
return None
if vehicle in __bin_names:
binary_name = __bin_names[vehicle]
else:
# cope with builds that don't have a specific binary
return None
binary_basedir = "sitl"
binary = util.reltopdir(os.path.join('build', binary_basedir, 'bin', binary_name))
if not os.path.exists(binary):
if os.path.exists(binary + ".exe"):
binary += ".exe"
else:
raise ValueError("Binary (%s) does not exist" % (binary,))
return binary
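# Illustrative sketch (not part of the original script): how step names map
# to SITL binaries through binary_path(); the paths shown assume the
# build/sitl/bin layout used above.
#   binary_path('fly.ArduCopter')   # -> <repo>/build/sitl/bin/arducopter
#   binary_path('drive.APMrover2')  # -> <repo>/build/sitl/bin/ardurover
#   binary_path('build.All')        # -> None (no vehicle-specific binary)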
def run_step(step):
"""Run one step."""
# remove old logs
util.run_cmd('/bin/rm -f logs/*.BIN logs/LASTLOG.TXT')
if step == "prerequisites":
return test_prerequisites()
build_opts = {
"j": opts.j,
"debug": opts.debug,
"clean": not opts.no_clean,
"configure": not opts.no_configure,
}
if step == 'build.ArduPlane':
return util.build_SITL('bin/arduplane', **build_opts)
if step == 'build.APMrover2':
return util.build_SITL('bin/ardurover', **build_opts)
if step == 'build.ArduCopter':
return util.build_SITL('bin/arducopter', **build_opts)
if step == 'build.AntennaTracker':
return util.build_SITL('bin/antennatracker', **build_opts)
if step == 'build.Helicopter':
return util.build_SITL('bin/arducopter-heli', **build_opts)
if step == 'build.ArduSub':
return util.build_SITL('bin/ardusub', **build_opts)
binary = binary_path(step, debug=opts.debug)
if step.startswith("defaults"):
vehicle = step[9:]
return get_default_params(vehicle, binary)
fly_opts = {
"viewerip": opts.viewerip,
"use_map": opts.map,
"valgrind": opts.valgrind,
"gdb": opts.gdb,
"gdbserver": opts.gdbserver,
}
if opts.speedup is not None:
fly_opts["speedup"] = opts.speedup
if step == 'fly.ArduCopter':
arducopter = AutoTestCopter(binary, frame=opts.frame, **fly_opts)
return arducopter.autotest()
if step == 'fly.CopterAVC':
arducopter = AutoTestCopter(binary, **fly_opts)
return arducopter.autotest_heli()
if step == 'fly.ArduPlane':
arduplane = AutoTestPlane(binary, **fly_opts)
return arduplane.autotest()
if step == 'fly.QuadPlane':
quadplane = AutoTestQuadPlane(binary, **fly_opts)
return quadplane.autotest()
if step == 'drive.APMrover2':
apmrover2 = AutoTestRover(binary, frame=opts.frame, **fly_opts)
return apmrover2.autotest()
if step == 'dive.ArduSub':
ardusub = AutoTestSub(binary, **fly_opts)
return ardusub.autotest()
if step == 'build.All':
return build_all()
if step == 'build.Binaries':
return build_binaries()
if step == 'build.DevRelease':
return build_devrelease()
if step == 'build.Examples':
return build_examples()
if step == 'build.Parameters':
return build_parameters()
if step == 'convertgpx':
return convert_gpx()
raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
"""Test result class."""
def __init__(self, name, result, elapsed):
self.name = name
self.result = result
self.elapsed = "%.1f" % elapsed
class TestFile(object):
"""Test result file."""
def __init__(self, name, fname):
self.name = name
self.fname = fname
class TestResults(object):
"""Test results class."""
def __init__(self):
self.date = time.asctime()
self.githash = util.run_cmd('git rev-parse HEAD', output=True, directory=util.reltopdir('.')).strip()
self.tests = []
self.files = []
self.images = []
def add(self, name, result, elapsed):
"""Add a result."""
self.tests.append(TestResult(name, result, elapsed))
def addfile(self, name, fname):
"""Add a result file."""
self.files.append(TestFile(name, fname))
def addimage(self, name, fname):
"""Add a result image."""
self.images.append(TestFile(name, fname))
def addglob(self, name, pattern):
"""Add a set of files."""
for f in glob.glob(buildlogs_path(pattern)):
self.addfile(name, os.path.basename(f))
def addglobimage(self, name, pattern):
"""Add a set of images."""
for f in glob.glob(buildlogs_path(pattern)):
self.addimage(name, os.path.basename(f))
def write_webresults(results_to_write):
"""Write webpage results."""
t = mavtemplate.MAVTemplate()
for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
html = util.loadfile(h)
f = open(buildlogs_path(os.path.basename(h)), mode='w')
t.write(f, html, results_to_write)
f.close()
for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
shutil.copy(f, buildlogs_path(os.path.basename(f)))
def write_fullresults():
"""Write out full results set."""
global results
results.addglob("Google Earth track", '*.kmz')
results.addfile('Full Logs', 'autotest-output.txt')
results.addglob('DataFlash Log', '*-log.bin')
results.addglob("MAVLink log", '*.tlog')
results.addglob("GPX track", '*.gpx')
# results common to all vehicles:
vehicle_files = [ ('{vehicle} build log', '{vehicle}.txt'),
('{vehicle} code size', '{vehicle}.sizes.txt'),
('{vehicle} stack sizes', '{vehicle}.framesizes.txt'),
('{vehicle} defaults', '{vehicle}-defaults.parm'),
('{vehicle} core', '{vehicle}.core'),
('{vehicle} ELF', '{vehicle}.elf'),
]
vehicle_globs = [('{vehicle} log', '{vehicle}-*.BIN'),
]
for vehicle in 'ArduPlane','ArduCopter','APMrover2','AntennaTracker', 'ArduSub':
subs = { 'vehicle': vehicle }
for vehicle_file in vehicle_files:
description = vehicle_file[0].format(**subs)
filename = vehicle_file[1].format(**subs)
results.addfile(description, filename)
for vehicle_glob in vehicle_globs:
description = vehicle_glob[0].format(**subs)
glob = vehicle_glob[1].format(**subs)
results.addglob(description, glob)
results.addglob("CopterAVC log", 'CopterAVC-*.BIN')
results.addfile("CopterAVC core", 'CopterAVC.core')
results.addglob('APM:Libraries documentation', 'docs/libraries/index.html')
results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html')
results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html')
results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html')
results.addglob('APM:Sub documentation', 'docs/ArduSub/index.html')
results.addglobimage("Flight Track", '*.png')
write_webresults(results)
def check_logs(step):
"""Check for log files from a step."""
print("check step: ", step)
if step.startswith('fly.'):
vehicle = step[4:]
elif step.startswith('drive.'):
vehicle = step[6:]
else:
return
logs = glob.glob("logs/*.BIN")
for log in logs:
bname = os.path.basename(log)
newname = buildlogs_path("%s-%s" % (vehicle, bname))
print("Renaming %s to %s" % (log, newname))
shutil.move(log, newname)
corefile = "core"
if os.path.exists(corefile):
newname = buildlogs_path("%s.core" % vehicle)
print("Renaming %s to %s" % (corefile, newname))
shutil.move(corefile, newname)
try:
util.run_cmd('/bin/cp build/sitl/bin/* %s' % buildlogs_dirpath(),
directory=util.reltopdir('.'))
except Exception:
print("Unable to save binary")
def run_tests(steps):
"""Run a list of steps."""
global results
passed = True
failed = []
for step in steps:
util.pexpect_close_all()
t1 = time.time()
print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
try:
if run_step(step):
results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
check_logs(step)
else:
print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
passed = False
failed.append(step)
results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
except Exception as msg:
passed = False
failed.append(step)
print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
traceback.print_exc(file=sys.stdout)
results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
check_logs(step)
if not passed:
print("FAILED %u tests: %s" % (len(failed), failed))
util.pexpect_close_all()
write_fullresults()
return passed
if __name__ == "__main__":
############## main program #############
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')
parser.add_option("--speedup", default=None, type='int', help='speedup to run the simulations at')
parser.add_option("--valgrind", default=False, action='store_true', help='run ArduPilot binaries under valgrind')
parser.add_option("--gdb", default=False, action='store_true', help='run ArduPilot binaries under gdb')
parser.add_option("--debug", default=False, action='store_true', help='make built binaries debug binaries')
parser.add_option("-j", default=None, type='int', help='build CPUs')
parser.add_option("--frame", type='string', default=None, help='specify frame type')
parser.add_option("--gdbserver", default=False, action='store_true', help='run ArduPilot binaries under gdbserver')
parser.add_option("--no-clean", default=False, action='store_true', help='do not clean before building', dest="no_clean")
parser.add_option("--no-configure", default=False, action='store_true', help='do not configure before building', dest="no_configure")
opts, args = parser.parse_args()
steps = [
'prerequisites',
'build.All',
'build.Binaries',
# 'build.DevRelease',
'build.Examples',
'build.Parameters',
'build.ArduPlane',
'defaults.ArduPlane',
'fly.ArduPlane',
'fly.QuadPlane',
'build.APMrover2',
'defaults.APMrover2',
'drive.APMrover2',
'build.ArduCopter',
'defaults.ArduCopter',
'fly.ArduCopter',
'build.Helicopter',
'fly.CopterAVC',
'build.AntennaTracker',
'build.ArduSub',
'defaults.ArduSub',
'dive.ArduSub',
'convertgpx',
]
skipsteps = opts.skip.split(',')
# ensure we catch timeouts
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)
if opts.list:
for step in steps:
print(step)
sys.exit(0)
util.mkdir_p(buildlogs_dirpath())
lckfile = buildlogs_path('autotest.lck')
print("lckfile=%s" % repr(lckfile))
lck = util.lock_file(lckfile)
if lck is None:
print("autotest is locked - exiting. lckfile=(%s)" % (lckfile,))
sys.exit(0)
atexit.register(util.pexpect_close_all)
if len(args) > 0:
# allow a wildcard list of steps
matched = []
for a in args:
matches = [step for step in steps if fnmatch.fnmatch(step.lower(), a.lower())]
if not len(matches):
print("No steps matched {}".format(a))
sys.exit(1)
matched.extend(matches)
steps = matched
# skip steps according to --skip option:
steps_to_run = [ s for s in steps if should_run_step(s) ]
results = TestResults()
try:
if not run_tests(steps_to_run):
sys.exit(1)
except KeyboardInterrupt:
util.pexpect_close_all()
sys.exit(1)
except Exception:
# make sure we kill off any children
util.pexpect_close_all()
raise
|
gpl-3.0
| -2,348,797,625,056,130,600
| 31.414384
| 148
| 0.609245
| false
| 3.532375
| true
| false
| false
|
walchko/pygecko
|
retired/find_geckocore.py
|
1
|
1087
|
# #!/usr/bin/env python3
# # -*- coding: utf-8 -*-
# ##############################################
# # The MIT License (MIT)
# # Copyright (c) 2018 Kevin Walchko
# # see LICENSE for full details
# ##############################################
# # import time
# import argparse
# import os
# from pygecko.transport.beacon import BeaconFinder
#
#
# def handleArgs():
# parser = argparse.ArgumentParser(description='Use multicast to find a geckocore node on the network')
# parser.add_argument('-k', '--key', help='key, default is hostname', default=None)
# args = vars(parser.parse_args())
# return args
#
#
# if __name__ == "__main__":
# args = handleArgs()
# key = args['key']
# if key is None:
# key = os.uname().nodename.split('.')[0].lower()
# finder = BeaconFinder(key)
# resp = finder.search(0,"0")
# if resp:
# print("[GeckoCore]===========================")
# print(" in: {}".format(resp[0]))
# print(" out: {}".format(resp[1]))
# else:
# print("*** No GeckoCore found on this network ***")
|
mit
| -3,143,877,320,339,952,600
| 31.939394
| 107
| 0.516099
| false
| 3.439873
| false
| false
| false
|
yoshrote/valid_model
|
valid_model/descriptors.py
|
1
|
7772
|
from datetime import datetime, timedelta
import six
from .base import Generic, Object
from .exc import ValidationError
from .utils import is_descriptor
class SimpleType(Generic):
"""This descriptor will not attempt to coerce the value on __set__."""
_type_klass = None
_type_label = None
def __set__(self, instance, value):
if value is not None and not isinstance(value, self._type_klass):
raise ValidationError(
"{!r} is not {}".format(value, self._type_label),
self.name
)
return Generic.__set__(self, instance, value)
class EmbeddedObject(Generic):
def __init__(self, class_obj):
self.class_obj = class_obj
def validator(obj):
return isinstance(obj, class_obj)
Generic.__init__(
self, default=class_obj, validator=validator
)
def __set__(self, instance, value):
try:
if isinstance(value, dict):
value = self.class_obj(**value)
return Generic.__set__(self, instance, value)
except ValidationError as ex:
raise ValidationError(
ex.msg,
'{}.{}'.format(self.name, ex.field) if ex.field else self.name
)
class String(Generic):
"""
This descriptor attempts to set a unicode string value.
    If the value is a byte string it will be decoded using UTF-8.
"""
def __set__(self, instance, value):
if value is None or isinstance(value, six.text_type):
pass
elif isinstance(value, six.binary_type):
value = value.decode('utf-8')
else:
raise ValidationError(
"{!r} is not a string".format(value),
self.name
)
return Generic.__set__(self, instance, value)
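# Illustrative sketch (not from the original source): using String on a
# hypothetical Object subclass; it assumes Object() can be constructed with
# the field defaults defined in the base module.
#   class Note(Object):
#       title = String()
#   n = Note()
#   n.title = b'caf\xc3\xa9'   # bytes are decoded to the unicode string u'café'
#   n.title = 42               # raises ValidationError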
class _Number(Generic):
"""This descriptor attempts to converts any a value to a number."""
_number_type = None
_number_label = None
def __set__(self, instance, value):
if value is not None:
number_like = isinstance(value, (six.integer_types, float))
is_bool = isinstance(value, bool)
if not number_like or is_bool:
raise ValidationError(
"{!r} is not {}".format(value, self._number_label),
self.name
)
else:
                # coerce with the numeric type declared by the subclass
                # (int for Integer, float for Float)
                value = self._number_type(value)
return Generic.__set__(self, instance, value)
class Integer(_Number):
"""This descriptor attempts to coerce a number to an integer."""
_number_type = int
_number_label = "an int"
class Float(_Number):
"""This descriptor attempts to coerce a number to a float."""
_number_type = float
_number_label = "a float"
class Bool(Generic):
"""This descriptor attempts to converts any a value to a boolean."""
def __set__(self, instance, value):
if value is not None:
if value in (0, 1) or isinstance(value, bool):
value = bool(value)
else:
raise ValidationError(
"{!r} is not a bool".format(value),
self.name
)
return Generic.__set__(self, instance, value)
class DateTime(SimpleType):
"""This descriptor attempts to set a datetime value."""
_type_klass = datetime
_type_label = "a datetime"
class TimeDelta(SimpleType):
"""This descriptor attempts to set a timedalta value."""
_type_klass = timedelta
_type_label = "a timedelta"
NO_DEFAULT = object()
class _Collection(Generic):
_collection_type = object
_collection_label = None
def __init__(self, default=NO_DEFAULT, value=None, validator=None, mutator=None):
if default is NO_DEFAULT:
default = self._collection_type
Generic.__init__(
self, default=default, validator=validator, mutator=mutator, nullable=False
)
if value is not None and not isinstance(value, Generic):
raise TypeError('value must be None or an instance of Generic')
self.value = value
@staticmethod
def iterate(collection):
return iter(collection)
def recursive_validation(self, element):
"""Validate element of collection against `self.value`."""
dummy = Object()
if self.value is not None:
try:
element = self.value.__set__(dummy, element)
except ValidationError as ex:
raise ValidationError(
ex.msg,
'{}.{}'.format(self.name, ex.field) if ex.field else self.name
)
return element
def add_to_collection(self, collection, element):
raise NotImplementedError("_add_to_collection")
def __set__(self, instance, value):
if value is None:
value = self._collection_type()
elif not isinstance(value, self._collection_type):
raise ValidationError(
"{!r} is not {}".format(value, self._collection_label),
self.name
)
new_value = self._collection_type()
iterable = self.iterate(value)
for element in iterable:
element = self.recursive_validation(element)
self.add_to_collection(new_value, element)
value = new_value
return Generic.__set__(self, instance, value)
class List(_Collection):
_collection_type = list
_collection_label = "a list"
def add_to_collection(self, collection, element):
collection.append(element)
return collection
class Set(_Collection):
_collection_type = set
_collection_label = "a set"
def add_to_collection(self, collection, element):
collection.add(element)
return collection
class Dict(_Collection):
_collection_type = dict
_collection_label = "a dict"
def __init__(self, default=dict, key=None, value=None, validator=None, mutator=None):
_Collection.__init__(
self, default=default, value=value, validator=validator, mutator=mutator
)
if key is not None and not isinstance(key, Generic):
raise TypeError('key must be None or an instance of Generic')
self.key = key
@staticmethod
def iterate(collection):
return six.iteritems(collection)
def recursive_validation(self, element):
"""Validate element of collection against `self.value`."""
dummy = Object()
key, value = element
if self.key is not None:
try:
key = self.key.__set__(dummy, key)
except ValidationError as ex:
raise ValidationError(
ex.msg,
"{} key {}".format(self.name, key)
)
if self.value is not None:
try:
value = self.value.__set__(dummy, value)
except ValidationError as ex:
raise ValidationError(
ex.msg,
"{}['{}']".format(self.name, key)
)
return key, value
def add_to_collection(self, collection, element):
key, value = element
collection[key] = value
return collection
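# Illustrative sketch (not from the original source): collection descriptors
# with per-element validation; the Object subclass below is hypothetical.
#   class Survey(Object):
#       tags = List(value=String())
#       scores = Dict(key=String(), value=Integer())
#   s = Survey()
#   s.tags = ['a', b'b']        # each element is validated/decoded by String()
#   s.scores = {'alice': 3}     # keys and values validated by their descriptors
#   s.scores = {'bob': 'oops'}  # raises ValidationError for field "scores['bob']"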
def descriptors():
"""Generate list of descriptor class names."""
return [
name for name, value in six.iteritems(globals())
if is_descriptor(value) and issubclass(value, Generic)
]
def descriptor_classes():
"""Generate list of descriptor classes."""
return [
value for value in six.itervalues(globals())
if is_descriptor(value) and issubclass(value, Generic)
]
__all__ = ['descriptor_classes'] + descriptors()
|
mit
| 7,817,542,412,266,166,000
| 28.439394
| 89
| 0.573726
| false
| 4.492486
| false
| false
| false
|
walterbender/story
|
collabwrapper.py
|
1
|
33058
|
# Copyright (C) 2015 Walter Bender
# Copyright (C) 2015 Sam Parkinson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
'''
The wrapper module provides an abstraction over the Sugar
collaboration system.
Using CollabWrapper
-------------------
1. Add `get_data` and `set_data` methods to the activity class::
def get_data(self):
# return plain python objects - things that can be encoded
# using the json module
return dict(
text=self._entry.get_text()
)
def set_data(self, data):
# data will be the same object returned by get_data
self._entry.set_text(data.get('text'))
2. Make a CollabWrapper instance::
def __init__(self, handle):
sugar3.activity.activity.Activity.__init__(self, handle)
self._collab = CollabWrapper(self)
self._collab.connect('message', self.__message_cb)
# setup your activity here
self._collab.setup()
3. Post any changes of shared state to the CollabWrapper. The changes
will be sent to other buddies if any are connected, for example::
def __entry_changed_cb(self, *args):
self._collab.post(dict(
action='entry_changed',
new_text=self._entry.get_text()
))
4. Handle incoming messages, for example::
def __message_cb(self, collab, buddy, msg):
action = msg.get('action')
if action == 'entry_changed':
self._entry.set_text(msg.get('new_text'))
'''
import os
import json
import socket
from gettext import gettext as _
import gi
gi.require_version('TelepathyGLib', '0.12')
from gi.repository import GObject
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import TelepathyGLib
import dbus
from dbus import PROPERTIES_IFACE
CHANNEL_INTERFACE = TelepathyGLib.IFACE_CHANNEL
CHANNEL_INTERFACE_GROUP = TelepathyGLib.IFACE_CHANNEL_INTERFACE_GROUP
CHANNEL_TYPE_TEXT = TelepathyGLib.IFACE_CHANNEL_TYPE_TEXT
CHANNEL_TYPE_FILE_TRANSFER = TelepathyGLib.IFACE_CHANNEL_TYPE_FILE_TRANSFER
CONN_INTERFACE_ALIASING = TelepathyGLib.IFACE_CONNECTION_INTERFACE_ALIASING
CONN_INTERFACE = TelepathyGLib.IFACE_CONNECTION
CHANNEL = TelepathyGLib.IFACE_CHANNEL
CLIENT = TelepathyGLib.IFACE_CLIENT
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES = \
TelepathyGLib.ChannelGroupFlags.CHANNEL_SPECIFIC_HANDLES
CONNECTION_HANDLE_TYPE_CONTACT = TelepathyGLib.HandleType.CONTACT
CHANNEL_TEXT_MESSAGE_TYPE_NORMAL = TelepathyGLib.ChannelTextMessageType.NORMAL
SOCKET_ADDRESS_TYPE_UNIX = TelepathyGLib.SocketAddressType.UNIX
SOCKET_ACCESS_CONTROL_LOCALHOST = TelepathyGLib.SocketAccessControl.LOCALHOST
from sugar3.presence import presenceservice
from sugar3.activity.activity import SCOPE_PRIVATE
from sugar3.graphics.alert import NotifyAlert
import logging
_logger = logging.getLogger('CollabWrapper')
ACTION_INIT_REQUEST = '!!ACTION_INIT_REQUEST'
ACTION_INIT_RESPONSE = '!!ACTION_INIT_RESPONSE'
ACTIVITY_FT_MIME = 'x-sugar/from-activity'
class CollabWrapper(GObject.GObject):
'''
The wrapper provides a high level abstraction over the
collaboration system. The wrapper deals with setting up the
channels, encoding and decoding messages, initialization and
alerting the caller to the status.
An activity instance is initially private, but may be shared. Once
shared, an instance will remain shared for as long as the activity
runs. On stop, the journal will preserve the instance as shared,
and on resume the instance will be shared again.
When the caller shares an activity instance, they are the leader,
and other buddies may join. The instance is now a shared activity.
When the caller joins a shared activity, the leader will call
`get_data`, and the caller's `set_data` will be called with the
result.
The `joined` signal is emitted when the caller joins a shared
activity. One or more `buddy_joined` signals will be emitted before
this signal. The signal is not emitted to the caller who first
shared the activity. There are no arguments.
The `buddy_joined` signal is emitted when another buddy joins the
shared activity. At least one will be emitted before the `joined`
signal. The caller will never be mentioned, but is assumed to be
part of the set. The signal passes a
:class:`sugar3.presence.buddy.Buddy` as the only argument.
The `buddy_left` signal is emitted when another user leaves the
shared activity. The signal is not emitted during quit. The signal
passes a :class:`sugar3.presence.buddy.Buddy` as the only argument.
Any buddy may call `post` to send a message to all buddies. Each
buddy will receive a `message` signal.
The `message` signal is emitted when a `post` is received from any
buddy. The signal has two arguments. The first is a
:class:`sugar3.presence.buddy.Buddy`. The second is the message.
    Any buddy may call `send_file_memory` or `send_file_file` to
    transfer a file to another single buddy. A description is to be
    given. The receiving buddy will get an `incoming_file` signal.
The `incoming_file` signal is emitted when a file transfer is
received. The signal has two arguments. The first is a
:class:`IncomingFileTransfer`. The second is the description.
'''
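    # Illustrative sketch (not from the original source): a typical activity
    # wires the wrapper up roughly like this; the handler names are
    # hypothetical.
    #   self._collab = CollabWrapper(self)
    #   self._collab.connect('message', self.__message_cb)
    #   self._collab.connect('buddy_joined', self.__buddy_joined_cb)
    #   self._collab.connect('buddy_left', self.__buddy_left_cb)
    #   self._collab.connect('incoming_file', self.__incoming_file_cb)
    #   self._collab.setup()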
message = GObject.Signal('message', arg_types=[object, object])
joined = GObject.Signal('joined')
buddy_joined = GObject.Signal('buddy_joined', arg_types=[object])
buddy_left = GObject.Signal('buddy_left', arg_types=[object])
incoming_file = GObject.Signal('incoming_file', arg_types=[object, object])
def __init__(self, activity):
_logger.debug('__init__')
GObject.GObject.__init__(self)
self.activity = activity
self.shared_activity = activity.shared_activity
self._leader = False
self._init_waiting = False
self._text_channel = None
self._owner = presenceservice.get_instance().get_owner()
def setup(self):
'''
Setup must be called so that the activity can join or share
if appropriate.
.. note::
As soon as setup is called, any signal, `get_data` or
`set_data` call may occur. This means that the activity
must have set up enough so these functions can work. For
example, call setup at the end of the activity
`__init__` function.
'''
_logger.debug('setup')
# Some glue to know if we are launching, joining, or resuming
# a shared activity.
if self.shared_activity:
# We're joining the activity.
self.activity.connect("joined", self.__joined_cb)
if self.activity.get_shared():
_logger.debug('calling _joined_cb')
self.__joined_cb(self)
else:
_logger.debug('Joining activity...')
self._alert(_('Joining activity...'),
_('Please wait for the connection...'))
else:
self._leader = True
if not self.activity.metadata or self.activity.metadata.get(
'share-scope', SCOPE_PRIVATE) == \
SCOPE_PRIVATE:
# We are creating a new activity instance.
_logger.debug('Off-line')
else:
# We are sharing an old activity instance.
_logger.debug('On-line')
self._alert(_('Resuming shared activity...'),
_('Please wait for the connection...'))
self.activity.connect('shared', self.__shared_cb)
def _alert(self, title, msg=None):
a = NotifyAlert()
a.props.title = title
a.props.msg = msg
self.activity.add_alert(a)
a.connect('response', lambda a, r: self.activity.remove_alert(a))
a.show()
def __shared_cb(self, sender):
''' Callback for when activity is shared. '''
_logger.debug('__shared_cb')
# FIXME: may be called twice, but we should only act once
self.shared_activity = self.activity.shared_activity
self._setup_text_channel()
self._listen_for_channels()
def __joined_cb(self, sender):
'''Callback for when an activity is joined.'''
_logger.debug('__joined_cb')
self.shared_activity = self.activity.shared_activity
if not self.shared_activity:
return
self._setup_text_channel()
self._listen_for_channels()
self._init_waiting = True
self.post({'action': ACTION_INIT_REQUEST})
for buddy in self.shared_activity.get_joined_buddies():
self.buddy_joined.emit(buddy)
self.joined.emit()
def _setup_text_channel(self):
''' Set up a text channel to use for collaboration. '''
_logger.debug('_setup_text_channel')
self._text_channel = _TextChannelWrapper(
self.shared_activity.telepathy_text_chan,
self.shared_activity.telepathy_conn)
# Tell the text channel what callback to use for incoming
# text messages.
self._text_channel.set_received_callback(self.__received_cb)
# Tell the text channel what callbacks to use when buddies
# come and go.
self.shared_activity.connect('buddy-joined', self.__buddy_joined_cb)
self.shared_activity.connect('buddy-left', self.__buddy_left_cb)
def _listen_for_channels(self):
_logger.debug('_listen_for_channels')
conn = self.shared_activity.telepathy_conn
conn.connect_to_signal('NewChannels', self.__new_channels_cb)
def __new_channels_cb(self, channels):
_logger.debug('__new_channels_cb')
conn = self.shared_activity.telepathy_conn
for path, props in channels:
if props[CHANNEL + '.Requested']:
continue # This channel was requested by me
channel_type = props[CHANNEL + '.ChannelType']
if channel_type == CHANNEL_TYPE_FILE_TRANSFER:
self._handle_ft_channel(conn, path, props)
def _handle_ft_channel(self, conn, path, props):
_logger.debug('_handle_ft_channel')
ft = IncomingFileTransfer(conn, path, props)
if ft.description == ACTION_INIT_RESPONSE:
ft.connect('ready', self.__ready_cb)
ft.accept_to_memory()
else:
desc = json.loads(ft.description)
self.incoming_file.emit(ft, desc)
def __ready_cb(self, ft, stream):
_logger.debug('__ready_cb')
if self._init_waiting:
stream.close(None)
# FIXME: The data prop seems to just be the raw pointer
gbytes = stream.steal_as_bytes()
data = gbytes.get_data()
_logger.debug('Got init data from buddy: %r', data)
data = json.loads(data)
self.activity.set_data(data)
self._init_waiting = False
def __received_cb(self, buddy, msg):
'''Process a message when it is received.'''
_logger.debug('__received_cb')
action = msg.get('action')
if action == ACTION_INIT_REQUEST:
if self._leader:
data = self.activity.get_data()
if data is not None:
data = json.dumps(data)
OutgoingBlobTransfer(
buddy,
self.shared_activity.telepathy_conn,
data,
self.get_client_name(),
ACTION_INIT_RESPONSE,
ACTIVITY_FT_MIME)
return
if buddy:
nick = buddy.props.nick
else:
nick = '???'
_logger.debug('Received message from %s: %r', nick, msg)
self.message.emit(buddy, msg)
def send_file_memory(self, buddy, data, description):
'''
Send a one to one file transfer from memory to a buddy. The
buddy will get the file transfer and description through the
        `incoming_file` signal.
Args:
buddy (sugar3.presence.buddy.Buddy), buddy to send to.
data (str), the data to send.
description (object), a json encodable description for the
transfer. This will be given to the
                             `incoming_file` signal at the buddy.
'''
OutgoingBlobTransfer(
buddy,
self.shared_activity.telepathy_conn,
data,
self.get_client_name(),
json.dumps(description),
ACTIVITY_FT_MIME)
def send_file_file(self, buddy, path, description):
'''
Send a one to one file transfer from a filesystem path to a
given buddy. The buddy will get the file transfer and
        description through the `incoming_file` signal.
Args:
buddy (sugar3.presence.buddy.Buddy), buddy to send to.
path (str), path of the file containing the data to send.
description (object), a json encodable description for the
transfer. This will be given to the
                             `incoming_file` signal at the buddy.
'''
OutgoingFileTransfer(
buddy,
self.shared_activity.telepathy_conn,
path,
self.get_client_name(),
json.dumps(description),
ACTIVITY_FT_MIME)
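    # Illustrative sketch (not from the original source): sending data to a
    # buddy from memory or from disk; the path and descriptions below are
    # hypothetical. The receiver gets them through its `incoming_file` signal.
    #   self._collab.send_file_memory(buddy, 'hello', {'kind': 'greeting'})
    #   self._collab.send_file_file(buddy, '/tmp/photo.png', {'kind': 'photo'})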
def post(self, msg):
'''
Send a message to all buddies. If the activity is not shared,
no message is sent.
Args:
msg (object): json encodable object to send,
eg. :class:`dict` or :class:`str`.
'''
if self._text_channel is not None:
self._text_channel.post(msg)
def __buddy_joined_cb(self, sender, buddy):
'''A buddy joined.'''
self.buddy_joined.emit(buddy)
def __buddy_left_cb(self, sender, buddy):
'''A buddy left.'''
self.buddy_left.emit(buddy)
def get_client_name(self):
'''
Get the name of the activity's telepathy client.
Returns: str, telepathy client name
'''
return CLIENT + '.' + self.activity.get_bundle_id()
@GObject.property
def leader(self):
'''
        True if this client is the leader in this activity. The
way the leader is decided may change, however there should only
ever be one leader for an activity.
'''
return self._leader
@GObject.property
def owner(self):
'''
Ourselves, :class:`sugar3.presence.buddy.Owner`
'''
return self._owner
FT_STATE_NONE = 0
FT_STATE_PENDING = 1
FT_STATE_ACCEPTED = 2
FT_STATE_OPEN = 3
FT_STATE_COMPLETED = 4
FT_STATE_CANCELLED = 5
FT_REASON_NONE = 0
FT_REASON_REQUESTED = 1
FT_REASON_LOCAL_STOPPED = 2
FT_REASON_REMOTE_STOPPED = 3
FT_REASON_LOCAL_ERROR = 4
FT_REASON_REMOTE_ERROR = 5
class _BaseFileTransfer(GObject.GObject):
'''
The base file transfer should not be used directly. It is used as a
base class for the incoming and outgoing file transfers.
Props:
filename (str), metadata provided by the buddy
file_size (str), size of the file being sent/received, in bytes
description (str), metadata provided by the buddy
mime_type (str), metadata provided by the buddy
buddy (:class:`sugar3.presence.buddy.Buddy`), other party
in the transfer
reason_last_change (FT_REASON_*), reason for the last state change
GObject Props:
state (FT_STATE_*), current state of the transfer
transferred_bytes (int), number of bytes transferred so far
'''
def __init__(self):
GObject.GObject.__init__(self)
self._state = FT_STATE_NONE
self._transferred_bytes = 0
self.channel = None
self.buddy = None
self.filename = None
self.file_size = None
self.description = None
self.mime_type = None
self.reason_last_change = FT_REASON_NONE
def set_channel(self, channel):
'''
Setup the file transfer to use a given telepathy channel. This
should only be used by direct subclasses of the base file transfer.
'''
self.channel = channel
self.channel[CHANNEL_TYPE_FILE_TRANSFER].connect_to_signal(
'FileTransferStateChanged', self.__state_changed_cb)
self.channel[CHANNEL_TYPE_FILE_TRANSFER].connect_to_signal(
'TransferredBytesChanged', self.__transferred_bytes_changed_cb)
self.channel[CHANNEL_TYPE_FILE_TRANSFER].connect_to_signal(
'InitialOffsetDefined', self.__initial_offset_defined_cb)
channel_properties = self.channel[PROPERTIES_IFACE]
props = channel_properties.GetAll(CHANNEL_TYPE_FILE_TRANSFER)
self._state = props['State']
self.filename = props['Filename']
self.file_size = props['Size']
self.description = props['Description']
self.mime_type = props['ContentType']
def __transferred_bytes_changed_cb(self, transferred_bytes):
_logger.debug('__transferred_bytes_changed_cb %r', transferred_bytes)
self.props.transferred_bytes = transferred_bytes
def _set_transferred_bytes(self, transferred_bytes):
self._transferred_bytes = transferred_bytes
def _get_transferred_bytes(self):
return self._transferred_bytes
transferred_bytes = GObject.property(type=int,
default=0,
getter=_get_transferred_bytes,
setter=_set_transferred_bytes)
def __initial_offset_defined_cb(self, offset):
_logger.debug('__initial_offset_defined_cb %r', offset)
self.initial_offset = offset
def __state_changed_cb(self, state, reason):
_logger.debug('__state_changed_cb %r %r', state, reason)
self.reason_last_change = reason
self.props.state = state
def _set_state(self, state):
self._state = state
def _get_state(self):
return self._state
state = GObject.property(type=int, getter=_get_state, setter=_set_state)
def cancel(self):
'''
Request that telepathy close the file transfer channel
Spec: http://telepathy.freedesktop.org/spec/Channel.html#Method:Close
'''
self.channel[CHANNEL].Close()
class IncomingFileTransfer(_BaseFileTransfer):
'''
An incoming file transfer from another buddy. You need to first accept
the transfer (either to memory or to a file). Then you need to listen
to the state and wait until the transfer is completed. Then you can
read the file that it was saved to, or access the
:class:`Gio.MemoryOutputStream` from the `output` property.
The `output` property is different depending on how the file was accepted.
If the file was accepted to a file on the file system, it is a string
representing the path to the file. If the file was accepted to memory,
it is a :class:`Gio.MemoryOutputStream`.
'''
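    # Illustrative sketch (not from the original source): handling the
    # `incoming_file` signal emitted by CollabWrapper; the handler names are
    # hypothetical and the body mirrors CollabWrapper.__ready_cb above.
    #   def __incoming_file_cb(self, collab, ft, description):
    #       ft.connect('ready', self.__ready_cb)
    #       ft.accept_to_memory()  # or ft.accept_to_file('/tmp/new-file')
    #   def __ready_cb(self, ft, stream):
    #       stream.close(None)
    #       data = stream.steal_as_bytes().get_data()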
ready = GObject.Signal('ready', arg_types=[object])
def __init__(self, connection, object_path, props):
_BaseFileTransfer.__init__(self)
channel = {}
proxy = dbus.Bus().get_object(connection.bus_name, object_path)
channel[PROPERTIES_IFACE] = dbus.Interface(proxy, PROPERTIES_IFACE)
channel[CHANNEL] = dbus.Interface(proxy, CHANNEL)
channel[CHANNEL_TYPE_FILE_TRANSFER] = dbus.Interface(
proxy, CHANNEL_TYPE_FILE_TRANSFER)
self.set_channel(channel)
self.connect('notify::state', self.__notify_state_cb)
self._destination_path = None
self._output_stream = None
self._socket_address = None
self._socket = None
self._splicer = None
def accept_to_file(self, destination_path):
'''
        Accept the file transfer and write it to a new file. The
        destination path must not already exist.
Args:
destination_path (str): the path where a new file will be
created and saved to
'''
if os.path.exists(destination_path):
raise ValueError('Destination path already exists: %r' %
destination_path)
self._destination_path = destination_path
self._accept()
def accept_to_memory(self):
'''
Accept the file transfer. Once the state is FT_STATE_OPEN, a
        :class:`Gio.MemoryOutputStream` will be accessible via the
        output prop.
'''
self._destination_path = None
self._accept()
def _accept(self):
channel_ft = self.channel[CHANNEL_TYPE_FILE_TRANSFER]
self._socket_address = channel_ft.AcceptFile(
SOCKET_ADDRESS_TYPE_UNIX,
SOCKET_ACCESS_CONTROL_LOCALHOST,
'',
0,
byte_arrays=True)
def __notify_state_cb(self, file_transfer, pspec):
_logger.debug('__notify_state_cb %r', self.props.state)
if self.props.state == FT_STATE_OPEN:
# Need to hold a reference to the socket so that python doesn't
# close the fd when it goes out of scope
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(self._socket_address)
input_stream = Gio.UnixInputStream.new(self._socket.fileno(), True)
if self._destination_path is not None:
destination_file = Gio.File.new_for_path(
self._destination_path)
if self.initial_offset == 0:
self._output_stream = destination_file.create(
Gio.FileCreateFlags.PRIVATE, None)
else:
self._output_stream = destination_file.append_to()
else:
if hasattr(Gio.MemoryOutputStream, 'new_resizable'):
self._output_stream = \
Gio.MemoryOutputStream.new_resizable()
else:
self._output_stream = Gio.MemoryOutputStream()
self._output_stream.splice_async(
input_stream,
Gio.OutputStreamSpliceFlags.CLOSE_SOURCE |
Gio.OutputStreamSpliceFlags.CLOSE_TARGET,
GLib.PRIORITY_LOW, None, self.__splice_done_cb, None)
def __splice_done_cb(self, output_stream, res, user):
_logger.debug('__splice_done_cb')
self.ready.emit(self._destination_path or self._output_stream)
@GObject.Property
def output(self):
return self._destination_path or self._output_stream
class _BaseOutgoingTransfer(_BaseFileTransfer):
'''
This class provides the base of an outgoing file transfer.
You can override the `_get_input_stream` method to return any type of
Gio input stream. This will then be used to provide the file if
requested by the application. You also need to call `_create_channel`
with the length of the file in bytes during your `__init__`.
Args:
buddy (sugar3.presence.buddy.Buddy), who to send the transfer to
conn (telepathy.client.conn.Connection), telepathy connection to
use to send the transfer. Eg. `shared_activity.telepathy_conn`
filename (str), metadata sent to the receiver
description (str), metadata sent to the receiver
mime (str), metadata sent to the receiver
'''
def __init__(self, buddy, conn, filename, description, mime):
_BaseFileTransfer.__init__(self)
self.connect('notify::state', self.__notify_state_cb)
self._socket_address = None
self._socket = None
self._splicer = None
self._conn = conn
self._filename = filename
self._description = description
self._mime = mime
self.buddy = buddy
def _create_channel(self, file_size):
object_path, properties_ = self._conn.CreateChannel(dbus.Dictionary({
CHANNEL + '.ChannelType': CHANNEL_TYPE_FILE_TRANSFER,
CHANNEL + '.TargetHandleType': CONNECTION_HANDLE_TYPE_CONTACT,
CHANNEL + '.TargetHandle': self.buddy.contact_handle,
CHANNEL_TYPE_FILE_TRANSFER + '.Filename': self._filename,
CHANNEL_TYPE_FILE_TRANSFER + '.Description': self._description,
CHANNEL_TYPE_FILE_TRANSFER + '.Size': file_size,
CHANNEL_TYPE_FILE_TRANSFER + '.ContentType': self._mime,
CHANNEL_TYPE_FILE_TRANSFER + '.InitialOffset': 0}, signature='sv'))
channel = {}
proxy = dbus.Bus().get_object(self._conn.bus_name, object_path)
channel[PROPERTIES_IFACE] = dbus.Interface(proxy, PROPERTIES_IFACE)
channel[CHANNEL] = dbus.Interface(proxy, CHANNEL)
channel[CHANNEL_TYPE_FILE_TRANSFER] = dbus.Interface(
proxy, CHANNEL_TYPE_FILE_TRANSFER)
self.set_channel(channel)
channel_file_transfer = self.channel[CHANNEL_TYPE_FILE_TRANSFER]
self._socket_address = channel_file_transfer.ProvideFile(
SOCKET_ADDRESS_TYPE_UNIX, SOCKET_ACCESS_CONTROL_LOCALHOST, '',
byte_arrays=True)
def _get_input_stream(self):
raise NotImplementedError()
def __notify_state_cb(self, file_transfer, pspec):
if self.props.state == FT_STATE_OPEN:
# Need to hold a reference to the socket so that python doesn't
            # close the fd when it goes out of scope
self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(self._socket_address)
output_stream = Gio.UnixOutputStream.new(
self._socket.fileno(), True)
input_stream = self._get_input_stream()
output_stream.splice_async(
input_stream,
Gio.OutputStreamSpliceFlags.CLOSE_SOURCE |
Gio.OutputStreamSpliceFlags.CLOSE_TARGET,
GLib.PRIORITY_LOW, None, None, None)
class OutgoingFileTransfer(_BaseOutgoingTransfer):
'''
An outgoing file transfer to send from a file (on the computer's file
system).
Note that the `path` argument is the path for the file that will be
sent, whereas the `filename` argument is only for metadata.
Args:
path (str), path of the file to send
'''
def __init__(self, buddy, conn, path, filename, description, mime):
_BaseOutgoingTransfer.__init__(
self, buddy, conn, filename, description, mime)
self._path = path
file_size = os.stat(path).st_size
self._create_channel(file_size)
def _get_input_stream(self):
return Gio.File.new_for_path(self._path).read(None)
class OutgoingBlobTransfer(_BaseOutgoingTransfer):
'''
An outgoing file transfer to send from a string in memory.
Args:
blob (str), data to send
'''
def __init__(self, buddy, conn, blob, filename, description, mime):
_BaseOutgoingTransfer.__init__(
self, buddy, conn, filename, description, mime)
self._blob = blob
self._create_channel(len(self._blob))
def _get_input_stream(self):
return Gio.MemoryInputStream.new_from_data(self._blob, None)
class _TextChannelWrapper(object):
'''Wrapper for a telepathy Text Channel'''
def __init__(self, text_chan, conn):
'''Connect to the text channel'''
self._activity_cb = None
self._activity_close_cb = None
self._text_chan = text_chan
self._conn = conn
self._signal_matches = []
m = self._text_chan[CHANNEL_INTERFACE].connect_to_signal(
'Closed', self._closed_cb)
self._signal_matches.append(m)
def post(self, msg):
if msg is not None:
_logger.debug('post')
self._send(json.dumps(msg))
def _send(self, text):
'''Send text over the Telepathy text channel.'''
_logger.debug('sending %s' % text)
if self._text_chan is not None:
self._text_chan[CHANNEL_TYPE_TEXT].Send(
CHANNEL_TEXT_MESSAGE_TYPE_NORMAL, text)
def close(self):
'''Close the text channel.'''
_logger.debug('Closing text channel')
try:
self._text_chan[CHANNEL_INTERFACE].Close()
except Exception:
_logger.debug('Channel disappeared!')
self._closed_cb()
def _closed_cb(self):
'''Clean up text channel.'''
for match in self._signal_matches:
match.remove()
self._signal_matches = []
self._text_chan = None
if self._activity_close_cb is not None:
self._activity_close_cb()
def set_received_callback(self, callback):
'''Connect the function callback to the signal.
callback -- callback function taking buddy and text args
'''
if self._text_chan is None:
return
self._activity_cb = callback
m = self._text_chan[CHANNEL_TYPE_TEXT].connect_to_signal(
'Received', self._received_cb)
self._signal_matches.append(m)
def handle_pending_messages(self):
'''Get pending messages and show them as received.'''
for identity, timestamp, sender, type_, flags, text in \
self._text_chan[
CHANNEL_TYPE_TEXT].ListPendingMessages(False):
self._received_cb(identity, timestamp, sender, type_, flags, text)
def _received_cb(self, identity, timestamp, sender, type_, flags, text):
'''Handle received text from the text channel.
Converts sender to a Buddy.
Calls self._activity_cb which is a callback to the activity.
'''
_logger.debug('received_cb %r %s' % (type_, text))
if type_ != 0:
# Exclude any auxiliary messages
return
msg = json.loads(text)
if self._activity_cb:
try:
self._text_chan[CHANNEL_INTERFACE_GROUP]
except Exception:
# One to one XMPP chat
nick = self._conn[
CONN_INTERFACE_ALIASING].RequestAliases([sender])[0]
buddy = {'nick': nick, 'color': '#000000,#808080'}
_logger.debug('exception: received from sender %r buddy %r' %
(sender, buddy))
else:
# XXX: cache these
buddy = self._get_buddy(sender)
_logger.debug('Else: received from sender %r buddy %r' %
(sender, buddy))
self._activity_cb(buddy, msg)
self._text_chan[
CHANNEL_TYPE_TEXT].AcknowledgePendingMessages([identity])
else:
_logger.debug('Throwing received message on the floor'
' since there is no callback connected. See'
' set_received_callback')
def set_closed_callback(self, callback):
'''Connect a callback for when the text channel is closed.
callback -- callback function taking no args
'''
_logger.debug('set closed callback')
self._activity_close_cb = callback
def _get_buddy(self, cs_handle):
'''Get a Buddy from a (possibly channel-specific) handle.'''
# XXX This will be made redundant once Presence Service
# provides buddy resolution
# Get the Presence Service
pservice = presenceservice.get_instance()
# Get the Telepathy Connection
tp_name, tp_path = pservice.get_preferred_connection()
obj = dbus.Bus().get_object(tp_name, tp_path)
conn = dbus.Interface(obj, CONN_INTERFACE)
group = self._text_chan[CHANNEL_INTERFACE_GROUP]
my_csh = group.GetSelfHandle()
if my_csh == cs_handle:
handle = conn.GetSelfHandle()
elif group.GetGroupFlags() & \
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
handle = group.GetHandleOwners([cs_handle])[0]
else:
handle = cs_handle
# XXX: deal with failure to get the handle owner
assert handle != 0
return pservice.get_buddy_by_telepathy_handle(
tp_name, tp_path, handle)
|
gpl-3.0
| 3,951,370,830,328,633,000
| 36.438279
| 79
| 0.616038
| false
| 4.068176
| false
| false
| false
|
amsimoes/bat-country
|
batcountry/batcountry.py
|
1
|
6348
|
# import the necessary packages
from __future__ import print_function
from google.protobuf import text_format
from cStringIO import StringIO
from PIL import Image
import scipy.ndimage as nd
import numpy as np
import caffe
import os
class BatCountry:
def __init__(self, base_path, deploy_path, model_path,
patch_model="./tmp.prototxt", mean=(104.0, 116.0, 122.0),
channels=(2, 1, 0)):
# if the deploy path is None, set the default
if deploy_path is None:
deploy_path = base_path + "/deploy.prototxt"
# if the model path is None, set it to the default GoogleLeNet model
if model_path is None:
model_path = base_path + "/imagenet.caffemodel"
# check to see if the model should be patched to compute gradients
if patch_model:
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(deploy_path).read(), model)
model.force_backward = True
f = open(patch_model, "w")
f.write(str(model))
f.close()
# load the network and store the patched model path
self.net = caffe.Classifier(patch_model, model_path, mean=np.float32(mean),
channel_swap=channels)
self.patch_model = patch_model
def dream(self, image, iter_n, octave_n, octave_scale=None,
end="inception_4c/output", clip=True, step_fn=None, objective_fn=None,
preprocess_fn=None, deprocess_fn=None, verbose=True, visualize=False,
**step_params):
if iter_n is None:
iter_n = 10
if octave_n is None:
octave_n = 4
if octave_scale is None:
octave_scale = 1.4
# if a step function has not been supplied, initialize it as the
# standard gradient ascent step
if step_fn is None:
step_fn = BatCountry.gradient_ascent_step
# if the objective function has not been supplied, initialize it
# as the L2 objective
if objective_fn is None:
objective_fn = BatCountry.L2_objective
# if the preprocess function has not been supplied, initialize it
if preprocess_fn is None:
preprocess_fn = BatCountry.preprocess
# if the deprocess function has not been supplied, initialize it
if deprocess_fn is None:
deprocess_fn = BatCountry.deprocess
# initialize the visualization list
visualizations = []
# prepare base image_dims for all octaves
octaves = [preprocess_fn(self.net, image)]
for i in xrange(octave_n - 1):
octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale,
1.0 / octave_scale), order=1))
# allocate image for network-produced details
detail = np.zeros_like(octaves[-1])
src = self.net.blobs["data"]
for octave, octave_base in enumerate(octaves[::-1]):
h, w = octave_base.shape[-2:]
if octave > 0:
# upscale details from the previous octave
h1, w1 = detail.shape[-2:]
detail = nd.zoom(detail, (1, 1.0 * h/ h1, 1.0 * w / w1), order=1)
# resize the network's input image size
src.reshape(1, 3, h, w)
src.data[0] = octave_base + detail
for i in xrange(iter_n):
step_fn(self.net, end=end, clip=clip, objective_fn=objective_fn,
**step_params)
# visualization
vis = deprocess_fn(self.net, src.data[0])
# adjust image contrast if clipping is disabled
if not clip:
vis = vis * (255.0 / np.percentile(vis, 99.98))
if verbose:
print("octave={}, iter={}, layer={}, image_dim={}".format(octave,
i, end, vis.shape))
# check to see if the visualization list should be
# updated
if visualize:
k = "octave_{}-iter_{}-layer_{}".format(octave, i,
end.replace("/", "_"))
visualizations.append((k, vis))
# extract details produced on the current octave
detail = src.data[0] - octave_base
# grab the resulting image
r = deprocess_fn(self.net, src.data[0])
# check to see if the visualizations should be included
if visualize:
r = (r, visualizations)
return r
@staticmethod
def gradient_ascent_step(net, step_size=1.5, end="inception_4c/output",
jitter=32, clip=True, objective_fn=None, **objective_params):
# if the objective function is None, initialize it as
# the standard L2 objective
if objective_fn is None:
objective_fn = BatCountry.L2_objective
# input image is stored in Net's 'data' blob
src = net.blobs["data"]
dst = net.blobs[end]
# apply jitter shift
ox, oy = np.random.randint(-jitter, jitter + 1, 2)
src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)
net.forward(end=end)
objective_fn(dst, **objective_params)
net.backward(start=end)
g = src.diff[0]
# apply normalized ascent step to the input image
src.data[:] += step_size / np.abs(g).mean() * g
# unshift image
src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)
		# clip pixel values to keep them inside the valid image range
if clip:
bias = net.transformer.mean["data"]
src.data[:] = np.clip(src.data, -bias, 255 - bias)
def layers(self):
# return the layers of the network
return self.net._layer_names
def cleanup(self):
# remove the patched model from disk
os.remove(self.patch_model)
def prepare_guide(self, image, end="inception_4c/output", maxW=224, maxH=224,
preprocess_fn=None):
# if the preprocess function has not been supplied, initialize it
if preprocess_fn is None:
preprocess_fn = BatCountry.preprocess
# grab dimensions of input image
(w, h) = image.size
# GoogLeNet was trained on images with maximum width and heights
# of 224 pixels -- if either dimension is larger than 224 pixels,
# then we'll need to do some resizing
		nW, nH = maxW, maxH
		if w != nW or h != nH:
image = np.float32(image.resize((nW, nH), Image.BILINEAR))
(src, dst) = (self.net.blobs["data"], self.net.blobs[end])
src.reshape(1, 3, nH, nW)
src.data[0] = preprocess_fn(self.net, image)
self.net.forward(end=end)
guide_features = dst.data[0].copy()
return guide_features
@staticmethod
def L2_objective(dst):
dst.diff[:] = dst.data
@staticmethod
def guided_objective(dst, objective_features):
x = dst.data[0].copy()
y = objective_features
ch = x.shape[0]
x = x.reshape(ch,-1)
y = y.reshape(ch,-1)
# compute the matrix of dot-products with guide features
A = x.T.dot(y)
# select ones that match best
dst.diff[0].reshape(ch, -1)[:] = y[:,A.argmax(1)]
@staticmethod
def preprocess(net, img):
return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean["data"]
@staticmethod
def deprocess(net, img):
return np.dstack((img + net.transformer.mean["data"])[::-1])
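# Illustrative sketch (not from the original source): typical usage; the
# model directory and image paths are hypothetical.
#   bc = BatCountry("models/bvlc_googlenet", deploy_path=None, model_path=None)
#   img = np.float32(Image.open("input.jpg"))
#   result = bc.dream(img, iter_n=10, octave_n=4)
#   Image.fromarray(np.uint8(result)).save("output.jpg")
#   bc.cleanup()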
|
mit
| -3,119,327,493,951,980,500
| 28.525581
| 78
| 0.675961
| false
| 2.93617
| false
| false
| false
|
KonradBreitsprecher/espresso
|
doc/tutorials/09-swimmer_reactions/EXERCISES/reaction.py
|
1
|
9461
|
################################################################################
# #
# Copyright (C) 2010,2011,2012,2013,2014,2015,2016 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Catalytic Reactions: Enhanced Diffusion Tutorial #
# #
################################################################################
from __future__ import print_function
import numpy as np
import os
import sys
import time
import espressomd
from espressomd import assert_features
from espressomd.observables import ParticlePositions, ParticleBodyAngularMomentum
from espressomd.correlators import Correlator
from espressomd.reaction import Reaction
################################################################################
# Read in the active velocity from the command prompt
if len(sys.argv) != 2:
print("Usage:",sys.argv[0],"<passive/active = 0/1>")
exit()
active = int(sys.argv[1])
if (active != 0) and (active != 1):
print("Usage:",sys.argv[0],"<passive/active = 0/1>")
exit()
# Set the parameters
box_l = 10
radius = 3.0
csmall = 0.1
rate = 1000.0
# Print input parameters
print("Box length: {}".format(box_l))
print("Colloid radius: {}".format(radius))
print("Particle concentration: {}".format(csmall))
print("Reaction rate: {}".format(rate))
print("Active or Passive: {}".format(active))
# Create output directory
if active == 0:
outdir = "./passive-system"
else:
outdir = "./active-system"
try:
os.makedirs(outdir)
except:
print("INFO: Directory \"{}\" exists".format(outdir))
################################################################################
# Setup system parameters
equi_steps = 250
equi_length = 100
prod_steps = 2000
prod_length = 100
dt = 0.01
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 1.1*radius
# Set up the random seeds
system.seed = np.random.randint(0,2**31-1)
################################################################################
# Thermostat parameters
# Catalyzer is assumed to be larger, thus larger friction
frict_trans_colloid = 20.0
frict_rot_colloid = 20.0
# Particles are small and have smaller friction
frict_trans_part = 1.0
frict_rot_part = 1.0
# Temperature
temp = 1.0
################################################################################
# Set up the swimmer
## Exercise 1 ##
# Determine the initial position of the particle, which
# should be in the center of the box.
x0pnt = ...
y0pnt = ...
z0pnt = ...
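# (One possible solution, for reference only and not necessarily the intended
# answer: the center of the box is at x0pnt = y0pnt = z0pnt = 0.5 * box_l.)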
# Note that the swimmer needs to rotate freely
cent = len(system.part)
system.part.add(id=cent,pos=[x0pnt,y0pnt,z0pnt],type=0,temp=temp,
gamma=frict_trans_colloid,
gamma_rot=frict_rot_colloid,
rotation=[1,1,1])
# Set up the particles
## Exercise 2 ##
# Above, we have set the concentration of the particles in the
# variable $csmall. The concentration of both species of particles is
# equal. Determine *how many* particles of one species there are.
# There are two species of equal concentration
nB = ...
nA = nB
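# (One possible way to obtain this, for reference only: the particle number
# is concentration times box volume, e.g. nB = int(csmall * box_l**3).)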
print("Number of reactive A particles: {}".format(nB))
print("Number of reactive B particles: {}".format(nA))
for i in range(nA):
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# Prevent overlapping the colloid
while (x-x0pnt)**2 + (y-y0pnt)**2 + (z-z0pnt)**2 < 1.15*radius**2:
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# reactants and products do not need to rotate
system.part.add(pos=[x,y,z],type=1,temp=temp,
gamma=frict_trans_part,
gamma_rot=frict_rot_part,
rotation=[0,0,0])
for i in range(nB):
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# Prevent overlapping the colloid
while (x-x0pnt)**2 + (y-y0pnt)**2 + (z-z0pnt)**2 < 1.15*radius**2:
x = box_l*np.random.random()
y = box_l*np.random.random()
z = box_l*np.random.random()
# reactants and products do not need to rotate
system.part.add(pos=[x,y,z],type=2,temp=temp,
gamma=frict_trans_part,
gamma_rot=frict_rot_part,
rotation=[0,0,0])
print("box: {}, npart: {}".format(system.box_l,len(system.part)))
################################################################################
# Set up the WCA potential
## Exercise 3 ##
# Why are there two different cutoff lengths for the LJ interaction
# catalyzer/product and catalyzer/reactant?
eps = 5.0
sig = 1.0
shift = 0.25
roff = radius - 0.5*sig
# central and A particles
cut = 2**(1/6.)*sig
system.non_bonded_inter[0,1].lennard_jones.set_params(epsilon=eps, sigma=sig, cutoff=cut, shift=shift, offset=roff)
# central and B particles (larger cutoff)
cut = 1.5*sig
system.non_bonded_inter[0,2].lennard_jones.set_params(epsilon=eps, sigma=sig, cutoff=cut, shift=shift, offset=roff)
################################################################################
# Set up the reaction
cat_range = radius + 1.0*sig
cat_rate = rate
## Exercise 4 ##
# We have read the activity parameter from the command line into
# $active, where 0 means off and 1 means on. When $active = 0 we can
# simply go on, but when $active = 1 we have to set up the reaction.
# Check the $active parameter and setup a reaction for the catalyzer
# of type 0 with the reactants of type 1 and products of type 2. The
# reaction range is stored in $cat_range, the reaction rate in
# $cat_rate. Use the number-conserving scheme by setting swap on.
...
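# A hedged sketch of one possible reaction setup (the parameter names are an
# assumption based on the espressomd.reaction.Reaction class imported above;
# check the User Guide of the targeted ESPResSo version before relying on it):
#   if active == 1:
#       Reaction(reactant_type=1, catalyzer_type=0, product_type=2,
#                ct_range=cat_range, ct_rate=cat_rate, swap=True)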
################################################################################
# Perform warmup
cap = 1.0
warm_length = 100
## Exercise 5 ##
# Consult the User Guide for minimize_energy to find out how it
# differs from warmup with explicit force-capping.
system.minimize_energy.init(f_max=cap,max_steps=warm_length,gamma=1.0/20.0,max_displacement=0.05)
system.minimize_energy.minimize()
################################################################################
# Enable the thermostat
## Exercise 6 ##
# Why do we enable the thermostat only after warmup?
system.thermostat.set_langevin(kT=temp, gamma=frict_trans_colloid)
################################################################################
# Perform equilibration
# Integrate
for k in range(equi_steps):
print("Equilibration: {} of {}".format(k,equi_steps))
system.integrator.run(equi_length)
################################################################################
for cnt in range(5):
# Set up the MSD calculation
tmax = prod_steps*prod_length*dt
pos_id = ParticlePositions(ids=[cent])
msd = Correlator(obs1=pos_id,
corr_operation="square_distance_componentwise",
dt=dt,
tau_max=tmax,
tau_lin=16)
system.auto_update_correlators.add(msd)
## Exercise 7a ##
# Construct the auto-correlators for the AVACF, using the example
# of the MSD.
# Initialize the angular velocity auto-correlation function
# (AVACF) correlator
...
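    # A possible sketch mirroring the MSD correlator above (the observable is
    # imported at the top of this script; the corr_operation name is an
    # assumption, verify it against the espressomd correlators documentation):
    #   omega_id = ParticleBodyAngularMomentum(ids=[cent])
    #   avacf = Correlator(obs1=omega_id,
    #                      corr_operation="scalar_product",
    #                      dt=dt, tau_max=tmax, tau_lin=16)
    #   system.auto_update_correlators.add(avacf)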
# Perform production
# Integrate
for k in range(prod_steps):
print("Production {} of 5: {} of {}".format(cnt+1,k,prod_steps))
system.integrator.run(prod_length)
# Finalize the MSD and export
system.auto_update_correlators.remove(msd)
msd.finalize()
np.savetxt("{}/msd_{}.dat".format(outdir,cnt),msd.result())
## Exercise 7b ##
# Finalize the angular velocity auto-correlation function (AVACF)
# correlator and write the result to a file.
...
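    # A possible sketch, mirroring how the MSD correlator is finalized above
    # (assumes the AVACF correlator was stored in the variable avacf):
    #   system.auto_update_correlators.remove(avacf)
    #   avacf.finalize()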
np.savetxt("{}/avacf_{}.dat".format(outdir,cnt),avacf.result())
|
gpl-3.0
| -1,595,223,305,182,569,700
| 31.071186
| 115
| 0.530599
| false
| 3.808776
| false
| false
| false
|
heilaaks/snippy
|
tests/test_api_search_snippet.py
|
1
|
58285
|
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# snippy - software development and maintenance notes manager.
# Copyright 2017-2020 Heikki J. Laaksonen <laaksonen.heikki.j@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""test_api_search_snippet: Test GET /snippets API endpoint."""
from falcon import testing
import falcon
import pytest
from tests.lib.content import Content
from tests.lib.content import Storage
from tests.lib.snippet import Snippet
pytest.importorskip('gunicorn')
# pylint: disable=unsubscriptable-object
class TestApiSearchSnippet(object): # pylint: disable=too-many-public-methods, too-many-lines
"""Test GET /snippets API."""
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_001(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
query matches to two snippets and both of them are returned. The
search is sorted based on one field. The limit defined in the search
query is not exceeded.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1523'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/vnd.api+json'},
query_string='sall=docker%2Cswarm&limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_002(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
        query matches to four snippets but the limit defined in the search
        query returns only two of them sorted by the brief field. The sorting
        must be applied before the limit is applied.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1658'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_003(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
query matches to two snippets but only one of them is returned
because the limit parameter was set to one. In this case the sort is
descending and the last match must be returned. The resulting fields
are limited only to brief and category.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '218'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': {field: Storage.forced[field] for field in ['brief', 'category']}
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker&limit=1&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_004(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields but return
        only two fields. This syntax that separates the returned fields causes
the parameter to be processed in string context which must handle
multiple fields.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '218'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': {field: Storage.forced[field] for field in ['brief', 'category']}
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker&limit=1&sort=-brief&fields=brief%2Ccategory')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_005(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields. The search
        query matches to four snippets but the limit defined in the search
        query returns only two of them sorted by the utc field in descending order.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1626'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=-created,-brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_006(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all fields sorted with
two fields. This syntax that separates the sorted fields causes the
parameter to be processed in string context which must handle multiple
fields.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1626'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=-created%2C-brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_007(server):
"""Search snippets with GET.
        Try to send GET /snippets with the sort parameter set to a field name
        that does not exist. In this case sorting must fall back to the
        default sorting.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '385'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'sort option validation failed for non existent field=notexisting'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cswarm&limit=20&sort=notexisting')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_008(server):
"""Search snippets with GET.
Send GET /snippets to return only defined fields. In this case the
fields are defined by setting the 'fields' parameter multiple times.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '218'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': {field: Storage.forced[field] for field in ['brief', 'category']}
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker&limit=1&sort=-brief&fields=brief&fields=category')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_009(server):
"""Search snippets with GET.
        Try to send GET /snippets with search keywords that do not result
        in any matches.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=notfound&limit=10&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_010(server):
"""Search snippets with GET from tag fields.
        Try to send GET /snippets with search tag keywords that do not
        result in any matches.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='stag=notfound&limit=10&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_011(server):
"""Search snippet from groups fields.
        Try to send GET /snippets with search groups keywords that do not
        result in any matches.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sgrp=notfound&limit=10&sort=-brief&fields=brief,category')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_012(server):
"""Search snippet with digets.
Send GET /snippets/{id} to read a snippet based on digest. In this
case the snippet is found. In this case the URI path contains 15 digit
digest. The returned self link must be the 16 digit link.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '871'
}
expect_body = {
'meta': {
'count': 1,
'limit': 20,
'offset': 0,
'total': 1
},
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/' + Snippet.REMOVE_UUID
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52a02b6',
headers={'accept': 'application/json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_013(server):
"""Search snippet with digets.
Try to send GET /snippets/{id} with a digest that is not found.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '395'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'content identity: 101010101010101 was not unique and matched to: 0 resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/101010101010101',
headers={'accept': 'application/json'})
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_014(server):
"""Search snippet without search parameters.
Send GET /snippets without defining search parameters. In this
case all content should be returned.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1523'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_015(server):
"""Search snippet without search parameters.
Send GET /snippets without defining search parameters. In this
case only one snippet must be returned because the limit is set to
one. Also the sorting based on brief field causes the last snippet
to be returned.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '830'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='limit=1&sort=-brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.parametrize('server', [['server', '--server-host', 'localhost:8080', '-q']], indirect=True)
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_016(server):
"""Search snippets with GET.
Send GET /snippets and search keywords from all attributes. The
search query matches to two snippets and both of them are returned.
        The response JSON is sent pretty printed.
        TODO: The groups refactoring changed the length from 2196 to 2278.
Why so much? Is there a problem in the result JSON?
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2709'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/vnd.api+json'},
query_string='sall=docker%2Cswarm&limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_001(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        zero and the limit is bigger than the amount of search results so that
all results fit into one response. Because all results fit into the
same response, there is no need for next and prev links and those
must not be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '3425'
}
expect_body = {
'meta': {
'count': 4,
'limit': 10,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}, {
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=10&offset=0&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=10&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=10&offset=0&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=0&limit=10&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_002(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset is
        zero and the limit is smaller than the amount of search results so that
all results do not fit into one response. Because this is the first
page, the prev link must not be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2110'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=0&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_003(server):
"""Search snippets with GET.
        Send GET /snippets so that pagination is applied. The offset is
        non zero and the second page is requested. The requested second page
        is the last page. Because of this, the next link must not be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1942'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 2,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}, {
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=2&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_004(server):
"""Search snippets with GET.
        Send GET /snippets so that pagination is applied. The offset is
        non zero and the second page is requested. The requested second page
        is not the last page. In this case the last page has exactly as many
        hits as fit on one page (an even last page). All pagination links
        must be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1528'
}
expect_body = {
'meta': {
'count': 1,
'limit': 1,
'offset': 1,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=1&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=0&sall=docker%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=2&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=1&offset=3&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=1&limit=1&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_005(server):
"""Search snippets with GET.
        Send GET /snippets so that pagination is applied. The offset is
        non zero and the second page is requested. The requested second page
        is not the last page. In this case the last page has fewer items than
        fit on a full page (an uneven last page). Also the first page offset
        is not a multiple of the limit and must be correctly set to zero. All
        pagination links must be set.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2289'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 1,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=1&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=1&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_006(server):
"""Search snippets with GET.
        Send GET /snippets so that pagination is applied. The offset is
        non zero and the last page is requested. Because the original request
        was not started with offset zero, the offsets of the first and prev
        pages are not multiples of the limit. Here the offset is also close
        to the total amount of hits.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1181'
}
expect_body = {
'meta': {
'count': 1,
'limit': 2,
'offset': 3,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.NETCAT_UUID,
'attributes': Storage.netcat
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cnmap&sort=brief',
'prev': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=1&sall=docker%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=3&sall=docker%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=3&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'import-umount')
def test_api_search_snippet_paginate_007(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied. The offset and
        limit are set so that the last page contains fewer hits than the limit
and the requested page is not the last or the second last page.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '2146'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 5
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}],
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cumount%2Cnmap&sort=brief',
'next': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=2&sall=docker%2Cumount%2Cnmap&sort=brief',
'first': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=0&sall=docker%2Cumount%2Cnmap&sort=brief',
'last': 'http://falconframework.org/api/snippy/rest/snippets?limit=2&offset=4&sall=docker%2Cumount%2Cnmap&sort=brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cumount%2Cnmap&offset=0&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_008(server):
"""Search snippets with GET.
        Try to send GET /snippets with a pagination offset that is the same
        as the amount of snippets stored in the database.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=4&limit=2&sort=brief')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_009(server):
"""Search snippets with GET.
        Try to send GET /snippets with a pagination offset that is bigger
        than the maximum amount of hits.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '340'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'cannot find resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=10&limit=10&sort=brief')
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited')
def test_api_search_snippet_paginate_010(server):
"""Search snippets with GET.
Send GET /snippets so that pagination is applied with limit zero.
This is a special case that returns the metadata but the data list
is empty.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '71'
}
expect_body = {
'meta': {
'count': 0,
'limit': 0,
'offset': 0,
'total': 4
},
'data': [],
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=0&limit=0&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_011(server):
"""Search snippets with GET.
Try to send GET /snippets with negative offset.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '364'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search offset is not a positive integer: -4'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=-4&limit=2&sort=brief')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_012(server):
"""Search snippets with GET.
Try to send GET /snippets with negative offset and limit.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '520'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search result limit is not a positive integer: -2'
}, {
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search offset is not a positive integer: -4'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=-4&limit=-2&sort=brief')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'import-netcat', 'import-exited', 'caller')
def test_api_search_snippet_paginate_013(server):
"""Search snippets with GET.
Try to send GET /snippets when offset and limit are not numbers.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '533'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search result limit is not a positive integer: 0xdeadbeef'
}, {
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'search offset is not a positive integer: ABCDEFG'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&offset=ABCDEFG&limit=0xdeadbeef&sort=brief')
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_001(server):
"""Get specific snippet field.
        Send GET /snippets/{id}/data for an existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '267'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'data': Storage.remove['data']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/data'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52a02b63/data',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_002(server):
"""Get specific snippet field.
        Send GET /snippets/{id}/brief for an existing snippet. In this case
        the URI digest is only 10 octets long. The returned self link must
        contain the 16 octet digest.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '262'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'brief': Storage.remove['brief']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/brief',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_003(server):
"""Get specific snippet field.
        Send GET /snippets/{id}/groups for an existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '231'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'groups': Storage.remove['groups']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/groups'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/groups',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_004(server):
"""Get specific snippet field.
        Send GET /snippets/{id}/tags for an existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '272'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'tags': Storage.remove['tags']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/tags'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/tags',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_005(server):
"""Get specific snippet field.
        Send GET /snippets/{id}/links for an existing snippet.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '279'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': {
'links': Storage.remove['links']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/11cd5827-b6ef-4067-b5ac-3ceac07dde9f/links'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/links',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_field_006(server):
"""Get specific snippet field.
        Try to send GET /snippets/{id}/notexist for an existing snippet. In
        this case the field name does not exist.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '360'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '400',
'statusString': '400 Bad Request',
'module': 'snippy.testing.testing:123',
'title': 'resource field does not exist: notexist'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/54e41e9b52/notexist',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_400
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_field_007(server):
"""Get specific snippet field.
        Try to send GET /snippets/0101010101/brief for a non existing
        snippet with a valid field.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '390'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'content identity: 0101010101 was not unique and matched to: 0 resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/0101010101/brief',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_api_search_snippet_field_008(server):
"""Get specific snippet field.
        Send GET /snippets/{id}/brief for an existing snippet. In this case
        the URI id is a full length UUID that must be found.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '251'
}
expect_body = {
'data': {
'type': 'snippet',
'id': Storage.forced['uuid'],
'attributes': {
'brief': Storage.forced['brief']
}
},
'links': {
'self': 'http://falconframework.org/api/snippy/rest/snippets/12cd5827-b6ef-4067-b5ac-3ceac07dde9f/brief'
}
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/12cd5827-b6ef-4067-b5ac-3ceac07dde9f/brief',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets', 'caller')
def test_api_search_snippet_field_009(server):
"""Get specific snippet field.
        Try to send GET /snippets/{id} for an existing snippet with a short
        form of the UUID. The short form must not be accepted and no results
        must be returned. The UUID is intended to be used as a fully matching
        identity.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '416'
}
expect_body = {
'meta': Content.get_api_meta(),
'errors': [{
'status': '404',
'statusString': '404 Not Found',
'module': 'snippy.testing.testing:123',
'title': 'content identity: 116cd5827-b6ef-4067-b5ac-3ceac07dde9 was not unique and matched to: 0 resources'
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets/116cd5827-b6ef-4067-b5ac-3ceac07dde9',
headers={'accept': 'application/vnd.api+json'})
assert result.status == falcon.HTTP_404
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('default-snippets')
def test_pytest_fixtures(server):
"""Test pytest fixtures with pytest specific mocking.
Send GET /snippets and search keywords from all fields. The search
query matches to two snippets and both of them are returned. The
search is sorted based on one field. The limit defined in the search
query is not exceeded.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1523'
}
expect_body = {
'meta': {
'count': 2,
'limit': 20,
'offset': 0,
'total': 2
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.FORCED_UUID,
'attributes': Storage.forced
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cswarm&limit=20&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@staticmethod
@pytest.mark.usefixtures('import-remove', 'import-forced', 'import-exited', 'import-netcat')
def test_pytest_fixtures2(server):
"""Test pytest fixtures with pytest specific mocking.
Send GET /snippets and search keywords from all fields. The search
        query matches to four snippets but the limit defined in the search
        query returns only two of them sorted by the brief field. The sorting
        must be applied before the limit is applied.
"""
expect_headers = {
'content-type': 'application/vnd.api+json; charset=UTF-8',
'content-length': '1658'
}
expect_body = {
'meta': {
'count': 2,
'limit': 2,
'offset': 0,
'total': 4
},
'data': [{
'type': 'snippet',
'id': Snippet.REMOVE_UUID,
'attributes': Storage.remove
}, {
'type': 'snippet',
'id': Snippet.EXITED_UUID,
'attributes': Storage.exited
}]
}
result = testing.TestClient(server.server.api).simulate_get(
path='/api/snippy/rest/snippets',
headers={'accept': 'application/json'},
query_string='sall=docker%2Cnmap&limit=2&sort=brief')
assert result.status == falcon.HTTP_200
assert result.headers == expect_headers
Content.assert_restapi(result.json, expect_body)
@classmethod
def teardown_class(cls):
"""Teardown tests."""
Content.delete()
|
agpl-3.0
| -5,150,466,451,538,383,000
| 38.30209
| 135
| 0.556455
| false
| 4.027154
| true
| false
| false
|
DawidvanGraan/HomeAutomation
|
Raspberry/api.py
|
1
|
3437
|
#!/usr/bin/env python
from flask import Flask, jsonify
import smbus
import time
import RPi.GPIO as io
import requests
# Plex Call
plexUrl = 'http://192.168.1.100/jsonrpc?request={"jsonrpc": "2.0", "method": "Player.GetItem", "params": { "properties": ["title", "album", "duration", "showtitle"], "playerid": 1 }, "id": "VideoGetItem"}'
I2C_ADDRESS = 0x4a
app = Flask(__name__)
gpioBigGate = 18 # Big Gate
gpioSmallGate = 23 # Small Gate
gpioGarageRight = 24 # Garage Right
gpioGarageLeft = 25 # Garage Left
mag_switch1 = 22 # Garage Door Right
mag_switch2 = 17 # Garage Door Left
# I2C BUS
bus = smbus.SMBus(0)
# GPIO
io.setmode(io.BCM)
io.setup(mag_switch1, io.IN, pull_up_down=io.PUD_UP)
io.setup(mag_switch2, io.IN, pull_up_down=io.PUD_UP)
io.setup(gpioBigGate, io.OUT)
io.setup(gpioSmallGate, io.OUT)
io.setup(gpioGarageRight, io.OUT)
io.setup(gpioGarageLeft, io.OUT)
@app.route('/api/v1/hello', methods=['GET'])
def get_hello():
return jsonify({
"status": 200,
"message": "Hello API. I'm Alive and waiting for your Commands!"
})
@app.route('/api/v1/plex', methods=['GET'])
def plex():
r = requests.get(plexUrl)
if r.status_code != 200:
return jsonify({
"status": 500,
"message": "Oops, could not make call to Plex!"
})
return jsonify(r.content)
@app.route('/api/v1/biggate', methods=['GET'])
def get_biggate():
io.output(gpioBigGate, io.HIGH)
time.sleep(2)
io.output(gpioBigGate, io.LOW)
return jsonify({
"status": 200,
"message": "Big Gate Busy..."
})
@app.route('/api/v1/smallgate', methods=['GET'])
def get_smallgate():
io.output(gpioSmallGate, io.HIGH)
time.sleep(2)
io.output(gpioSmallGate, io.LOW)
return jsonify({
"status": 200,
"message": "Small Gate Busy..."
})
@app.route('/api/v1/garageright', methods=['GET'])
def get_garage_right():
io.output(gpioGarageRight, io.HIGH)
time.sleep(2)
io.output(gpioGarageRight, io.LOW)
rightSensor = io.input(mag_switch1)
return jsonify({
"status": 200,
"message": "Garage Door Right",
"garageRight": rightSensor
})
@app.route('/api/v1/garageleft', methods=['GET'])
def get_garage_left():
io.output(gpioGarageLeft, io.HIGH)
time.sleep(2)
io.output(gpioGarageLeft, io.LOW)
leftSensor = io.input(mag_switch2)
return jsonify({
"status": 200,
"message": "Garage Door Left",
"garageLeft": leftSensor
})
@app.route('/api/v1/garagedoors', methods=['GET'])
def get_garage_doors():
rightSensor = io.input(mag_switch1)
leftSensor = io.input(mag_switch2)
return jsonify({
"status": 200,
"message": "States of the Garage Doors",
"garageRight": rightSensor,
"garageLeft": leftSensor
})
@app.route('/api/v1/temp1', methods=['GET'])
def temp1():
    # Read two raw bytes (MSB, LSB) from the I2C temperature sensor
    values = bus.read_i2c_block_data(I2C_ADDRESS, 0x00, 2)
    tempMSB = values[0]
    tempLSB = values[1]
    # The reading is left-aligned in the 16-bit word; one LSB equals 0.5 degC
    temp = (((tempMSB << 8) | tempLSB) >> 7) * 0.5
    if temp > 125:
        # Readings above +125 degC are two's-complement negative values
        temp = (((((tempMSB << 8) | tempLSB) >> 7) * 0.5) - 256)
return jsonify({
"status": 200,
"message": "Temperature 1 Sensor Value",
"temp": temp
})
@app.route('/')
def index():
return "Hello, Home Remote!!"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
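# Example usage from another machine on the LAN (hypothetical host address,
# adjust to the Pi's actual IP; the endpoints are the routes defined above):
#   curl http://<raspberry-pi-ip>:8080/api/v1/garagedoors
#   curl http://<raspberry-pi-ip>:8080/api/v1/temp1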
|
mit
| -7,297,475,318,174,467,000
| 22.380952
| 206
| 0.603142
| false
| 2.925106
| false
| false
| false
|
hirofumi0810/asr_preprocessing
|
swbd/main.py
|
1
|
15071
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make dataset for the End-to-End model (Switchboard corpus).
Note that feature extraction depends on transcripts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import sys
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
from collections import Counter
import pickle
sys.path.append('../')
from swbd.path import Path
from swbd.input_data import read_audio
from swbd.labels.ldc97s62.character import read_trans
from swbd.labels.fisher.character import read_trans as read_trans_fisher
from swbd.labels.eval2000.stm import read_stm
from utils.util import mkdir_join
from utils.inputs.wav_split import split_wav
from utils.dataset import add_element
parser = argparse.ArgumentParser()
parser.add_argument('--swbd_audio_path', type=str,
help='path to LDC97S62 audio files')
parser.add_argument('--swbd_trans_path', type=str,
                    help='path to LDC97S62 transcription files')
parser.add_argument('--fisher_path', type=str, help='path to Fisher dataset')
parser.add_argument('--eval2000_audio_path', type=str,
help='path to audio files of eval2000 dataset')
parser.add_argument('--eval2000_trans_path', type=str,
help='path to transcript files of eval2000 dataset')
parser.add_argument('--dataset_save_path', type=str,
help='path to save dataset')
parser.add_argument('--feature_save_path', type=str,
help='path to save input features')
parser.add_argument('--run_root_path', type=str,
help='path to run this script')
parser.add_argument('--tool', type=str,
choices=['htk', 'python_speech_features', 'librosa'])
parser.add_argument('--wav_save_path', type=str, help='path to wav files.')
parser.add_argument('--htk_save_path', type=str, help='path to htk files.')
parser.add_argument('--normalize', type=str,
choices=['global', 'speaker', 'utterance', 'no'])
parser.add_argument('--save_format', type=str, choices=['numpy', 'htk', 'wav'])
parser.add_argument('--feature_type', type=str, choices=['fbank', 'mfcc'])
parser.add_argument('--channels', type=int,
help='the number of frequency channels')
parser.add_argument('--window', type=float,
help='window width to extract features')
parser.add_argument('--slide', type=float, help='extract features per slide')
parser.add_argument('--energy', type=int, help='if 1, add the energy feature')
parser.add_argument('--delta', type=int, help='if 1, add delta features')
parser.add_argument('--deltadelta', type=int,
help='if 1, double delta features are also extracted')
parser.add_argument('--fisher', type=int,
help='If True, create large-size dataset (2000h).')
args = parser.parse_args()
path = Path(swbd_audio_path=args.swbd_audio_path,
swbd_trans_path=args.swbd_trans_path,
fisher_path=args.fisher_path,
eval2000_audio_path=args.eval2000_audio_path,
eval2000_trans_path=args.eval2000_trans_path,
wav_save_path=args.wav_save_path,
htk_save_path=args.htk_save_path,
run_root_path='./')
CONFIG = {
'feature_type': args.feature_type,
'channels': args.channels,
'sampling_rate': 8000,
'window': args.window,
'slide': args.slide,
'energy': bool(args.energy),
'delta': bool(args.delta),
'deltadelta': bool(args.deltadelta)
}
if args.save_format == 'htk':
assert args.tool == 'htk'
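# Example invocation (hypothetical paths and values, shown for illustration
# only; the real flags are the argparse options defined above):
#   python main.py --swbd_audio_path /data/LDC97S62 \
#     --swbd_trans_path /data/LDC97S62_trans --fisher_path /data/fisher \
#     --eval2000_audio_path /data/eval2000_sph --eval2000_trans_path /data/eval2000_trans \
#     --dataset_save_path ./dataset --feature_save_path ./feature \
#     --run_root_path ./ --tool htk --wav_save_path ./wav --htk_save_path ./htk \
#     --normalize speaker --save_format numpy --feature_type fbank --channels 40 \
#     --window 0.025 --slide 0.01 --energy 0 --delta 1 --deltadelta 1 --fisher 0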
def main(data_size):
print('=' * 50)
print(' data_size: %s' % data_size)
print('=' * 50)
########################################
# labels
########################################
print('=> Processing transcripts...')
speaker_dict_dict = {} # dict of speaker_dict
print('---------- train ----------')
if data_size == '300h':
speaker_dict_dict['train'] = read_trans(
label_paths=path.trans(corpus='swbd'),
word_boundary_paths=path.word(corpus='swbd'),
run_root_path='./',
vocab_file_save_path=mkdir_join('./config/vocab_files'),
save_vocab_file=True)
elif data_size == '2000h':
speaker_dict_a, char_set_a, char_capital_set_a, word_count_dict_a = read_trans_fisher(
label_paths=path.trans(corpus='fisher'),
target_speaker='A')
speaker_dict_b, char_set_b, char_capital_set_b, word_count_dict_b = read_trans_fisher(
label_paths=path.trans(corpus='fisher'),
target_speaker='B')
        # Merge the two dictionaries
speaker_dict = merge_dicts([speaker_dict_a, speaker_dict_b])
char_set = char_set_a | char_set_b
char_capital_set = char_capital_set_a | char_capital_set_b
word_count_dict_fisher = dict(
Counter(word_count_dict_a) + Counter(word_count_dict_b))
speaker_dict_dict['train'] = read_trans(
label_paths=path.trans(corpus='swbd'),
word_boundary_paths=path.word(corpus='swbd'),
run_root_path='./',
vocab_file_save_path=mkdir_join('./config/vocab_files'),
save_vocab_file=True,
speaker_dict_fisher=speaker_dict,
char_set=char_set,
char_capital_set=char_capital_set,
word_count_dict=word_count_dict_fisher)
del speaker_dict
print('---------- eval2000 (swbd + ch) ----------')
speaker_dict_dict['eval2000_swbd'], speaker_dict_dict['eval2000_ch'] = read_stm(
stm_path=path.stm_path,
pem_path=path.pem_path,
glm_path=path.glm_path,
run_root_path='./')
########################################
# inputs
########################################
print('\n=> Processing input data...')
input_save_path = mkdir_join(
args.feature_save_path, args.save_format, data_size)
for data_type in ['train', 'eval2000_swbd', 'eval2000_ch']:
print('---------- %s ----------' % data_type)
if isfile(join(input_save_path, data_type, 'complete.txt')):
print('Already exists.')
else:
if args.save_format == 'wav':
########################################
# Split WAV files per utterance
########################################
if data_type == 'train':
wav_paths = path.wav(corpus='swbd')
if data_size == '2000h':
wav_paths += path.wav(corpus='fisher')
else:
wav_paths = path.wav(corpus=data_type)
split_wav(wav_paths=wav_paths,
speaker_dict=speaker_dict_dict[data_type],
save_path=mkdir_join(input_save_path, data_type))
# NOTE: ex.) save_path:
# swbd/feature/save_format/data_size/data_type/speaker/utt_name.npy
elif args.save_format in ['numpy', 'htk']:
if data_type == 'train':
if args.tool == 'htk':
audio_paths = path.htk(corpus='swbd')
if data_size == '2000h':
audio_paths += path.htk(corpus='fisher')
else:
audio_paths = path.wav(corpus='swbd')
if data_size == '2000h':
audio_paths += path.wav(corpus='fisher')
is_training = True
global_mean, global_std = None, None
else:
if args.tool == 'htk':
audio_paths = path.htk(corpus=data_type)
else:
audio_paths = path.wav(corpus=data_type)
is_training = False
# Load statistics over train dataset
global_mean = np.load(
join(input_save_path, 'train/global_mean.npy'))
global_std = np.load(
join(input_save_path, 'train/global_std.npy'))
read_audio(audio_paths=audio_paths,
tool=args.tool,
config=CONFIG,
normalize=args.normalize,
speaker_dict=speaker_dict_dict[data_type],
is_training=is_training,
save_path=mkdir_join(input_save_path, data_type),
save_format=args.save_format,
global_mean=global_mean,
global_std=global_std)
# NOTE: ex.) save_path:
# swbd/feature/save_format/data_size/data_type/speaker/*.npy
# Make a confirmation file to prove that dataset was saved
# correctly
with open(join(input_save_path, data_type, 'complete.txt'), 'w') as f:
f.write('')
########################################
# dataset (csv)
########################################
print('\n=> Saving dataset files...')
dataset_save_path = mkdir_join(
args.dataset_save_path, args.save_format, data_size, data_type)
print('---------- %s ----------' % data_type)
df_columns = ['frame_num', 'input_path', 'transcript']
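# Each row of the resulting csv files maps one utterance to the number of frames in
# its feature file, the path to that file, and the transcript label indices for the
# corresponding label unit (character, capitalised character, or one of the
# word_freq1/5/10/15 word vocabularies).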
df_char = pd.DataFrame([], columns=df_columns)
df_char_capital = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
with open(join(input_save_path, data_type, 'frame_num.pickle'), 'rb') as f:
frame_num_dict = pickle.load(f)
utt_count = 0
df_char_list, df_char_capital_list = [], []
df_word_freq1_list, df_word_freq5_list = [], []
df_word_freq10_list, df_word_freq15_list = [], []
speaker_dict = speaker_dict_dict[data_type]
for speaker, utt_dict in tqdm(speaker_dict.items()):
for utt_index, utt_info in utt_dict.items():
if args.save_format == 'numpy':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.npy')
elif args.save_format == 'htk':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.htk')
elif args.save_format == 'wav':
input_utt_save_path = path.utt2wav(utt_index)
else:
raise ValueError('save_format must be one of: numpy, htk, wav.')
assert isfile(input_utt_save_path)
frame_num = frame_num_dict[speaker + '_' + utt_index]
char_indices, char_indices_capital, word_freq1_indices = utt_info[2:5]
word_freq5_indices, word_freq10_indices, word_freq15_indices = utt_info[5:8]
df_char = add_element(
df_char, [frame_num, input_utt_save_path, char_indices])
df_char_capital = add_element(
df_char_capital, [frame_num, input_utt_save_path, char_indices_capital])
df_word_freq1 = add_element(
df_word_freq1, [frame_num, input_utt_save_path, word_freq1_indices])
df_word_freq5 = add_element(
df_word_freq5, [frame_num, input_utt_save_path, word_freq5_indices])
df_word_freq10 = add_element(
df_word_freq10, [frame_num, input_utt_save_path, word_freq10_indices])
df_word_freq15 = add_element(
df_word_freq15, [frame_num, input_utt_save_path, word_freq15_indices])
utt_count += 1
# Flush rows to the batch lists and reset the DataFrames every 10,000 utterances
if utt_count == 10000:
df_char_list.append(df_char)
df_char_capital_list.append(df_char_capital)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
df_char = pd.DataFrame([], columns=df_columns)
df_char_capital = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
utt_count = 0
# Last dataframe
df_char_list.append(df_char)
df_char_capital_list.append(df_char_capital)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
# Concatenate all dataframes
df_char = df_char_list[0]
df_char_capital = df_char_capital_list[0]
df_word_freq1 = df_word_freq1_list[0]
df_word_freq5 = df_word_freq5_list[0]
df_word_freq10 = df_word_freq10_list[0]
df_word_freq15 = df_word_freq15_list[0]
for df_i in df_char_list[1:]:
df_char = pd.concat([df_char, df_i], axis=0)
for df_i in df_char_capital_list[1:]:
df_char_capital = pd.concat([df_char_capital, df_i], axis=0)
for df_i in df_word_freq1_list[1:]:
df_word_freq1 = pd.concat([df_word_freq1, df_i], axis=0)
for df_i in df_word_freq5_list[1:]:
df_word_freq5 = pd.concat([df_word_freq5, df_i], axis=0)
for df_i in df_word_freq10_list[1:]:
df_word_freq10 = pd.concat([df_word_freq10, df_i], axis=0)
for df_i in df_word_freq15_list[1:]:
df_word_freq15 = pd.concat([df_word_freq15, df_i], axis=0)
df_char.to_csv(join(dataset_save_path, 'character.csv'))
df_char_capital.to_csv(
join(dataset_save_path, 'character_capital_divide.csv'))
df_word_freq1.to_csv(join(dataset_save_path, 'word_freq1.csv'))
df_word_freq5.to_csv(join(dataset_save_path, 'word_freq5.csv'))
df_word_freq10.to_csv(join(dataset_save_path, 'word_freq10.csv'))
df_word_freq15.to_csv(join(dataset_save_path, 'word_freq15.csv'))
def merge_dicts(dicts):
return {k: v for dic in dicts for k, v in dic.items()}
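# Illustrative usage (not part of the original script): later dictionaries win on
# duplicate keys, e.g. merge_dicts([{'a': 1}, {'b': 2}, {'a': 3}]) -> {'a': 3, 'b': 2}.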
if __name__ == '__main__':
data_sizes = ['2000h']
# data_sizes = ['300h']
# if bool(args.fisher):
# data_sizes += ['2000h']
for data_size in data_sizes:
main(data_size)
|
mit
| -8,193,133,252,302,720,000
| 43.196481
| 96
| 0.544224
| false
| 3.602055
| false
| false
| false
|
ar0551/Wasp
|
devFiles/data/waspCatalogFix.py
|
1
|
54170
|
# Wasp: Discrete Design with Grasshopper plug-in (GPL) initiated by Andrea Rossi
#
# This file is part of Wasp.
#
# Copyright (c) 2017, Andrea Rossi <a.rossi.andrea@gmail.com>
# Wasp is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Wasp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wasp; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0 <https://www.gnu.org/licenses/gpl.html>
#
# Significant parts of Wasp have been developed by Andrea Rossi
# as part of research on digital materials and discrete design at:
# DDU Digital Design Unit - Prof. Oliver Tessmann
# Technische Universitat Darmstadt
#########################################################################
## IMPORTS ##
#########################################################################
import random
import math
import bisect
from Rhino.RhinoDoc import ActiveDoc
import Rhino.Geometry as rg
#########################################################################
## GLOBAL VARIABLES ##
#########################################################################
global_tolerance = ActiveDoc.ModelAbsoluteTolerance*2
#########################################################################
## CLASSES ##
#########################################################################
#################################################################### Connection ####################################################################
class Connection(object):
## constructor
def __init__(self, _plane, _type, _part, _id):
self.pln = _plane
flip_pln_Y = rg.Vector3d(self.pln.YAxis)
flip_pln_Y.Reverse()
self.flip_pln = rg.Plane(self.pln.Origin, self.pln.XAxis, flip_pln_Y)
self.type = _type
self.part = _part
self.id = _id
self.rules_table = []
self.active_rules = []
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspConnection [id: %s, type: %s]" % (self.id, self.type)
## return a transformed copy of the connection
def transform(self, trans):
pln_trans = rg.Plane(self.pln.Origin, self.pln.XAxis, self.pln.YAxis)
conn_trans = Connection(pln_trans, self.type, self.part, self.id)
conn_trans.pln.Transform(trans)
conn_trans.flip_pln.Transform(trans)
return conn_trans
## return a copy of the connection
def copy(self):
pln_copy = rg.Plane(self.pln.Origin, self.pln.XAxis, self.pln.YAxis)
conn_copy = Connection(pln_copy, self.type, self.part, self.id)
return conn_copy
## generate the rules-table for the connection
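## rules_table collects every rule whose start part/connection matches this
## connection, while active_rules stores indices into that table; the aggregation
## methods deactivate individual rules (and eventually the whole connection) when
## a rule keeps producing collisions or overlaps.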
def generate_rules_table(self, rules):
count = 0
self.rules_table = []
self.active_rules = []
for rule in rules:
if rule.part1 == self.part and rule.conn1 == self.id:
self.rules_table.append(rule)
self.active_rules.append(count)
count += 1
#################################################################### Base Part ####################################################################
class Part(object):
## constructor
def __init__(self, name, geometry, connections, collider, attributes, dim=None, id=None, field=None):
self.name = name
self.id = id
self.geo = geometry
self.field = field
self.connections = []
self.active_connections = []
count = 0
for conn in connections:
conn.part = self.name
conn.id = count
self.connections.append(conn)
self.active_connections.append(count)
count += 1
self.transformation = rg.Transform.Identity
self.center = self.geo.GetBoundingBox(False).Center
self.collider = collider
##part size
if dim is not None:
self.dim = dim
else:
max_collider_dist = None
for coll_geo in self.collider.geometry:
for v in coll_geo.Vertices:
dist = self.center.DistanceTo(v)
if max_collider_dist is None or dist > max_collider_dist:
max_collider_dist = dist
self.dim = max_collider_dist
self.parent = None
self.children = []
self.attributes = []
if len(attributes) > 0:
self.attributes = attributes
self.is_constrained = False
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspPart [name: %s, id: %s]" % (self.name, self.id)
## reset the part and connections according to new provided aggregation rules
def reset_part(self, rules):
count = 0
self.active_connections = []
for conn in self.connections:
conn.generate_rules_table(rules)
self.active_connections.append(count)
count += 1
## return a dictionary containing all part data
def return_part_data(self):
data_dict = {}
data_dict['name'] = self.name
data_dict['id'] = self.id
data_dict['geo'] = self.geo
data_dict['connections'] = self.connections
data_dict['transform'] = self.transformation
data_dict['collider'] = self.collider
data_dict['center'] = self.center
data_dict['parent'] = self.parent
data_dict['children'] = self.children
data_dict['attributes'] = self.attributes
return data_dict
## return a transformed copy of the part
def transform(self, trans, transform_sub_parts=False):
geo_trans = self.geo.Duplicate()
geo_trans.Transform(trans)
collider_trans = self.collider.transform(trans)
connections_trans = []
for conn in self.connections:
connections_trans.append(conn.transform(trans))
attributes_trans = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_trans.append(attr.transform(trans))
part_trans = Part(self.name, geo_trans, connections_trans, collider_trans, attributes_trans, dim=self.dim, id=self.id, field=self.field)
part_trans.transformation = trans
return part_trans
## return a copy of the part
def copy(self):
geo_copy = self.geo.Duplicate()
collider_copy = self.collider.copy()
connections_copy = []
for conn in self.connections:
connections_copy.append(conn.copy())
attributes_copy = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_copy.append(attr.copy())
part_copy = Part(self.name, geo_copy, connections_copy, collider_copy, attributes_copy, dim=self.dim, id=self.id, field=self.field)
part_copy.transformation = self.transformation
return part_copy
## return transformed center point of the part
def transform_center(self, trans):
center_trans = rg.Point3d(self.center)
center_trans.Transform(trans)
return center_trans
## return transformed collider
def transform_collider(self, trans):
return self.collider.transform(trans)
#################################################################### Constrained Part ####################################################################
class AdvancedPart(Part):
## constructor
def __init__(self, name, geometry, connections, collider, attributes, additional_collider, supports, dim = None, id=None, field=None, sub_parts=[]):
super(self.__class__, self).__init__(name, geometry, connections, collider, attributes, dim=dim, id=id, field=field)
self.add_collider = None
if additional_collider != None:
self.add_collider = additional_collider
self.supports = []
if len(supports) > 0:
self.supports = supports
## hierarchical sub-parts
self.sub_parts = sub_parts
self.hierarchy_level = 0
if len(self.sub_parts) > 0:
self.hierarchy_level = self.sub_parts[0].hierarchy_level + 1
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspAdvPart [name: %s, id: %s]" % (self.name, self.id)
## return all part data
def return_part_data(self):
data_dict = {}
data_dict['name'] = self.name
data_dict['id'] = self.id
data_dict['geo'] = self.geo
data_dict['connections'] = self.connections
data_dict['transform'] = self.transformation
data_dict['collider'] = self.collider
data_dict['center'] = self.center
data_dict['parent'] = self.parent
data_dict['children'] = self.children
data_dict['attributes'] = self.attributes
data_dict['add_collider'] = self.add_collider
return data_dict
## return a transformed copy of the part
def transform(self, trans, transform_sub_parts=False, sub_level = 0):
geo_trans = self.geo.Duplicate()
geo_trans.Transform(trans)
collider_trans = self.collider.transform(trans)
connections_trans = []
for conn in self.connections:
connections_trans.append(conn.transform(trans))
attributes_trans = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_trans.append(attr.transform(trans))
add_collider_trans = None
if(self.add_collider != None):
add_collider_trans = self.add_collider.transform(trans, transform_connections=True, maintain_valid=True)
supports_trans = []
if len(self.supports) > 0:
for sup in self.supports:
sup_trans = sup.transform(trans)
supports_trans.append(sup_trans)
if transform_sub_parts and len(self.sub_parts) > 0 and sub_level > 0:
sub_parts_trans = []
for sp in self.sub_parts:
sp_trans = sp.transform(trans, transform_sub_parts = True, sub_level = sub_level - 1)
sub_parts_trans.append(sp_trans)
part_trans = AdvancedPart(self.name, geo_trans, connections_trans, collider_trans, attributes_trans, add_collider_trans, supports_trans, dim=self.dim, id=self.id, field=self.field, sub_parts=sub_parts_trans)
part_trans.transformation = trans
part_trans.is_constrained = True
return part_trans
else:
part_trans = AdvancedPart(self.name, geo_trans, connections_trans, collider_trans, attributes_trans, add_collider_trans, supports_trans, dim=self.dim, id=self.id, field=self.field, sub_parts=self.sub_parts)
part_trans.transformation = trans
part_trans.is_constrained = True
return part_trans
## return a copy of the part
def copy(self):
geo_copy = self.geo.Duplicate()
collider_copy = self.collider.copy()
connections_copy = []
for conn in self.connections:
connections_copy.append(conn.copy())
attributes_copy = []
if len(self.attributes) > 0:
for attr in self.attributes:
attributes_copy.append(attr.copy())
add_collider_copy = None
if(self.add_collider != None):
add_collider_copy = self.add_collider.copy()
supports_copy = []
if len(self.supports) > 0:
for sup in self.supports:
sup_copy = sup.copy()
supports_copy.append(sup_copy)
if len(self.sub_parts) > 0:
sub_parts_copy = []
for sp in self.sub_parts:
sp_copy = sp.copy()
sub_parts_copy.append(sp_copy)
part_copy = AdvancedPart(self.name, geo_copy, connections_copy, collider_copy, attributes_copy, add_collider_copy, supports_copy, dim=self.dim, id=self.id, field=self.field, sub_parts=sub_parts_copy)
part_copy.transformation = self.transformation
part_copy.is_constrained = True
return part_copy
else:
part_copy = AdvancedPart(self.name, geo_copy, connections_copy, collider_copy, attributes_copy, add_collider_copy, supports_copy, dim=self.dim, id=self.id, field=self.field, sub_parts=self.sub_parts)
part_copy.transformation = self.transformation
part_copy.is_constrained = True
return part_copy
#################################################################### Rule ####################################################################
class Rule(object):
def __init__(self, _part1, _conn1, _part2, _conn2, _active = True):
self.part1 = _part1
self.conn1 = _conn1
self.part2 = _part2
self.conn2 = _conn2
self.active = _active
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspRule [%s|%s_%s|%s]" % (self.part1, self.conn1, self.part2, self.conn2)
#################################################################### Field ####################################################################
class Field(object):
## constructor
def __init__(self, name, boundaries, pts, count_vec, resolution, values = []):
self.name = name
self.resolution = resolution
self.boundaries = boundaries
self.pts = pts
self.bbox = rg.BoundingBox(pts)
self.x_count = int(count_vec.X)
self.y_count = int(count_vec.Y)
self.z_count = int(count_vec.Z)
self.vals = []
pts_count = 0
self.is_tensor_field = False
try:
v = values[0][2]
self.is_tensor_field = True
except:
pass
if len(values) > 0:
for z in range(0, self.z_count):
self.vals.append([])
for y in range(0, self.y_count):
self.vals[z].append([])
for x in range(0, self.x_count):
if len(self.boundaries) > 0:
inside = False
for bou in self.boundaries:
if bou.IsPointInside(self.pts[pts_count], global_tolerance, True) == True:
self.vals[z][y].append(values[pts_count])
inside = True
break
if inside == False:
if self.is_tensor_field:
self.vals[z][y].append(rg.Vector3d(0,0,0))
else:
self.vals[z][y].append(0.0)
else:
self.vals[z][y].append(values[pts_count])
pts_count += 1
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspField [name: %s, res: %s, count: %s]" % (self.name, self.resolution, len(self.pts))
def return_values_list(self):
values_list = []
for z in range(0, self.z_count):
for y in range(0, self.y_count):
for x in range(0, self.x_count):
values_list.append(self.vals[z][y][x])
return values_list
## return value associated to the closest point of the field to the given point
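## (the caller is expected to pass a point inside self.bbox; compute_next_w_field()
## guards this with bbox.Contains() first). Illustrative example with hypothetical
## values: with resolution 1.0 and bbox.Min at the origin, a point at (2.3, 0.4, 5.9)
## falls in cell x=2, y=0, z=5 and returns self.vals[5][0][2].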
def return_pt_val(self, pt):
pt_trans = pt - self.bbox.Min
x = int(math.floor(pt_trans.X/self.resolution))
y = int(math.floor(pt_trans.Y/self.resolution))
z = int(math.floor(pt_trans.Z/self.resolution))
value = self.vals[z][y][x]
return value
## find and return highest value in the field
def return_highest_pt(self, constraints = None):
max_val = -1
max_coords = None
for z in range(0, self.z_count):
for y in range(0, self.y_count):
for x in range(0, self.x_count):
value = self.vals[z][y][x]
## tensor field aggregation (WIP)
if self.is_tensor_field:
if value.Length > max_val:
if constraints is not None:
constraint_check = False
pt = rg.Point3d(x*self.resolution, y*self.resolution, z*self.resolution)
pt += self.bbox.Min
for constraint in constraints:
if constraint.check_soft(pt) == False:
constraint_check = True
break
if constraint_check == False:
max_val = value.Length
max_coords = (x,y,z)
else:
max_val = value.Length
max_coords = (x,y,z)
else:
if value > max_val:
if constraints is not None:
constraint_check = False
pt = rg.Point3d(x*self.resolution, y*self.resolution, z*self.resolution)
pt += self.bbox.Min
for constraint in constraints:
if constraint.check_soft(pt) == False:
constraint_check = True
break
if constraint_check == False:
max_val = value
max_coords = (x,y,z)
else:
max_val = value
max_coords = (x,y,z)
highest_pt = rg.Point3d(max_coords[0]*self.resolution, max_coords[1]*self.resolution, max_coords[2]*self.resolution)
highest_pt = highest_pt + self.bbox.Min
return highest_pt
#################################################################### Attribute ####################################################################
class Attribute(object):
## constructor
def __init__(self, name, values, transformable):
self.name = name
self.values = values
self.transformable = transformable
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspAttribute [name: %s]" % (self.name)
## return a transformed copy of the attribute
def transform(self, trans):
if self.transformable == True:
values_trans = []
for val in self.values:
val_trans = None
if type(val) == rg.Point3d:
val_trans = rg.Point3d(val)
elif type(val) == rg.Plane:
val_trans = rg.Plane(val)
elif type(val) == rg.Line:
val_trans = rg.Line(val.From, val.To)
else:
val_trans = val.Duplicate()
val_trans.Transform(trans)
values_trans.append(val_trans)
attr_trans = Attribute(self.name, values_trans, self.transformable)
else:
attr_trans = Attribute(self.name, self.values, self.transformable)
return attr_trans
## return a copy of the attribute
def copy(self):
if self.transformable == True:
values_copy = []
for val in self.values:
val_copy = None
if type(val) == rg.Point3d:
val_copy = rg.Point3d(val)
elif type(val) == rg.Plane:
val_copy = rg.Plane(val)
elif type(val) == rg.Line:
val_copy = rg.Line(val.From, val.To)
else:
val_copy = val.Duplicate()
values_copy.append(val_copy)
attr_copy = Attribute(self.name, values_copy, self.transformable)
else:
attr_copy = Attribute(self.name, self.values, self.transformable)
return attr_copy
#################################################################### Support ####################################################################
class Support(object):
## constructor
def __init__(self, support_directions):
self.sup_dir = support_directions
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspSupport [len: %s]" % (len(self.sup_dir))
## return a transformed copy of the support
def transform(self, trans):
sup_dir_trans = []
for dir in self.sup_dir:
dir = dir.ToNurbsCurve()
start_trans = dir.PointAtStart
end_trans = dir.PointAtEnd
start_trans.Transform(trans)
end_trans.Transform(trans)
dir_trans = rg.Line(start_trans, end_trans)
sup_dir_trans.append(dir_trans)
sup_trans = Support(sup_dir_trans)
return sup_trans
## return a copy of the support
def copy(self):
sup_dir_copy = []
for dir in self.sup_dir:
dir = dir.ToNurbsCurve()
start_copy = dir.PointAtStart
end_copy = dir.PointAtEnd
dir_copy = rg.Line(start_copy, end_copy)
sup_dir_copy.append(dir_copy)
sup_copy = Support(sup_dir_copy)
return sup_copy
#################################################################### Aggregation ####################################################################
class Aggregation(object):
## class constructor
def __init__(self, _name, _parts, _rules, _mode, _prev = [], _coll_check = True, _field = [], _global_constraints = [], _catalog = None):
## basic parameters
self.name = _name
self.parts = {}
for part in _parts:
self.parts[part.name] = part
self.rules = _rules
self.mode = _mode
self.coll_check = _coll_check
self.aggregated_parts = []
## fields
self.multiple_fields = False
if len(_field) == 0 or _field is None:
self.field = None
elif len(_field) == 1:
self.field = _field[0]
else:
self.field = {}
for f in _field:
self.field[f.name] = f
self.multiple_fields = True
## reset base parts
self.reset_base_parts()
## temp list to store possible colliders to newly added parts
self.possible_collisions = []
## aggregation queue, storing possible next states (part name, parent id, transform) sorted by field value
self.aggregation_queue = []
self.queue_values = []
self.queue_count = 0
## previous aggregated parts
self.prev_num = 0
if len(_prev) > 0:
self.prev_num = len(_prev)
for prev_p in _prev:
prev_p_copy = prev_p.copy()
prev_p_copy.reset_part(self.rules)
prev_p_copy.id = len(self.aggregated_parts)
self.aggregated_parts.append(prev_p_copy)
if self.field is not None:
self.compute_next_w_field(prev_p_copy)
## global constraints applied to the aggregation
self.global_constraints = _global_constraints
self.catalog = _catalog
#### WIP ####
self.collision_shapes = []
self.graph = None
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspAggregation [name: %s, size: %s]" % (self.name, len(self.aggregated_parts))
## reset base parts
def reset_base_parts(self, new_parts = None):
if new_parts != None:
self.parts = {}
for part in new_parts:
self.parts[part.name] = part
for p_key in self.parts:
self.parts[p_key].reset_part(self.rules)
## reset rules and regenerate rule tables for each part
def reset_rules(self, rules):
if rules != self.rules:
self.rules = rules
self.reset_base_parts()
for part in self.aggregated_parts:
part.reset_part(rules)
## recompute aggregation queue
def recompute_aggregation_queue(self):
self.aggregation_queue = []
self.queue_values = []
self.queue_count = 0
for part in self.aggregated_parts:
self.compute_next_w_field(part)
## trim aggregated parts list to a specific length
def remove_elements(self, num):
self.aggregated_parts = self.aggregated_parts[:num]
for part in self.aggregated_parts:
part.reset_part(self.rules)
if self.field is not None:
self.recompute_aggregation_queue()
## compute all possible parts which can be placed given an existing part and connection
def compute_possible_children(self, part_id, conn_id, check_constraints = False):
possible_children = []
current_part = self.aggregated_parts[part_id]
if conn_id in current_part.active_connections:
current_conn = current_part.connections[conn_id]
for rule_id in current_conn.active_rules:
rule = current_conn.rules_table[rule_id]
next_part = self.parts[rule.part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[rule.conn2].flip_pln, current_conn.pln)
## boolean checks for all constraints
coll_check = False
add_coll_check = False
valid_connections = []
missing_sup_check = False
global_const_check = False
if check_constraints:
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
possible_children.append(next_part_trans)
return possible_children
else:
return -1
## add a custom pre-computed part which has been already transformed in place and checked for constraints
def add_custom_part(self, part_id, conn_id, next_part):
next_part.reset_part(self.rules)
next_part.id = len(self.aggregated_parts)
self.aggregated_parts[part_id].children.append(next_part)
next_part.parent = self.aggregated_parts[part_id]
self.aggregated_parts.append(next_part)
for i in range(len(self.aggregated_parts[part_id].active_connections)):
if self.aggregated_parts[part_id].active_connections[i] == conn_id:
self.aggregated_parts[part_id].active_connections.pop(i)
break
#### constraints checks ####
## function grouping all constraints checks (not yet implemented)
def constraints_check(self, part, trans):
pass
## overlap // part-part collision check
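## The check runs in two phases: a cheap center-distance test first rejects outright
## overlaps (centers closer than global_tolerance) and collects parts whose bounding
## dimensions could touch the new part as possible colliders; only those candidates
## are then tested with the mesh-level collider check.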
def collision_check(self, part, trans):
part_center = part.transform_center(trans)
## overlap check
coll_count = 0
for ex_part in self.aggregated_parts:
dist = ex_part.center.DistanceTo(part_center)
if dist < global_tolerance:
return True
elif dist < ex_part.dim + part.dim:
self.possible_collisions.append(coll_count)
coll_count += 1
## collision check
if self.coll_check == True:
part_collider = part.transform_collider(trans)
if part_collider.check_collisions_by_id(self.aggregated_parts, self.possible_collisions):
return True
return False
## additional collider check
def additional_collider_check(self, part, trans):
if part.add_collider != None:
add_collider = part.add_collider.transform(trans, transform_connections=True, maintain_valid = False)
if add_collider.check_collisions_w_parts(self.aggregated_parts):
return True
## assign computed valid connections according to collider location
part.add_collider.valid_connections = list(add_collider.valid_connections)
return False
## support check
def missing_supports_check(self, part, trans):
if len(part.supports) > 0:
for sup in part.supports:
supports_count = 0
sup_trans = sup.transform(trans)
for dir in sup_trans.sup_dir:
for id in self.possible_collisions:
if self.aggregated_parts[id].collider.check_intersection_w_line(dir):
supports_count += 1
break
if supports_count == len(sup_trans.sup_dir):
return False
return True
else:
return False
## global constraints check
def global_constraints_check(self, part, trans):
for constraint in self.global_constraints:
part_center = part.transform_center(trans)
if constraint.soft:
if constraint.check(pt = part_center) == False:
return True
else:
part_collider = part.transform_collider(trans)
if constraint.check(pt = part_center, collider = part_collider) == False:
return True
return False
#### aggregation methods ####
## sequential aggregation with Graph Grammar
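## Each rule is a string of the form "part1|conn1_part2|conn2>id1_id2"
## (e.g. "beam|0_beam|2>0_1", hypothetical part names): the left side names the two
## part types (or, after the first rule, the id of an already placed part) and their
## connection indices, and the right side assigns ids to the placed parts. Since the
## parser splits on "_" and "|", part names containing those characters would break it.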
def aggregate_sequence(self, graph_rules):
for rule in graph_rules:
## first part
if len(self.aggregated_parts) == 0:
aggr_rule = rule.split(">")[0]
rule_parts = aggr_rule.split("_")
part1 = str(rule_parts[0].split("|")[0])
conn1 = int(rule_parts[0].split("|")[1])
part2 = str(rule_parts[1].split("|")[0])
conn2 = int(rule_parts[1].split("|")[1])
rule_ids = rule.split(">")[1].split("_")
first_part = self.parts[part1]
first_part_trans = first_part.transform(rg.Transform.Identity)
first_part_trans.id = rule_ids[0]
next_part = self.parts[part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[conn2].flip_pln, first_part.connections[conn1].pln)
next_part_trans = next_part.transform(orientTransform)
next_part_trans.id = rule_ids[1]
## check additional collider (for fabrication constraints)
self.additional_collider_check(next_part, orientTransform)
## parent-child tracking
first_part_trans.children.append(next_part_trans)
next_part_trans.parent = first_part_trans
self.aggregated_parts.append(first_part_trans)
self.aggregated_parts.append(next_part_trans)
else:
aggr_rule = rule.split(">")[0]
rule_parts = aggr_rule.split("_")
part1_id = str(rule_parts[0].split("|")[0])
conn1 = int(rule_parts[0].split("|")[1])
part2 = str(rule_parts[1].split("|")[0])
conn2 = int(rule_parts[1].split("|")[1])
rule_ids = rule.split(">")[1].split("_")
first_part = None
for part in self.aggregated_parts:
if part.id == part1_id:
first_part = part
break
if first_part is not None:
first_part.id = rule_ids[0]
next_part = self.parts[part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[conn2].flip_pln, first_part.connections[conn1].pln)
next_part_trans = next_part.transform(orientTransform)
next_part_trans.id = rule_ids[1]
## parent-child tracking
first_part.children.append(next_part_trans.id)
next_part_trans.parent = first_part.id
self.aggregated_parts.append(next_part_trans)
else:
pass ## implement error handling
## stochastic aggregation
def aggregate_rnd(self, num, use_catalog = False):
added = 0
loops = 0
while added < num:
loops += 1
if loops > num*100:
break
## if no part is present in the aggregation, add first random part
if len(self.aggregated_parts) == 0:
first_part = None
if use_catalog:
first_part = self.parts[self.catalog.return_weighted_part()]
else:
first_part = self.parts[random.choice(self.parts.keys())]
if first_part is not None:
first_part_trans = first_part.transform(rg.Transform.Identity)
for conn in first_part_trans.connections:
conn.generate_rules_table(self.rules)
first_part_trans.id = 0
self.aggregated_parts.append(first_part_trans)
added += 1
if use_catalog:
self.catalog.update(first_part_trans.name, -1)
## otherwise add new random part
else:
next_rule = None
part_01_id = -1
conn_01_id = -1
next_rule_id = -1
new_rule_attempts = 0
while new_rule_attempts < 1000:
new_rule_attempts += 1
next_rule = None
if use_catalog:
if self.catalog.is_empty:
break
next_part = self.parts[self.catalog.return_weighted_part()]
if next_part is not None:
part_01_id = random.randint(0,len(self.aggregated_parts)-1)
part_01 = self.aggregated_parts[part_01_id]
if len(part_01.active_connections) > 0:
conn_01_id = part_01.active_connections[random.randint(0, len(part_01.active_connections)-1)]
conn_01 = part_01.connections[conn_01_id]
if len(conn_01.active_rules) > 0:
next_rule_id = conn_01.active_rules[random.randint(0, len(conn_01.active_rules)-1)]
next_rule = conn_01.rules_table[next_rule_id]
if next_rule.part2 == next_part.name:
break
else:
part_01_id = random.randint(0,len(self.aggregated_parts)-1)
part_01 = self.aggregated_parts[part_01_id]
if len(part_01.active_connections) > 0:
conn_01_id = part_01.active_connections[random.randint(0, len(part_01.active_connections)-1)]
conn_01 = part_01.connections[conn_01_id]
if len(conn_01.active_rules) > 0:
next_rule_id = conn_01.active_rules[random.randint(0, len(conn_01.active_rules)-1)]
next_rule = conn_01.rules_table[next_rule_id]
break
if next_rule is not None:
next_part = self.parts[next_rule.part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[next_rule.conn2].flip_pln, conn_01.pln)
## boolean checks for all constraints
coll_check = False
add_coll_check = False
valid_connections = []
missing_sup_check = False
global_const_check = False
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
next_part_trans.reset_part(self.rules)
for i in range(len(next_part_trans.active_connections)):
if next_part_trans.active_connections[i] == next_rule.conn2:
next_part_trans.active_connections.pop(i)
break
next_part_trans.id = len(self.aggregated_parts)
## parent-child tracking
self.aggregated_parts[part_01_id].children.append(next_part_trans.id)
next_part_trans.parent = self.aggregated_parts[part_01_id].id
self.aggregated_parts.append(next_part_trans)
if use_catalog:
self.catalog.update(next_part_trans.name, -1)
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
added += 1
## TO FIX --> do not remove rules when only caused by missing supports
else:
## remove rules if they cause collisions or overlaps
for i in range(len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules)):
if self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules[i] == next_rule_id:
self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules.pop(i)
break
## check if the connection is still active (still active rules available)
if len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules) == 0:
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
else:
## if no part is available, exit the aggregation routine and return an error message
msg = "Could not place " + str(num-added) + " parts"
return msg
## stochastic aggregation with catalog
def aggregate_rnd_catalog(self, catalog, num = None):
added = 0
loops = 0
if num is None:
num = catalog.parts_total
while added < num:
loops += 1
if loops > num*100:
break
## if no part is present in the aggregation, add first random part
if len(self.aggregated_parts) == 0:
first_part = self.parts[catalog.return_weighted_part()]
first_part_trans = first_part.transform(rg.Transform.Identity)
for conn in first_part_trans.connections:
conn.generate_rules_table(self.rules)
first_part_trans.id = 0
self.aggregated_parts.append(first_part_trans)
catalog.update(first_part.name, -1)
added += 1
## otherwise add new random part
else:
next_rule = None
part_01_id = -1
conn_01_id = -1
next_rule_id = -1
new_rule_attempts = 0
while new_rule_attempts < 10000:
new_rule_attempts += 1
selected_part = catalog.return_weighted_part()
if selected_part is None or catalog.is_empty == True:
break
## pick a random part in the aggregation to attach the selected part to
part_01_id = random.randint(0, len(self.aggregated_parts)-1)
part_01 = self.aggregated_parts[part_01_id]
if len(part_01.active_connections) > 0:
conn_01_id = part_01.active_connections[random.randint(0, len(part_01.active_connections)-1)]
conn_01 = part_01.connections[conn_01_id]
if len(conn_01.active_rules) > 0:
next_rule_id = conn_01.active_rules[random.randint(0, len(conn_01.active_rules)-1)]
if conn_01.rules_table[next_rule_id].part2 == selected_part:
next_rule = conn_01.rules_table[next_rule_id]
break
if next_rule is not None:
next_part = self.parts[next_rule.part2]
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[next_rule.conn2].flip_pln, conn_01.pln)
## boolean checks for all constraints
coll_check = False
add_coll_check = False
valid_connections = []
missing_sup_check = False
global_const_check = False
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
next_part_trans.reset_part(self.rules)
for i in range(len(next_part_trans.active_connections)):
if next_part_trans.active_connections[i] == next_rule.conn2:
next_part_trans.active_connections.pop(i)
break
next_part_trans.id = len(self.aggregated_parts)
## parent-child tracking
self.aggregated_parts[part_01_id].children.append(next_part_trans.id)
next_part_trans.parent = self.aggregated_parts[part_01_id].id
self.aggregated_parts.append(next_part_trans)
catalog.update(next_part_trans.name, -1)
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
added += 1
## TO FIX --> do not remove rules when only caused by missing supports
else:
## remove rules if they cause collisions or overlaps
for i in range(len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules)):
if self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules[i] == next_rule_id:
self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules.pop(i)
break
## check if the connection is still active (still active rules available)
if len(self.aggregated_parts[part_01_id].connections[conn_01_id].active_rules) == 0:
for i in range(len(self.aggregated_parts[part_01_id].active_connections)):
if self.aggregated_parts[part_01_id].active_connections[i] == conn_01_id:
self.aggregated_parts[part_01_id].active_connections.pop(i)
break
else:
## if no part is available, exit the aggregation routine and return an error message
msg = "Could not place " + str(num-added) + " parts"
return msg
## compute all possibilities for child-parts of the given part, and store them in the aggregation queue
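## bisect keeps queue_values sorted in ascending order, so the candidate with the
## highest field value is always at the end of aggregation_queue; aggregate_field()
## pops from the end to grow the aggregation towards high field values first.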
def compute_next_w_field(self, part):
for i in xrange(len(part.active_connections)-1, -1, -1):
conn_id = part.active_connections[i]
conn = part.connections[conn_id]
for i2 in xrange(len(conn.active_rules)-1, -1, -1):
rule_id = conn.active_rules[i2]
rule = conn.rules_table[rule_id]
next_part = self.parts[rule.part2]
next_center = rg.Point3d(next_part.center)
orientTransform = rg.Transform.PlaneToPlane(next_part.connections[rule.conn2].flip_pln, conn.pln)
next_center.Transform(orientTransform)
if self.multiple_fields:
f_name = next_part.field
if self.field[f_name].bbox.Contains(next_center) == True:
field_val = self.field[f_name].return_pt_val(next_center)
queue_index = bisect.bisect_left(self.queue_values, field_val)
queue_entry = (next_part.name, part.id, orientTransform)
self.queue_values.insert(queue_index, field_val)
self.aggregation_queue.insert(queue_index, queue_entry)
self.queue_count += 1
else:
if self.field.bbox.Contains(next_center) == True:
field_val = self.field.return_pt_val(next_center)
queue_index = bisect.bisect_left(self.queue_values, field_val)
queue_entry = (next_part.name, part.id, orientTransform)
self.queue_values.insert(queue_index, field_val)
self.aggregation_queue.insert(queue_index, queue_entry)
self.queue_count += 1
## field-driven aggregation
def aggregate_field(self, num):
added = 0
loops = 0
while added < num:
## avoid endless loops
loops += 1
if loops > num*100:
break
## if no part is present in the aggregation, add first random part
if len(self.aggregated_parts) == 0 and self.prev_num == 0:
first_part = self.parts[random.choice(self.parts.keys())]
start_point = None
if self.multiple_fields:
f_name = first_part.field
if (self.mode == 2 or self.mode == 3) and len(self.global_constraints) > 0:
start_point = self.field[f_name].return_highest_pt(constraints=self.global_constraints)
else:
start_point = self.field[f_name].return_highest_pt()
else:
if (self.mode == 2 or self.mode == 3) and len(self.global_constraints) > 0:
start_point = self.field.return_highest_pt(constraints=self.global_constraints)
else:
start_point = self.field.return_highest_pt()
mov_vec = rg.Vector3d.Subtract(rg.Vector3d(start_point), rg.Vector3d(first_part.center))
move_transform = rg.Transform.Translation(mov_vec.X, mov_vec.Y, mov_vec.Z)
first_part_trans = first_part.transform(move_transform)
for conn in first_part_trans.connections:
conn.generate_rules_table(self.rules)
first_part_trans.id = 0
self.aggregated_parts.append(first_part_trans)
## compute all possible next parts and append to list
self.compute_next_w_field(first_part_trans)
added += 1
else:
## if no part is available, exit the aggregation routine and return an error message
if self.queue_count == 0:
msg = "Could not place " + str(num-added) + " parts"
return msg
next_data = self.aggregation_queue[self.queue_count-1]
next_part = self.parts[next_data[0]]
next_center = rg.Point3d(next_part.center)
orientTransform = next_data[2]
## boolean checks for all constraints
coll_check = False
add_coll_check = False
missing_sup_check = False
global_const_check = False
## collision check
self.possible_collisions = []
coll_check = self.collision_check(next_part, orientTransform)
## constraints check
if self.mode == 1: ## only local constraints mode
if coll_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
elif self.mode == 2: ## only global constraints mode
if coll_check == False and len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
elif self.mode == 3: ## local+global constraints mode
if coll_check == False:
if len(self.global_constraints) > 0:
global_const_check = self.global_constraints_check(next_part, orientTransform)
if global_const_check == False and next_part.is_constrained:
add_coll_check = self.additional_collider_check(next_part, orientTransform)
if add_coll_check == False:
missing_sup_check = self.missing_supports_check(next_part, orientTransform)
if coll_check == False and add_coll_check == False and missing_sup_check == False and global_const_check == False:
next_part_trans = next_part.transform(orientTransform)
next_part_trans.reset_part(self.rules)
for conn in next_part_trans.connections:
conn.generate_rules_table(self.rules)
next_part_trans.id = len(self.aggregated_parts)
self.aggregated_parts[next_data[1]].children.append(next_part_trans.id)
next_part_trans.parent = self.aggregated_parts[next_data[1]].id
self.aggregated_parts.append(next_part_trans)
## compute all possible next parts and append to list
self.compute_next_w_field(next_part_trans)
added += 1
self.aggregation_queue.pop()
self.queue_values.pop()
self.queue_count -=1
#################################################################### Plane Constraint ####################################################################
class Plane_Constraint(object):
## constructor
def __init__(self, _plane, _positive = True, _soft = True):
self.type = 'plane'
self.plane = _plane
self.positive = _positive
self.soft = _soft
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspPlaneConst [+: %s, soft: %s]" % (self.positive, self.soft)
## constraint check method
def check(self, pt = None, collider = None):
if self.soft:
return self.check_soft(pt)
else:
return self.check_hard(pt, collider)
## hard constraint check method
def check_hard(self, pt, collider):
if self.check_soft(pt):
for geo in collider.geometry:
if rg.Intersect.Intersection.MeshPlane(geo, self.plane) is not None:
return False
return True
else:
return False
## soft constraint check method
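## The soft check tests a single point (the part center when called from
## global_constraints_check): the point is remapped into plane coordinates and
## accepted when it lies on the positive (or negative, if self.positive is False)
## Z side of the constraint plane.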
def check_soft(self, pt):
mapped_pt = self.plane.RemapToPlaneSpace(pt)[1]
if self.positive:
if mapped_pt.Z > 0:
return True
else:
if mapped_pt.Z < 0:
return True
return False
#################################################################### Mesh Constraint ####################################################################
class Mesh_Constraint(object):
## constructor
def __init__(self, _geo, _inside = True, _soft = True):
self.type = 'mesh_collider'
self.geo = _geo
self.inside = _inside
self.soft = _soft
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspMeshConst [in: %s, soft: %s]" % (self.inside, self.soft)
## constraint check method
def check(self, pt = None, collider = None):
if self.soft:
return self.check_soft(pt)
else:
return self.check_hard(pt, collider)
## hard constraint check method
def check_hard(self, pt, collider):
if self.check_soft(pt):
for geo in collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(self.geo, geo)) > 0:
return False
return True
else:
return False
## soft constraint check method
def check_soft(self, pt):
is_inside = self.geo.IsPointInside(pt, global_tolerance, False)
if self.inside:
if is_inside:
return True
else:
if not is_inside:
return True
return False
#########################################################################
## WIP ##
#########################################################################
#################################################################### Collider ####################################################################
class Collider(object):
## constructor
def __init__(self, _geo, _multiple=False, _check_all = False, _connections=[], _valid_connections = []):
self.geometry = _geo
self.multiple = _multiple
self.check_all = _check_all
self.connections = _connections
self.valid_connections = _valid_connections
self.set_connections = False
if len(self.connections) == len(self.geometry) and self.multiple == True:
self.set_connections = True
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "WaspCollider"
## return a transformed copy of the collider
########################################################################### check if valid connections need to be transformed or re-generated!!!
def transform(self, trans, transform_connections = False, maintain_valid = False):
geometry_trans = []
for geo in self.geometry:
geo_trans = geo.Duplicate()
geo_trans.Transform(trans)
geometry_trans.append(geo_trans)
connections_trans = []
if transform_connections:
for conn in self.connections:
connections_trans.append(conn.transform(trans))
if maintain_valid:
valid_connection_trans = list(self.valid_connections)
coll_trans = Collider(geometry_trans, _multiple=self.multiple, _check_all=self.check_all, _connections=connections_trans, _valid_connections=valid_connection_trans)
else:
coll_trans = Collider(geometry_trans, _multiple=self.multiple, _check_all=self.check_all, _connections=connections_trans)
return coll_trans
## return a copy of the collider
def copy(self):
geometry_copy = []
for geo in self.geometry:
geo_copy = geo.Duplicate()
geometry_copy.append(geo_copy)
connections_copy = []
for conn in self.connections:
connections_copy.append(conn.copy())
valid_connection_copy = list(self.valid_connections)
coll_copy = Collider(geometry_copy, _multiple=self.multiple, _check_all=self.check_all, _connections=connections_copy, _valid_connections=valid_connection_copy)
return coll_copy
## check collisions between collider and given part
def check_collisions_w_parts(self, parts):
## multiple collider with associated connections
if self.multiple:
valid_colliders = []
self.valid_connections = []
count = 0
for geo in self.geometry:
valid_coll = True
for part in parts:
for other_geo in part.collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
valid_coll = False
break
if valid_coll == False:
break
valid_colliders.append(valid_coll)
if self.set_connections and valid_coll:
self.valid_connections.append(count)
if valid_coll and self.check_all == False:
break
count+=1
if True in valid_colliders:
return False
return True
## simple collider
else:
for geo in self.geometry:
for part in parts:
for other_geo in part.collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
return True
return False
## check collisions between collider and given ids in the given parts list
def check_collisions_by_id(self, parts, ids):
## multiple collider with associated connections
if self.multiple:
valid_colliders = []
count = 0
for geo in self.geometry:
valid_coll = True
for id in ids:
for other_geo in parts[id].collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
valid_coll = False
break
valid_colliders.append(valid_coll)
if valid_coll and self.check_all == False:
break
count+=1
if True in valid_colliders:
return False
return True
## simple collider
else:
for geo in self.geometry:
for id in ids:
for other_geo in parts[id].collider.geometry:
if len(rg.Intersect.Intersection.MeshMeshFast(geo, other_geo)) > 0:
return True
return False
## check intersection between collider and line (for supports check)
def check_intersection_w_line(self, ln):
for geo in self.geometry:
if len(rg.Intersect.Intersection.MeshLine(geo, ln)[0]) > 0:
return True
return False
#### WIP ####
def check_global_constraints(self, constraint):
return False
################################################################# Parts Catalog ##################################################################
class PartCatalog(object):
##constructor
def __init__(self, _parts, _amounts):
self.parts = _parts
self.amounts = _amounts
self.dict = {}
for i in xrange(len(self.parts)):
self.dict[self.parts[i].name] = _amounts[i]
self.is_empty = False
self.parts_total = sum(self.dict.values())
## return a random part type
def return_random_part(self):
choices = [key for key in self.dict.keys() if self.dict[key] > 0]
if len(choices) > 0:
return random.choice(choices)
else:
self.is_empty = True
return None
## return a weighted choice between the available parts, given the available parts amounts
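## Roulette-wheel selection: each part type is drawn with probability proportional
## to its remaining amount. Illustrative example with hypothetical numbers: with
## {'brick': 3, 'beam': 1} a brick is returned roughly 75% of the time.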
def return_weighted_part(self):
if self.parts_total == 0:
self.is_empty = True
return None
n = random.uniform(0, self.parts_total)
for key in self.dict:
if n < self.dict[key]:
return key
n = n - self.dict[key]
return None
def update(self, part_name, difference):
self.dict[part_name] += difference
self.parts_total = sum(self.dict.values())
if self.parts_total == 0:
self.is_empty = True
def copy(self):
amounts = [self.dict[part.name] for part in self.parts]
return PartCatalog(self.parts, amounts)
|
gpl-3.0
| 6,861,793,551,399,903,000
| 33.263757
| 210
| 0.642644
| false
| 3.278659
| false
| false
| false
|
NCPlayz/CassBotPy
|
cassandra/bot.py
|
1
|
4014
|
import datetime
import json
import discord
import os
from discord.ext import commands
from discord.ext.commands.converter import *
class CassandraContext(commands.Context):
def is_float(self, argument):
"""Checks if the argument is a float."""
try:
return float(argument)  # truthy when the argument parses as a float
except ValueError: # String is not a number
return False
async def send(self, content=None, *args, **kwargs):
"""Override for send to add message filtering"""
if content:
if self.is_float(content) or content.isdigit():
content = str(content)
content.replace("@everyone", "@\u200beveryone").replace("@here", "@\u200bhere")
sent_message = await super().send(content, *args, **kwargs)
return sent_message
@property
def session(self):
"""Returns the aiohttp.ClientSession() instance in CassandraBase."""
return self.bot.session
class CassandraBase(commands.Bot):
"""This is the class that initializes the bot."""
def __init__(self):
self.token = os.environ['TOKEN']
self.presence = discord.Game(name='in a Digital Haunt...',
url="https://www.twitch.tv/ghostofsparkles", type=1)
self.archive_file = []
def get_package_info():
"""Fetches `arg` in `package.json`."""
with open("./package.json") as f:
config = json.load(f)
return config
def get_prefix():
"""Fetches all known prefixes."""
prefixes = ["-",
"Cassandra "]
return commands.when_mentioned_or(*prefixes)
def get_description():
"""Fetches description."""
return f"{get_package_info()['name']}"
def get_game():
"""Fetches game presence."""
return self.presence
super().__init__(command_prefix=get_prefix(), game=get_game(), description=get_description(), pm_help=None,
help_attrs=dict(hidden=True))
startup_extensions = []
for file in os.listdir("./cogs"):
if file.endswith(".py"):
startup_extensions.append(file.replace('.py', ''))
for extension in startup_extensions:
try:
self.load_extension(f'cogs.{extension}')
print(f'Loaded {extension}')
except Exception as e:
error = f'{extension}\n {type(e).__name__}: {e}'
print(f'Failed to load extension {error}')
self.session = None
def run(self):
"""Runs the bot."""
super().run(self.token)
async def on_message(self, message):
"""An event triggered when a message is sent."""
ctx = await self.get_context(message, cls=CassandraContext)
await self.invoke(ctx)
async def fetch(self, url: str, headers: dict = None, timeout: float = None,
return_type: str = None, **kwargs):
"""Fetches data from a url via aiohttp."""
async with self.session.get(url, headers=headers, timeout=timeout, **kwargs) as resp:
if return_type:
cont = getattr(resp, return_type)
return resp, await cont()
else:
return resp, None
class Cassandra(CassandraBase):
pass
class ConvertError(Exception):
pass
class Union(Converter):
def __init__(self, *converters):
self.converters = converters
async def convert(self, ctx: CassandraContext, argument: str):
"""Converts an argument"""
for converter in self.converters:
try:
return await ctx.command.do_conversion(ctx, converter, argument)
            except Exception:
                continue  # try the next converter before giving up
        raise ConvertError('Conversion Failed.')
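# --- Hypothetical usage sketch (not part of the original file) ---
# Shows how the Union converter above could be attached to a command. The
# command name and the converter pair are illustrative assumptions, and the
# command is not registered with a bot here. MemberConverter and
# TextChannelConverter come from the wildcard converter import at the top.
@commands.command(name="resolve")
async def _resolve_example(ctx: CassandraContext, target: Union(MemberConverter, TextChannelConverter)):
    """Accepts either a member or a text channel and echoes it back."""
    await ctx.send(f"Resolved: {target}")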
|
mit
| -1,071,383,508,198,149,500
| 32.016949
| 115
| 0.550573
| false
| 4.510112
| false
| false
| false
|
Franky333/crazyflie-clients-python
|
src/cfclient/ui/tabs/LogTab.py
|
1
|
3242
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Shows the Log TOC of available variables in the Crazyflie.
"""
import cfclient
from cfclient.ui.tab import Tab
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import Qt
__author__ = 'Bitcraze AB'
__all__ = ['LogTab']
param_tab_class = uic.loadUiType(cfclient.module_path +
"/ui/tabs/logTab.ui")[0]
class LogTab(Tab, param_tab_class):
connectedSignal = pyqtSignal(str)
disconnectedSignal = pyqtSignal(str)
def __init__(self, tabWidget, helper, *args):
super(LogTab, self).__init__(*args)
self.setupUi(self)
self.tabName = "Log TOC"
self.menuName = "Log TOC"
self.helper = helper
self.tabWidget = tabWidget
self.cf = helper.cf
# Init the tree widget
self.logTree.setHeaderLabels(['Name', 'ID', 'Unpack', 'Storage'])
self.cf.connected.add_callback(self.connectedSignal.emit)
self.connectedSignal.connect(self.connected)
# Clear the log TOC list when the Crazyflie is disconnected
self.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.disconnectedSignal.connect(self.disconnected)
@pyqtSlot('QString')
def disconnected(self, linkname):
self.logTree.clear()
@pyqtSlot(str)
def connected(self, linkURI):
self.logTree.clear()
toc = self.cf.log.toc
for group in list(toc.toc.keys()):
groupItem = QtWidgets.QTreeWidgetItem()
groupItem.setData(0, Qt.DisplayRole, group)
for param in list(toc.toc[group].keys()):
item = QtWidgets.QTreeWidgetItem()
item.setData(0, Qt.DisplayRole, param)
item.setData(1, Qt.DisplayRole, toc.toc[group][param].ident)
item.setData(2, Qt.DisplayRole, toc.toc[group][param].pytype)
item.setData(3, Qt.DisplayRole, toc.toc[group][param].ctype)
groupItem.addChild(item)
self.logTree.addTopLevelItem(groupItem)
self.logTree.expandItem(groupItem)
|
gpl-2.0
| 3,133,045,078,609,853,000
| 33.860215
| 79
| 0.614436
| false
| 3.512459
| false
| false
| false
|
spinningbytes/deep-mlsa
|
code/architectures/default_cnn.py
|
1
|
2153
|
import logging
from keras.layers import Dense, ZeroPadding1D, Embedding, Convolution1D, MaxPooling1D, Flatten, Input
from keras.models import Model
from utils.data_utils import load_embedding_matrix
def create_default_model(config_data):
nb_filter = 200
filter_length = 6
hidden_dims = nb_filter
embedding_matrix = load_embedding_matrix(config_data)
max_features = embedding_matrix.shape[0]
embedding_dims = embedding_matrix.shape[1]
max_len = config_data['max_sentence_length']
logging.info('Build Model...')
logging.info('Embedding Dimensions: ({},{})'.format(max_features, embedding_dims))
main_input = Input(batch_shape=(None, max_len), dtype='int32', name='main_input')
if not config_data.get('random_embedding', None):
logging.info('Pretrained Word Embeddings')
embeddings = Embedding(
max_features,
embedding_dims,
input_length=max_len,
weights=[embedding_matrix],
trainable=False
)(main_input)
else:
logging.info('Random Word Embeddings')
embeddings = Embedding(max_features, embedding_dims, init='lecun_uniform', input_length=max_len)(main_input)
zeropadding = ZeroPadding1D(filter_length - 1)(embeddings)
conv1 = Convolution1D(
nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1)(zeropadding)
max_pooling1 = MaxPooling1D(pool_length=4, stride=2)(conv1)
conv2 = Convolution1D(
nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1)(max_pooling1)
max_pooling2 = MaxPooling1D(pool_length=conv2._keras_shape[1])(conv2)
flatten = Flatten()(max_pooling2)
hidden = Dense(hidden_dims)(flatten)
softmax_layer1 = Dense(3, activation='softmax', name='sentiment_softmax', init='lecun_uniform')(hidden)
model = Model(input=[main_input], output=softmax_layer1)
test_model = Model(input=[main_input], output=[softmax_layer1, hidden])
return model, test_model
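# --- Hypothetical usage sketch (not part of the original file) ---
# Illustrates how create_default_model() is expected to be driven. The config
# keys mirror the ones read above, but load_embedding_matrix() will also need
# whatever embedding settings the real config carries, so this is a sketch
# rather than a runnable script. The compile settings are assumptions as well.
if __name__ == '__main__':
    example_config = {
        'max_sentence_length': 50,
        'random_embedding': None,  # use the pretrained-embedding branch
    }
    model, test_model = create_default_model(example_config)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()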
|
apache-2.0
| 6,691,548,136,573,418,000
| 33.190476
| 116
| 0.668834
| false
| 3.649153
| false
| false
| false
|
openstack/networking-plumgrid
|
networking_plumgrid/neutron/tests/unit/extensions/test_providernet.py
|
1
|
2158
|
# Copyright 2015 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
PLUMgrid plugin provider network extension unit tests
"""
import mock
from oslo_utils import importutils
from networking_plumgrid.neutron.plugins import plugin as plumgrid_plugin
from neutron.tests.unit.extensions import test_providernet as pnet
PLUM_DRIVER = ('networking_plumgrid.neutron.plugins.drivers.'
'fake_plumlib.Plumlib')
FAKE_DIRECTOR = '1.1.1.1'
FAKE_PORT = '1234'
FAKE_USERNAME = 'fake_admin'
FAKE_PASSWORD = 'fake_password'
FAKE_TIMEOUT = '0'
class ProviderNetworksTestCase(pnet.ProvidernetExtensionTestCase):
_plugin_name = ('networking_plumgrid.neutron.plugins.'
'plugin.NeutronPluginPLUMgridV2')
def setUp(self):
def mocked_plumlib_init(self):
director_plumgrid = FAKE_DIRECTOR
director_port = FAKE_PORT
director_username = FAKE_USERNAME
director_password = FAKE_PASSWORD
timeout = FAKE_TIMEOUT
self._plumlib = importutils.import_object(PLUM_DRIVER)
self._plumlib.director_conn(director_plumgrid,
director_port, timeout,
director_username,
director_password)
with mock.patch.object(plumgrid_plugin.NeutronPluginPLUMgridV2,
'plumgrid_init', new=mocked_plumlib_init):
super(ProviderNetworksTestCase, self).setUp()
def tearDown(self):
super(ProviderNetworksTestCase, self).tearDown()
|
apache-2.0
| -443,667,551,993,928,500
| 36.859649
| 78
| 0.659407
| false
| 4.026119
| true
| false
| false
|
rndusr/stig
|
stig/client/filters/base.py
|
1
|
22315
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
import itertools
import operator
import re
from collections import abc
from ...utils import cliparser
from ...logging import make_logger # isort:skip
log = make_logger(__name__)
BOOLEAN = 'boolean'
COMPARATIVE = 'comparative'
class BoolFilterSpec():
"""Boolean filter specification"""
type = BOOLEAN
def __init__(self, func, *, needed_keys=(), aliases=(), description='No description'):
if not func:
self.filter_function = None
needed_keys = ()
else:
self.filter_function = func
self.needed_keys = needed_keys
self.aliases = aliases
self.description = description
class CmpFilterSpec():
"""Comparative filter specification"""
type = COMPARATIVE
def __init__(self, *, value_type, value_getter=None, value_matcher=None,
value_convert=None, as_bool=None, needed_keys=(), aliases=(),
description='No description'):
"""
value_type : Subclass of `type` (i.e. something that returns an instance when
                     called and can be passed to `isinstance` as the second argument)
value_getter : Callable that takes an item and returns one or more
values to match against the user-provided value;
Multiple values must be given as an iterator (list,
tuple, generator, etc), and the item matches if any
match
value_convert : Callable that takes a value and converts it to something
comparable (e.g. "42" (str) -> 42 (int))
value_matcher : Callable that takes (item, operator, value) and returns True/False
as_bool : Callable that takes an item and returns True/False
needed_keys : Needed keys for this filter
aliases : Alternative names of this filter
"""
self.value_type = value_type
self.needed_keys = needed_keys
self.aliases = aliases
self.description = description
self.value_convert = value_convert if value_convert is not None else value_type
if value_getter is not None:
self.value_getter = value_getter
elif len(self.needed_keys) == 1:
self.value_getter = lambda dct, k=needed_keys[0]: dct[k]
else:
            raise TypeError('Missing argument with needed_keys=%r: value_getter' % (self.needed_keys,))
if value_matcher is None:
def value_matcher(item, op, user_value, vg=self.value_getter):
item_value = vg(item)
if isinstance(item_value, abc.Iterator):
return any(op(ival, user_value) for ival in item_value)
else:
return op(item_value, user_value)
self.value_matcher = value_matcher
if as_bool is None:
def as_bool(item, vg=self.value_getter):
item_value = vg(item)
if isinstance(item_value, abc.Iterator):
return any(item_value)
else:
return bool(item_value)
self.as_bool = as_bool
def make_filter(self, operator, user_value, invert):
if operator is None and user_value is None:
# Abuse comparative filter as boolean filter
# (e.g. 'peers-connected' matches torrents with peers-connected!=0)
return (self.as_bool, self.needed_keys, invert)
elif user_value is None:
# Operator with no value matches everything
return (None, (), False)
else:
def f(obj, vm=self.value_matcher, op=operator, val=user_value):
return vm(obj, op, val)
return (f, self.needed_keys, invert)
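# --- Hypothetical illustration (not part of the original module) ---
# The docstring above explains the CmpFilterSpec arguments; a concrete spec for
# a made-up 'size' item key could look like this. The key name, alias and
# description are assumptions for illustration only.
_example_size_spec = CmpFilterSpec(
    value_type=int,
    value_convert=lambda v: int(float(v)),  # also accept "1.5e6"-style input
    needed_keys=('size',),                  # value_getter defaults to item['size']
    aliases=('sz',),
    description='Match items by their size in bytes',
)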
class FilterSpecDict(abc.Mapping):
"""TODO"""
_NOT_FOUND = object()
def __init__(self, dct):
self._dct = dct
def __getitem__(self, key):
value = self._dct.get(key, self._NOT_FOUND)
if value is not self._NOT_FOUND:
return value
for value in self._dct.values():
if key in value.aliases:
return value
raise KeyError(key)
def __iter__(self):
return iter(self._dct)
def __len__(self):
return len(self._dct)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._dct)
class Filter():
"""Match sequences of objects against a single filter"""
OPERATORS = {
'=' : operator.__eq__, '~' : operator.__contains__,
'>' : operator.__gt__, '<' : operator.__lt__,
'>=' : operator.__ge__, '<=' : operator.__le__,
'=~' : lambda a, b: re.search(b, a),
}
INVERT_CHAR = '!'
POSSIBLE_OPERATORS = tuple(itertools.chain.from_iterable((op, '!' + op)
for op in OPERATORS))
DEFAULT_FILTER = None
DEFAULT_OPERATOR = '~'
BOOLEAN_FILTERS = {}
COMPARATIVE_FILTERS = {}
@classmethod
def _resolve_alias(cls, name):
"""
Return real filter name or `name` if it does not resolve
"""
if not hasattr(cls, '_aliases'):
aliases = {}
for fspecs in (cls.BOOLEAN_FILTERS, cls.COMPARATIVE_FILTERS):
for fname,f in fspecs.items():
for a in f.aliases:
if a in aliases:
raise RuntimeError('Multiple aliases: %r' % (a,))
else:
aliases[a] = fname
cls._aliases = aliases
if name is None:
name = ''
return cls._aliases.get(name.strip(), name)
@classmethod
def _get_filter_spec(cls, name):
"""
Get filter spec by `name`
Raise ValueError on error
"""
fspec = cls.BOOLEAN_FILTERS.get(name)
if fspec is not None:
return fspec
fspec = cls.COMPARATIVE_FILTERS.get(name)
if fspec is not None:
return fspec
if name:
raise ValueError('Invalid filter name: %r' % (name,))
else:
raise ValueError('No filter expression given')
@classmethod
def _make_filter(cls, name, op, user_value, invert):
"""
Return filter function, needed keys and invert
Filter function takes a value and returns whether it matches
`user_value`.
Filter function and needed keys are both `None` if everything is
matched.
Raise ValueError on error
"""
# Ensure value is wanted by filter, compatible to operator and of proper type
user_value = cls._validate_user_value(name, op, user_value)
log.debug(' Validated user_value: %r', user_value)
fspec = cls._get_filter_spec(name)
if fspec.type is BOOLEAN:
return (fspec.filter_function, fspec.needed_keys, invert)
elif fspec.type is COMPARATIVE:
return fspec.make_filter(cls.OPERATORS.get(op), user_value, invert)
@classmethod
def _validate_user_value(cls, name, op, user_value):
"""
Ensure that the `name`, `op`, and `user_value` make sense in conjunction
Return user value as correct type (e.g. `int`) for filter `name`
Raise ValueError if anything smells funky
"""
log.debug(' Validating user value: name=%r, op=%r, user_value=%r',
name, op, user_value)
if name in cls.BOOLEAN_FILTERS:
# log.debug('%r is a valid boolean filter: %r', name, cls.BOOLEAN_FILTERS[name])
if user_value:
raise ValueError('Boolean filter does not take a value: %s' % (name,))
elif op:
raise ValueError('Boolean filter does not take an operator: %s' % (name,))
if op is None or user_value is None:
# Filter `name` could still be (ab)used as boolean filter
return None
fspec = cls.COMPARATIVE_FILTERS.get(name)
if fspec is None:
if name:
raise ValueError('Invalid filter name: %r' % (name,))
else:
raise ValueError('No filter expression given')
# Convert user_value to proper type
if type(user_value) is not fspec.value_type:
log.debug(' Converting %r to %r', user_value, fspec.value_type)
try:
user_value = fspec.value_convert(user_value)
except ValueError:
raise ValueError('Invalid value for filter %r: %r' % (name, user_value))
# In case of regex operator, compile user_value
if op == '=~':
try:
user_value = re.compile(user_value)
except re.error as e:
raise ValueError('Invalid regular expression: %s: %s' % (str(e).capitalize(), user_value))
else:
# Test if target_type supports operator
try:
log.debug('Trying %r(%r [%r], %r [%r])',
cls.OPERATORS[op], user_value, type(user_value), user_value, type(user_value))
cls.OPERATORS[op](user_value, user_value)
except TypeError:
raise ValueError('Invalid operator for filter %r: %s' % (name, op))
return user_value
@classmethod
def _parse_inverter(cls, string, invert):
if not string:
return string, invert
# Find INVERT_CHAR at start or end of string
parts = cliparser.tokenize(string.strip(), delims=(cls.INVERT_CHAR,), escapes=('\\',), quotes=())
if cls.INVERT_CHAR in parts:
if parts and parts[0] == cls.INVERT_CHAR:
parts.pop(0)
invert = not invert
if parts and parts[-1] == cls.INVERT_CHAR:
parts.pop(-1)
invert = not invert
return ''.join(parts), invert
else:
# Return string unchanged
return string, invert
def __init__(self, filter_str=''):
# name: Name of filter (user-readable string)
# invert: Whether to invert filter (bool)
# op: Comparison operator as string (see OPERATORS)
# user_value: User-given value that is matched against items
# The *_raw variables contain original quotes and backslashes.
name_raw, op_raw, user_value_raw, invert = (None, None, None, False)
log.debug('Parsing %r', filter_str)
parts = cliparser.tokenize(filter_str, maxdelims=1, delims=self.OPERATORS, escapes=('\\',))
log.debug('Parts: %r', parts)
if len(parts) == 3:
name_raw, op_raw, user_value_raw = parts
elif len(parts) == 2:
if parts[0] in self.OPERATORS:
op_raw, user_value_raw = parts
name_raw = self.DEFAULT_FILTER
elif parts[1] in self.OPERATORS:
name_raw, op_raw = parts
else:
raise ValueError('Malformed filter expression: %r' % (filter_str,))
elif len(parts) == 1:
if parts[0] in self.OPERATORS:
op_raw = parts[0]
else:
name_raw = parts[0]
else:
raise ValueError('Malformed filter expression: %r' % (filter_str,))
name_raw, invert = self._parse_inverter(name_raw, invert)
log.debug('Parsed %r into raw: name=%r, invert=%r, op=%r, user_value=%r',
filter_str, name_raw, invert, op_raw, user_value_raw)
# Remove all special characters (backslashes, quotes)
name, op, user_value = map(lambda x: None if x is None else cliparser.plaintext(x),
(name_raw, op_raw, user_value_raw))
log.debug(' Plaintext: name=%r, invert=%r, op=%r, user_value=%r',
name, invert, op, user_value)
name = self._resolve_alias(name)
log.debug(' Resolved alias: name=%r, op=%r, user_value=%r', name, op, user_value)
if not name:
name = self.DEFAULT_FILTER
log.debug(' Falling back to default filter: %r', name)
try:
log.debug(' Getting filter spec: name=%r, op=%r, user_value=%r', name, op, user_value)
# Get filter spec by `name`
filter_func, needed_keys, invert = self._make_filter(name, op, user_value, invert)
except ValueError:
# Filter spec lookup failed
if self.DEFAULT_FILTER and user_value is op is None:
# No `user_value` or `op` given - use the first part of the
# filter expression (normally the filter name) as `user_value`
# for DEFAULT_FILTER.
name, op, user_value = self.DEFAULT_FILTER, self.DEFAULT_OPERATOR, name
log.debug(' Using name as value for default filter: name=%r, op=%r, user_value=%r',
name, op, user_value)
filter_func, needed_keys, invert = self._make_filter(name, op, user_value, invert)
else:
# No DEFAULT_FILTER is set, so we can't default to it
raise
log.debug(' Final filter: name=%r, invert=%r, op=%r, user_value=%r',
name, invert, op, user_value)
self._filter_func = filter_func
self._needed_keys = needed_keys
self._name, self._invert, self._op, self._user_value = name, invert, op, user_value
self._hash = hash((name, invert, op, user_value))
def apply(self, objs, invert=False, key=None):
"""Yield matching objects or `key` of each matching object"""
invert = self._invert ^ bool(invert) # xor
is_wanted = self._filter_func
if is_wanted is None:
if invert:
# This filter matches nothing
yield from ()
else:
# This filter matches everything
if key is None:
yield from objs
else:
for obj in objs:
yield obj[key]
else:
if key is None:
for obj in objs:
if bool(is_wanted(obj)) ^ invert:
yield obj
else:
for obj in objs:
if bool(is_wanted(obj)) ^ invert:
yield obj[key]
def match(self, obj):
"""Return True if `obj` matches, False otherwise"""
is_wanted = self._filter_func
if is_wanted is None:
# This filter matches everything/nothing
return not self._invert
else:
return bool(is_wanted(obj)) ^ self._invert
def __str__(self):
if self._name is None:
return self.DEFAULT_FILTER or ''
elif self._op is None:
return ('!' if self._invert else '') + self._name
else:
name = self._name if self._name != self.DEFAULT_FILTER else ''
op = ('!' if self._invert else '') + self._op
user_value = self._user_value
if user_value is None:
return name + op
else:
val = str(user_value)
if val == '':
val = "''"
elif len(val) == 1:
val = cliparser.escape(val, delims=(' ', '&', '|'), quotes=("'", '"'))
else:
val = cliparser.quote(val, delims=(' ', '&', '|'), quotes=("'", '"'))
return name + op + val
@property
def needed_keys(self):
return self._needed_keys
@property
def match_everything(self):
return not self._filter_func
@property
def inverted(self):
return self._invert
def __eq__(self, other):
if isinstance(other, type(self)):
for attr in ('_name', '_user_value', '_invert', '_op'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
else:
return NotImplemented
def __repr__(self):
return '%s(%r)' % (type(self).__name__, str(self))
def __hash__(self):
return self._hash
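# --- Hypothetical illustration (not part of the original module) ---
# A minimal Filter subclass wiring the spec classes above together. The item
# keys ('name', 'size', 'size_left') and the spec contents are assumptions for
# illustration; real subclasses live elsewhere in the package.
class _ExampleFilter(Filter):
    DEFAULT_FILTER = 'name'
    BOOLEAN_FILTERS = FilterSpecDict({
        'complete': BoolFilterSpec(lambda item: item['size_left'] == 0,
                                   needed_keys=('size_left',),
                                   description='Match fully downloaded items'),
    })
    COMPARATIVE_FILTERS = FilterSpecDict({
        'name': CmpFilterSpec(value_type=str, needed_keys=('name',),
                              description='Match item names'),
        'size': _example_size_spec,
    })
# e.g. list(_ExampleFilter('size>100').apply([{'name': 'a', 'size': 150, 'size_left': 0}]))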
# The filter specs are specified on the Filter subclasses in each module, but we
# only want to export the classes derived from FilterChain, so this metaclass
# grabs attributes that are missing from FilterChain from its 'filterclass'
# attribute.
class _forward_attrs(type):
def __getattr__(cls, name):
attr = getattr(cls.filterclass, name)
setattr(cls, name, attr)
return attr
class FilterChain(metaclass=_forward_attrs):
"""One or more filters combined with AND and OR operators"""
filterclass = NotImplemented
def __init__(self, filters=''):
if not isinstance(self.filterclass, type) or not issubclass(self.filterclass, Filter):
raise RuntimeError('Attribute "filterclass" must be set to a Filter subclass')
if isinstance(filters, str): # Because str is also instance of abc.Sequence
pass
elif isinstance(filters, abc.Sequence) and all(isinstance(f, str) for f in filters):
filters = '|'.join(filters)
elif isinstance(filters, (type(self), self.filterclass)):
filters = str(filters)
elif not isinstance(filters, str):
raise ValueError('Filters must be string or sequence of strings, not %s: %r'
% (type(filters).__name__, filters))
self._filterchains = ()
# Split `filters` at boolean operators
parts = cliparser.tokenize(filters, delims=('&', '|'))
if len(parts) > 0 and parts[0]:
if parts[0] in ('&', '|'):
raise ValueError("Filter can't start with operator: %r" % (parts[0],))
elif parts[-1] in ('&', '|'):
raise ValueError("Filter can't end with operator: %r" % (parts[-1],))
# The filter chain is represented by a tuple of tuples. Each inner
# tuple combines filters with AND. The outer tuple combines the
# inner tuples with OR.
filters = []
ops = []
expect = 'filter'
for i,part in enumerate(parts):
if expect == 'filter':
if part not in '&|':
f = self.filterclass(part)
if f.match_everything:
# One catch-all filter is the same as no filters
filters = [f]
ops.clear()
break
else:
filters.append(f)
expect = 'operator'
continue
elif expect == 'operator' and part in '&|':
if part in '&|':
ops.append(part)
expect = 'filter'
continue
raise ValueError('Consecutive operators: {!r}'.format(''.join(parts[i - 2 : i + 2])))
fchain = [[]]
for filter,op in itertools.zip_longest(filters, ops):
fchain[-1].append(filter)
if op == '|':
fchain.append([])
log.debug('Chained %r and %r to %r', filters, ops, fchain)
self._filterchains = tuple(tuple(x) for x in fchain)
def apply(self, objects):
"""Yield matching objects from iterable `objects`"""
chains = self._filterchains
if chains:
for obj in objects:
if any(all(f.match(obj) for f in AND_chain)
for AND_chain in chains):
yield obj
else:
yield from objects
def match(self, obj):
"""Whether `obj` matches this filter chain"""
# All filters in an AND_chain must match for the AND_chain to
# match. At least one AND_chain must match.
chains = self._filterchains
if not chains:
return True
else:
return any(all(f.match(obj) for f in AND_chain)
for AND_chain in chains)
@property
def needed_keys(self):
"""The object keys needed for filtering"""
keys = set()
for chain in self._filterchains:
for filter in chain:
keys.update(filter.needed_keys)
return tuple(keys)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
else:
# Compare sets because order doesn't matter (foo&bar|baz is the
# same as baz|bar&foo). Use frozensets because sets are not
# hashable.
self_fc_sets = set(frozenset(x) for x in self._filterchains)
other_fc_sets = set(frozenset(x) for x in other._filterchains)
return self_fc_sets == other_fc_sets
def __str__(self):
if len(self._filterchains) < 1:
return ''
else:
OR_chains = []
for AND_chain in self._filterchains:
OR_chains.append('&'.join(str(f) for f in AND_chain))
return '|'.join(OR_chains)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, str(self))
def __and__(self, other):
cls = type(self)
if not isinstance(other, cls):
return NotImplemented
else:
return cls(str(self) + '&' + str(other))
def __or__(self, other):
cls = type(self)
if not isinstance(other, cls):
return NotImplemented
else:
return cls(str(self) + '|' + str(other))
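# --- Hypothetical illustration (not part of the original module) ---
# FilterChain combines single filters with '&' (AND) and '|' (OR). A concrete
# chain class only has to point `filterclass` at a Filter subclass; this reuses
# the illustrative _ExampleFilter sketched above.
class _ExampleFilterChain(FilterChain):
    filterclass = _ExampleFilter
# e.g. _ExampleFilterChain('size>100&complete|name~foo').match(item)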
|
gpl-3.0
| 6,356,354,278,953,972,000
| 37.407917
| 106
| 0.540444
| false
| 4.271631
| false
| false
| false
|
sajeeshcs/nested_quota_final
|
nova/tests/unit/virt/libvirt/test_vif.py
|
1
|
48234
|
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from lxml import etree
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface=None,
vlan=99)
network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br0',
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
bridge_interface=None,
vlan=99)
vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'ovs_hybrid_plug': True,
'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid=None)
vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid='aaa')
vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
details={
'port_filter': True,
'ovs_hybrid_plug': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0',
vlan=99)
vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_PROFILEID:
'MyPortProfile'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_macvtap = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_MACVTAP,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: '100'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff"))
network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0')
network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
label=None,
bridge=None,
subnets=[subnet_bridge_4],
interface='eth0')
vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_mlnx,
type=network_model.VIF_TYPE_MLNX_DIRECT,
devname='tap-xxx-yyy-zzz')
vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_mlnx,
type=network_model.VIF_TYPE_MLNX_DIRECT,
details={'physical_network':
'fake_phy_network'},
devname='tap-xxx-yyy-zzz')
vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_IOVISOR,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
}
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
'quota:vif_inbound_average': '100',
'quota:vif_outbound_average': '10',
'quota:vif_inbound_burst': '300',
'quota:vif_outbound_burst': '30'
}
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.flags(allow_same_net_traffic=True)
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def _get_node(self, xml):
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
return ret[0]
def _assertMacEquals(self, node, vif):
mac = node.find("mac").get("address")
self.assertEqual(mac, vif['address'])
def _assertTypeEquals(self, node, type, attr, source, br_want,
prefix=None):
self.assertEqual(node.get("type"), type)
br_name = node.find(attr).get(source)
if prefix is None:
self.assertEqual(br_name, br_want)
else:
self.assertTrue(br_name.startswith(prefix))
def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
br_want=None, size=0, prefix=None):
ret = node.findall("filterref")
self.assertEqual(len(ret), size)
self._assertTypeEquals(node, type, attr, source, br_want,
prefix)
self._assertMacEquals(node, vif)
def _assertModel(self, xml, model_want=None, driver_want=None):
node = self._get_node(xml)
if model_want is None:
ret = node.findall("model")
self.assertEqual(len(ret), 0)
else:
model = node.find("model").get("type")
self.assertEqual(model, model_want)
if driver_want is None:
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
else:
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
def _assertTypeAndPciEquals(self, node, type, vif):
self.assertEqual(node.get("type"), type)
address = node.find("source").find("address")
addr_type = address.get("type")
self.assertEqual("pci", addr_type)
pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
'domain': address.get("domain")[2:],
'bus': address.get("bus")[2:],
'slot': address.get("slot")[2:],
'func': address.get("function")[2:]}
pci_slot_want = vif['profile']['pci_slot']
self.assertEqual(pci_slot, pci_slot_want)
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
return conf
def _get_instance_xml(self, driver, vif, image_meta=None, flavor=None):
if flavor is None:
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=1,
root_gb=0,
ephemeral_gb=0,
swap=0,
extra_specs=dict(self.bandwidth),
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = self._get_conf()
nic = driver.get_config(self.instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
conf.add_device(nic)
return conf.to_xml()
def test_multiple_nics(self):
conf = self._get_conf()
# Tests multiple nic configuration and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(use_virtio_for_bridges=False,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_model_kvm(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_kvm_qemu_custom(self):
for virt in ('kvm', 'qemu'):
self.flags(use_virtio_for_bridges=True,
virt_type=virt,
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
supported = (network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
for model in supported:
image_meta = {'properties': {'hw_vif_model': model}}
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta)
self._assertModel(xml, model)
def test_model_kvm_bogus(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
image_meta = {'properties': {'hw_vif_model': 'acme'}}
self.assertRaises(exception.UnsupportedHardware,
self._get_instance_xml,
d,
self.vif_bridge,
image_meta)
def _test_model_qemu(self, *vif_objs, **kw):
libvirt_version = kw.get('libvirt_version')
self.flags(use_virtio_for_bridges=True,
virt_type='qemu',
group='libvirt')
for vif_obj in vif_objs:
d = vif.LibvirtGenericVIFDriver()
if libvirt_version is not None:
d.libvirt_version = libvirt_version
xml = self._get_instance_xml(d, vif_obj)
doc = etree.fromstring(xml)
bandwidth = doc.find('./devices/interface/bandwidth')
self.assertNotEqual(bandwidth, None)
inbound = bandwidth.find('inbound')
self.assertEqual(inbound.get("average"),
self.bandwidth['quota:vif_inbound_average'])
self.assertEqual(inbound.get("peak"),
self.bandwidth['quota:vif_inbound_peak'])
self.assertEqual(inbound.get("burst"),
self.bandwidth['quota:vif_inbound_burst'])
outbound = bandwidth.find('outbound')
self.assertEqual(outbound.get("average"),
self.bandwidth['quota:vif_outbound_average'])
self.assertEqual(outbound.get("peak"),
self.bandwidth['quota:vif_outbound_peak'])
self.assertEqual(outbound.get("burst"),
self.bandwidth['quota:vif_outbound_burst'])
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
def test_model_qemu_no_firewall(self):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
self.vif_ovs,
)
def test_model_qemu_iptables(self):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_ovs,
self.vif_ivs,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
)
def test_model_xen(self):
self.flags(use_virtio_for_bridges=True,
virt_type='xen',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_generic_driver_none(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.vif_none)
def _check_bridge_driver(self, d, vif, br_want):
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_bridge, br_want, 1)
def test_generic_driver_bridge(self):
d = vif.LibvirtGenericVIFDriver()
self._check_bridge_driver(d,
self.vif_bridge,
self.vif_bridge['network']['bridge'])
def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, prefix=dev_prefix)
script = node.find("script").get("path")
self.assertEqual(script, "")
def test_unplug_ivs_ethernet(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
delete.side_effect = processutils.ProcessExecutionError
d.unplug_ivs_ethernet(None, self.vif_ovs)
def test_plug_ovs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy')],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)],
'create_ovs_vif_port': [mock.call('br0',
'qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'instance-uuid')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ovs_vif_port')
) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.plug_ovs_hybrid(self.instance, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
def test_unplug_ovs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=True),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ovs_vif_port')
) as (device_exists, execute, delete_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug_ovs_hybrid(None, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
execute.assert_has_calls(calls['execute'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
@mock.patch.object(utils, 'execute')
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
@mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
def _test_hw_veb_op(self, op, vlan, mock_get_vf_num, mock_get_ifname,
mock_execute):
mock_get_ifname.side_effect = ['eth1', 'eth13']
exit_code = [0, 2, 254]
port_state = 'up' if vlan > 0 else 'down'
calls = {
'get_ifname':
[mock.call(self.vif_macvtap['profile']['pci_slot'],
pf_interface=True),
mock.call(self.vif_macvtap['profile']['pci_slot'])],
'get_vf_num':
[mock.call(self.vif_macvtap['profile']['pci_slot'])],
'execute': [mock.call('ip', 'link', 'set', 'eth1',
'vf', 1, 'mac', self.vif_macvtap['address'],
'vlan', vlan,
run_as_root=True,
check_exit_code=exit_code),
mock.call('ip', 'link', 'set',
'eth13', port_state,
run_as_root=True,
check_exit_code=exit_code)]
}
op(None, self.vif_macvtap)
mock_get_ifname.assert_has_calls(calls['get_ifname'])
mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
mock_execute.assert_has_calls(calls['execute'])
def test_plug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(
d.plug_hw_veb,
self.vif_macvtap['details'][network_model.VIF_DETAILS_VLAN])
def test_unplug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(d.unplug_hw_veb, 0)
def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(linux_net, 'delete_ovs_vif_port')
) as (device_exists, delete_ovs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug_ovs_hybrid(None, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
def test_plug_ivs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
'_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
'qvovif-xxx-yyy')],
'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
run_as_root=True),
mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
run_as_root=True),
mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
run_as_root=True),
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True)],
'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'instance-uuid')]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
mock.patch.object(linux_net, 'create_ivs_vif_port')
) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.plug_ivs_hybrid(self.instance, self.vif_ivs)
device_exists.assert_has_calls(calls['device_exists'])
_create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
execute.assert_has_calls(calls['execute'])
create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
def test_unplug_ivs_hybrid(self):
calls = {
'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
'qvbvif-xxx-yyy', run_as_root=True),
mock.call('ip', 'link', 'set',
'qbrvif-xxx-yyy', 'down', run_as_root=True),
mock.call('brctl', 'delbr',
'qbrvif-xxx-yyy', run_as_root=True)],
'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
}
with contextlib.nested(
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, 'delete_ivs_vif_port')
) as (execute, delete_ivs_vif_port):
d = vif.LibvirtGenericVIFDriver()
d.unplug_ivs_hybrid(None, self.vif_ivs)
execute.assert_has_calls(calls['execute'])
delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug_ivs_hybrid(None, self.vif_ivs)
def test_unplug_iovisor(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
label='mylabel')
myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=mynetwork)
d.unplug_iovisor(None, myvif)
@mock.patch('nova.network.linux_net.device_exists')
def test_plug_iovisor(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid',
'project_id': 'myproject'
}
d.plug_iovisor(instance, self.vif_ivs)
def test_unplug_mlnx_with_details(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
d.unplug_mlnx_direct(None, self.vif_mlnx_net)
execute.assert_called_once_with('ebrctl', 'del-port',
'fake_phy_network',
'ca:fe:de:ad:be:ef',
run_as_root=True)
def test_plug_mlnx_with_details(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
execute.assert_called_once_with('ebrctl', 'add-port',
'ca:fe:de:ad:be:ef',
'instance-uuid',
'fake_phy_network',
'mlnx_direct',
'eth-xxx-yyy-zzz',
run_as_root=True)
def test_plug_mlnx_no_physical_network(self):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
self.assertRaises(exception.NovaException,
d.plug_mlnx_direct,
self.instance,
self.vif_mlnx)
self.assertEqual(0, execute.call_count)
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver()
self._check_ivs_ethernet_driver(d,
self.vif_ivs,
"tap")
def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
vif, vif['devname'])
def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, "br0")
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_generic_ovs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.vif_ovs,
want_iface_id)
def test_generic_ivs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ivs['ovs_interfaceid']
self._check_ivs_virtualport_driver(d,
self.vif_ivs,
want_iface_id)
def test_ivs_plug_with_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs, br_want, 1)
def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ivs_filter_hybrid, br_want, 0)
def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_ivs_filter_direct['devname']
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs_filter_direct, br_want, 0)
def test_hybrid_plug_without_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs_hybrid['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_ovs_hybrid, br_want, 0)
def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def _check_neutron_hybrid_driver(self, d, vif, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, br_want, 1)
def test_generic_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ovs,
br_want)
def test_ivs_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ivs,
br_want)
def test_mlnx_direct_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_mlnx)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth-xxx-yyy-zzz")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_mlnx)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_midonet_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_midonet)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_hw_veb_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
self._assertMacEquals(node, self.vif_hw_veb)
vlan = node.find("vlan").find("tag").get("id")
vlan_want = self.vif_hw_veb["details"]["vlan"]
self.assertEqual(vlan, vlan_want)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
def test_hw_veb_driver_macvtap(self, mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_macvtap)
vlan = node.find("vlan")
self.assertIsNone(vlan)
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_ivs['devname']
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, br_want)
def test_generic_8021qbg_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbg)
node = self._get_node(xml)
self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
self._assertMacEquals(node, self.vif_8021qbg)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.vif_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
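# --- Illustrative note (not part of the original test module) ---
# The assertion helpers above parse the generated domain XML with etree. For a
# bridge VIF the fragment they inspect looks roughly like this (element names
# follow libvirt's schema; the attribute values are examples only):
#
#   <interface type="bridge">
#     <mac address="ca:fe:de:ad:be:ef"/>
#     <source bridge="br0"/>
#     <target dev="tap-xxx-yyy-zzz"/>
#     <model type="virtio"/>
#     <filterref filter="nova-instance-..."/>
#   </interface>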
|
apache-2.0
| 7,805,289,528,933,048,000
| 46.103516
| 79
| 0.475432
| false
| 4.137061
| true
| false
| false
|
jeremy24/494-graph-algos
|
python/hw2/timeit.py
|
1
|
1308
|
from __future__ import print_function
import time
import os
from graph import Graph
from graph import make
from graph import GraphException
from graph import Matrix
def run(name):
graph = make( name )
ret = [0,0]
start = time.time()
graph.dfs(0)
ret[1] = (time.time()-start)
start = time.time()
graph.bfs(0)
ret[0] = (time.time()-start)
return ret
def go():
names = list()
bfs = list()
dfs = list()
for name in os.listdir("./graphs"):
names.append(name)
name = "./graphs/" + name
res = run(name)
bfs.append(res[0])
dfs.append(res[1])
for index in range(0, len(names)):
name = names[index]
b = bfs[index]
d = dfs[index]
first = "%s" % str(object=name).ljust(30, " ")
second = "%s" % str(object=b).rjust(18, " ")
third = "%s" % str(object=d).ljust(20, " ")
print("dfs: " + str(d) + " bfs: " + str(b))
if d > b:
print("dfs is faster on " + first + " by " + str(abs(b-d)) + " seconds")
else:
print("bfs is faster on " + first + " by " + str(abs(b-d)) + " seconds")
# print(first + " took " + second + " " + third)
go()
|
mit
| 3,221,412,540,978,325,000
| 17.818182
| 84
| 0.482416
| false
| 3.261845
| false
| false
| false
|
gnarula/eden_deployment
|
modules/s3db/asset.py
|
1
|
56243
|
# -*- coding: utf-8 -*-
""" Sahana Eden Assets Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3AssetModel",
"S3AssetHRModel",
"S3AssetTeamModel",
#"asset_rheader",
"asset_types",
"asset_log_status",
"asset_controller",
"asset_AssetRepresent",
)
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
ASSET_TYPE_VEHICLE = 1 # => Extra Tab(s) for Registration Documents, Fuel Efficiency
ASSET_TYPE_RADIO = 2 # => Extra Tab(s) for Radio Channels/Frequencies
ASSET_TYPE_TELEPHONE = 3 # => Extra Tab(s) for Contact Details & Airtime Billing
ASSET_TYPE_OTHER = 4 # => No extra Tabs
# To pass to global scope
asset_types = {"VEHICLE" : ASSET_TYPE_VEHICLE,
"RADIO" : ASSET_TYPE_RADIO,
"TELEPHONE" : ASSET_TYPE_TELEPHONE,
"OTHER" : ASSET_TYPE_OTHER,
}
ASSET_LOG_SET_BASE = 1
ASSET_LOG_ASSIGN = 2
ASSET_LOG_RETURN = 3
ASSET_LOG_CHECK = 4
ASSET_LOG_REPAIR = 5
ASSET_LOG_DONATED = 32
ASSET_LOG_LOST = 33
ASSET_LOG_STOLEN = 34
ASSET_LOG_DESTROY = 35
# To pass to global scope
asset_log_status = {"SET_BASE" : ASSET_LOG_SET_BASE,
"ASSIGN" : ASSET_LOG_ASSIGN,
"RETURN" : ASSET_LOG_RETURN,
"CHECK" : ASSET_LOG_CHECK,
"REPAIR" : ASSET_LOG_REPAIR,
"DONATED" : ASSET_LOG_DONATED,
"LOST" : ASSET_LOG_LOST,
"STOLEN" : ASSET_LOG_STOLEN,
"DESTROY" : ASSET_LOG_DESTROY,
}
# =============================================================================
class S3AssetModel(S3Model):
"""
Asset Management
"""
names = ("asset_asset",
"asset_item",
"asset_log",
"asset_asset_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
s3 = current.response.s3
item_id = self.supply_item_id
item_entity_id = self.supply_item_entity_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
settings = current.deployment_settings
org_site_label = settings.get_org_site_label()
vehicle = settings.has_module("vehicle")
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
#--------------------------------------------------------------------------
# Assets
#
asset_type_opts = {ASSET_TYPE_VEHICLE : T("Vehicle"),
#ASSET_TYPE_RADIO : T("Radio"),
#ASSET_TYPE_TELEPHONE : T("Telephone"),
ASSET_TYPE_OTHER : T("Other"),
}
asset_condition_opts = {1: T("Good Condition"),
2: T("Minor Damage"),
3: T("Major Damage"),
4: T("Un-Repairable"),
5: T("Needs Maintenance"),
}
ctable = self.supply_item_category
itable = self.supply_item
supply_item_represent = self.supply_item_represent
asset_items_set = db((ctable.can_be_asset == True) & \
(itable.item_category_id == ctable.id))
tablename = "asset_asset"
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
item_entity_id,
Field("number",
label = T("Asset Number"),
),
# @ToDo: We could set this automatically based on Item Category
Field("type", "integer",
default = ASSET_TYPE_OTHER,
label = T("Type"),
represent = lambda opt: \
asset_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_type_opts),
readable = vehicle,
writable = vehicle,
),
item_id(represent = supply_item_represent,
requires = IS_ONE_OF(asset_items_set,
"supply_item.id",
supply_item_represent,
sort = True,
),
script = None, # No Item Pack Filter
widget = None,
),
Field("kit", "boolean",
default = False,
label = T("Kit?"),
represent = lambda opt: \
(opt and [T("Yes")] or [NONE])[0],
# Enable in template if-required
readable = False,
writable = False,
),
organisation_id(requires=self.org_organisation_requires(
updateable=True,
#required=True
),
required = True,
script = '''
S3OptionsFilter({
'triggerName':'organisation_id',
'targetName':'site_id',
'lookupResource':'site',
'lookupPrefix':'org',
'lookupField':'site_id',
'lookupURL':S3.Ap.concat('/org/sites_for_org/'),
})''',
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = auth.user.site_id if auth.is_logged_in() else None,
empty = False,
label = org_site_label,
ondelete = "RESTRICT",
readable = True,
writable = True,
represent = self.org_site_represent,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Warehouse"),
# messages.AUTOCOMPLETE_HELP)),
),
Field("sn",
label = T("Serial Number"),
),
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
s3_date("purchase_date",
label = T("Purchase Date"),
),
Field("purchase_price", "double",
#default = 0.00,
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
s3_currency("purchase_currency"),
# Base Location, which should always be a Site & set via Log
location_id(readable = False,
writable = False,
),
# Populated onaccept of the log to make a component tab
person_id("assigned_to_id",
readable = False,
writable = False,
comment = self.pr_person_comment(child="assigned_to_id"),
),
# Populated onaccept of the log for reporting/filtering
Field("cond", "integer",
label = T("Condition"),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
#readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Asset"),
title_display = T("Asset Details"),
title_list = T("Assets"),
title_update = T("Edit Asset"),
title_upload = T("Import Assets"),
label_list_button = T("List Assets"),
label_delete_button = T("Delete Asset"),
msg_record_created = T("Asset added"),
msg_record_modified = T("Asset updated"),
msg_record_deleted = T("Asset deleted"),
msg_list_empty = T("No Assets currently registered"))
asset_represent = asset_AssetRepresent(show_link=True)
# Reusable Field
asset_id = S3ReusableField("asset_id", "reference %s" % tablename,
label = T("Asset"),
ondelete = "CASCADE",
represent = asset_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "asset_asset.id",
asset_represent,
sort=True)),
sortby = "number",
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
list_fields = ["id",
"item_id$item_category_id",
"item_id",
"number",
#"type",
#"purchase_date",
(T("Assigned To"), "assigned_to_id"),
"organisation_id",
"site_id",
]
report_fields = ["number",
(T("Category"), "item_id$item_category_id"),
(T("Item"), "item_id"),
"organisation_id",
"site_id",
"cond",
]
text_fields = ["number",
"item_id$name",
#"item_id$category_id$name",
"comments",
]
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
list_fields.append(lfield)
list_fields.extend(("cond",
"comments"))
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
comment = T("You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets."),
#_class = "filter-search",
),
S3OptionsFilter("item_id$item_category_id",
),
S3OptionsFilter("organisation_id",
represent = "%(name)s",
hidden = True,
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("cond",
hidden = True,
),
]
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = [(T("Number of items"), "count(number)")],
defaults=Storage(cols = "location_id$%s" % levels[0], # Highest-level of hierarchy
fact = "count(number)",
rows = "item_id$item_category_id",
totals = True,
)
)
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
]
# Resource Configuration
configure(tablename,
# Open Tabs after creation
create_next = URL(c="asset", f="asset",
args=["[id]"]),
deduplicate = self.asset_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
mark_required = ["organisation_id"],
onaccept = self.asset_onaccept,
realm_components = ["log", "presence"],
report_options = report_options,
summary = summary,
super_entity = ("supply_item_entity", "sit_trackable"),
update_realm = True,
)
# Components
add_components(tablename,
asset_group = "asset_id",
asset_item = "asset_id",
asset_log = "asset_id",
asset_human_resource = "asset_id",
hrm_human_resource = {"link": "asset_human_resource",
"joinby": "asset_id",
"key": "human_resource_id",
"actuate": "hide",
},
vehicle_gps = "asset_id",
vehicle_vehicle = {"joinby": "asset_id",
"multiple": False,
},
)
# =====================================================================
# Asset Items
# - to allow building ad-hoc Kits
#
tablename = "asset_item"
define_table(tablename,
item_entity_id,
asset_id(ondelete="CASCADE"),
item_id(represent = supply_item_represent,
requires = IS_ONE_OF(asset_items_set,
"supply_item.id",
supply_item_represent,
sort = True,
),
script = None, # No Item Pack Filter
widget = None,
),
Field("quantity", "integer", notnull=True,
default = 1,
label = T("Quantity"),
requires = IS_INT_IN_RANGE(1, 1000),
),
Field("sn",
label = T("Serial Number")),
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL"),
s3_date("purchase_date",
label = T("Purchase Date")),
Field("purchase_price", "double",
#default=0.00,
represent=lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2)),
s3_currency("purchase_currency"),
# Base Location, which should always be a Site & set via Log
location_id(readable=False,
writable=False),
s3_comments(comment=None),
*s3_meta_fields())
# =====================================================================
# Asset Log
#
asset_log_status_opts = {ASSET_LOG_SET_BASE : T("Base %(facility)s Set") % dict(facility = org_site_label),
ASSET_LOG_ASSIGN : T("Assigned"),
ASSET_LOG_RETURN : T("Returned"),
ASSET_LOG_CHECK : T("Checked"),
ASSET_LOG_REPAIR : T("Repaired"),
ASSET_LOG_DONATED : T("Donated"),
ASSET_LOG_LOST : T("Lost"),
ASSET_LOG_STOLEN : T("Stolen"),
ASSET_LOG_DESTROY : T("Destroyed"),
}
if auth.permission.format == "html":
# T isn't JSON serializable
site_types = auth.org_site_types
for key in site_types.keys():
site_types[key] = str(site_types[key])
site_types = json.dumps(site_types)
script = '''
S3OptionsFilter({
'triggerName':'organisation_id',
'targetName':'site_id',
'lookupPrefix':'org',
'lookupResource':'site',
'lookupField':'site_id',
'fncRepresent': function(record,PrepResult){
var InstanceTypeNice=%(instance_type_nice)s
return record.name+" ("+InstanceTypeNice[record.instance_type]+")"
}})''' % dict(instance_type_nice = site_types)
else:
script = None
tablename = "asset_log"
define_table(tablename,
asset_id(),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
asset_log_status_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_log_status_opts),
),
s3_datetime("datetime",
default = "now",
empty = False,
represent = "date",
),
s3_datetime("datetime_until",
label = T("Date Until"),
represent = "date",
),
person_id(label = T("Assigned To")),
Field("check_in_to_person", "boolean",
#label = T("Mobile"), # Relabel?
label = T("Track with this Person?"),
comment = DIV(_class="tooltip",
#_title="%s|%s" % (T("Mobile"),
_title="%s|%s" % (T("Track with this Person?"),
T("If selected, then this Asset's Location will be updated whenever the Person's Location is updated."))),
readable = False,
writable = False,
),
# The Organisation to whom the loan is made
organisation_id(readable = False,
widget = None,
writable = False,
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
label = org_site_label,
#filterby = "site_id",
#filter_opts = auth.permitted_facilities(redirect_on_error=False),
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
#default = user.site_id if is_logged_in() else None,
readable = True,
writable = True,
empty = False,
represent = self.org_site_represent,
#widget = S3SiteAutocompleteWidget(),
script = script,
),
self.org_room_id(),
#location_id(),
Field("cancel", "boolean",
default = False,
label = T("Cancel Log Entry"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Cancel Log Entry"),
T("'Cancel' will indicate an asset log entry did not occur")))
),
Field("cond", "integer", # condition is a MySQL reserved word
label = T("Condition"),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_condition_opts,
zero = "%s..." % T("Please select")),
),
person_id("by_person_id",
default = auth.s3_logged_in_person(), # This can either be the Asset controller if signed-out from the store
label = T("Assigned By"), # or the previous owner if passed on directly (e.g. to successor in their post)
comment = self.pr_person_comment(child="by_person_id"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ASSIGN = T("New Entry in Asset Log")
crud_strings[tablename] = Storage(
label_create = ADD_ASSIGN,
title_display = T("Asset Log Details"),
title_list = T("Asset Log"),
title_update = T("Edit Asset Log Entry"),
label_list_button = T("Asset Log"),
label_delete_button = T("Delete Asset Log Entry"),
msg_record_created = T("Entry added to Asset Log"),
msg_record_modified = T("Asset Log Entry updated"),
msg_record_deleted = T("Asset Log Entry deleted"),
msg_list_empty = T("Asset Log Empty"))
# Resource configuration
configure(tablename,
listadd = False,
list_fields = ["id",
"datetime",
"status",
"datetime_until",
"organisation_id",
"site_id",
"room_id",
"person_id",
#"location_id",
"cancel",
"cond",
"comments",
],
onaccept = self.asset_log_onaccept,
orderby = "asset_log.datetime desc",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(asset_asset_id = asset_id,
asset_represent = asset_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Return safe defaults for names in case the model is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(asset_asset_id = lambda **attr: dummy("asset_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def asset_duplicate(item):
"""
Deduplication of Assets
"""
if item.tablename != "asset_asset":
return
table = item.table
data = item.data
number = data.get("number", None)
query = (table.number == number)
organisation_id = data.get("organisation_id", None)
if organisation_id:
query &= (table.organisation_id == organisation_id)
site_id = data.get("site_id", None)
if site_id:
query &= (table.site_id == site_id)
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
item.id = _duplicate.id
item.data.id = _duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def asset_onaccept(form):
"""
After DB I/O
"""
if current.response.s3.bulk:
# Import or Sync
return
db = current.db
atable = db.asset_asset
form_vars = form.vars
kit = form_vars.get("kit", None)
site_id = form_vars.get("site_id", None)
if site_id:
stable = db.org_site
asset_id = form_vars.id
# Set the Base Location
location_id = db(stable.site_id == site_id).select(stable.location_id,
limitby=(0, 1)
).first().location_id
tracker = S3Tracker()
asset_tracker = tracker(atable, asset_id)
asset_tracker.set_base_location(location_id)
if kit:
# Also populate location_id field in component items
aitable = db.asset_item
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Add a log entry for this
ltable = db.asset_log
ltable.insert(asset_id = asset_id,
status = ASSET_LOG_SET_BASE,
organisation_id = form_vars.get("organisation_id", None),
site_id = site_id,
cond = 1,
)
if kit:
# Empty any inappropriate fields
                db(atable.id == asset_id).update(supply_org_id = None,
purchase_date = None,
purchase_price = None,
purchase_currency = None,
)
else:
# Delete any component items
aitable = db.asset_item
ids = db(aitable.asset_id == asset_id).select(aitable.id).as_list()
if ids:
resource = current.s3db.resource("asset_item", id=ids)
resource.delete()
return
# -------------------------------------------------------------------------
@staticmethod
def asset_log_onaccept(form):
"""
After DB I/O
"""
request = current.request
get_vars = request.get_vars
status = get_vars.get("status", None)
if not status:
if not current.response.s3.asset_import:
# e.g. Record merger or Sync
return
# Import
db = current.db
form_vars = form.vars
asset_id = form_vars.asset_id
status = int(form_vars.status)
if status == ASSET_LOG_ASSIGN:
# Only type supported right now
# @ToDo: Support more types
                type = "person"
new = True
else:
# Interactive
form_vars = form.vars
status = int(form_vars.status or status)
db = current.db
ltable = db.asset_log
row = db(ltable.id == form_vars.id).select(ltable.asset_id,
limitby=(0, 1)
).first()
try:
asset_id = row.asset_id
except:
return
current_log = asset_get_current_log(asset_id)
type = get_vars.get("type", None)
log_time = current_log.datetime
current_time = form_vars.get("datetime", None).replace(tzinfo=None)
new = log_time <= current_time
if new:
# This is a current assignment
atable = db.asset_asset
aitable = db.asset_item
tracker = S3Tracker()
asset_tracker = tracker(atable, asset_id)
if status == ASSET_LOG_SET_BASE:
# Set Base Location
site_id = form_vars.get("site_id", None)
stable = db.org_site
location_id = db(stable.site_id == site_id).select(stable.location_id,
limitby=(0, 1)
).first().location_id
asset_tracker.set_base_location(location_id)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
elif status == ASSET_LOG_ASSIGN:
if type == "person":
if form_vars.check_in_to_person:
asset_tracker.check_in(db.pr_person, form_vars.person_id,
timestmp = request.utcnow)
# Also do component items
# @ToDo: Have these move when the person moves
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
else:
location_id = asset_tracker.set_location(form_vars.person_id,
timestmp = request.utcnow)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Update main record for component
db(atable.id == asset_id).update(assigned_to_id=form_vars.person_id)
elif type == "site":
asset_tracker.check_in(db.org_site, form_vars.site_id,
timestmp = request.utcnow)
# Also do component items
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
elif type == "organisation":
site_id = form_vars.get("site_id", None)
if site_id:
asset_tracker.check_in(db.org_site, site_id,
timestmp = request.utcnow)
# Also do component items
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
else:
# We can no longer track location
asset_tracker.check_out()
elif status == ASSET_LOG_RETURN:
# Set location to base location
location_id = asset_tracker.set_location(asset_tracker,
timestmp = request.utcnow)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Update condition in main record
db(atable.id == asset_id).update(cond=form_vars.cond)
return
# =============================================================================
class S3AssetHRModel(S3Model):
"""
Optionally link Assets to Human Resources
- useful for staffing a vehicle
"""
names = ("asset_human_resource",)
def model(self):
#T = current.T
#--------------------------------------------------------------------------
# Assets <> Human Resources
#
tablename = "asset_human_resource"
self.define_table(tablename,
self.asset_asset_id(empty = False),
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# =============================================================================
class S3AssetTeamModel(S3Model):
"""
Optionally link Assets to Teams
"""
names = ("asset_group",)
def model(self):
#T = current.T
#--------------------------------------------------------------------------
# Assets <> Groups
#
tablename = "asset_group"
self.define_table(tablename,
self.asset_asset_id(empty = False),
self.pr_group_id(comment = None,
empty = False,
),
#s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# =============================================================================
def asset_get_current_log(asset_id):
"""
Get the current log entry for this asset
"""
table = current.s3db.asset_log
query = (table.asset_id == asset_id) & \
(table.cancel == False) & \
(table.deleted == False)
# Get the log with the maximum time
asset_log = current.db(query).select(table.id,
table.status,
table.datetime,
table.cond,
table.person_id,
table.organisation_id,
table.site_id,
#table.location_id,
orderby = ~table.datetime,
limitby=(0, 1)).first()
if asset_log:
return Storage(datetime = asset_log.datetime,
person_id = asset_log.person_id,
cond = int(asset_log.cond or 0),
status = int(asset_log.status or 0),
organisation_id = asset_log.organisation_id,
site_id = asset_log.site_id,
#location_id = asset_log.location_id
)
else:
return Storage()
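# Illustrative example (editor's sketch, hypothetical values): for an asset whose most
# recent usable log entry is an assignment, this returns something like
#   Storage(datetime=..., person_id=42, cond=1, status=ASSET_LOG_ASSIGN,
#           organisation_id=5, site_id=3)
# and an empty Storage() when the asset has no non-cancelled log entries.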
# =============================================================================
def asset_log_prep(r):
"""
Called by Controller
"""
T = current.T
db = current.db
request = current.request
table = db.asset_log
if r.record:
asset = Storage(r.record)
else:
# This is a new record
asset = Storage()
table.cancel.readable = False
table.cancel.writable = False
# This causes an error with the dataTables paginate
# if used only in r.interactive & not also r.representation=="aadata"
if r.method != "read" and r.method != "update":
table.cancel.readable = False
table.cancel.writable = False
current_log = asset_get_current_log(asset.id)
if request.vars.status:
status = int(request.vars.status)
else:
status = 0
if status and status != "None":
field = table.status
field.default = status
field.readable = False
field.writable = False
elif current_log:
table.status.default = current_log.status
if current_log.organisation_id:
table.organisation_id.default = current_log.organisation_id
table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent,
filterby = "organisation_id",
filter_opts = (current_log.organisation_id,))
crud_strings = current.response.s3.crud_strings.asset_log
if status == ASSET_LOG_SET_BASE:
crud_strings.msg_record_created = T("Base Facility/Site Set")
table.by_person_id.label = T("Set By")
table.site_id.writable = True
table.datetime_until.readable = False
table.datetime_until.writable = False
table.person_id.readable = False
table.person_id.writable = False
table.organisation_id.readable = True
table.organisation_id.writable = True
table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent)
elif status == ASSET_LOG_RETURN:
crud_strings.msg_record_created = T("Returned")
table.person_id.label = T("Returned From")
table.person_id.default = current_log.person_id
table.site_id.readable = False
table.site_id.writable = False
elif status == ASSET_LOG_ASSIGN:
type = request.vars.type
# table["%s_id" % type].required = True
if type == "person":
crud_strings.msg_record_created = T("Assigned to Person")
table["person_id"].requires = IS_ONE_OF(db, "pr_person.id",
table.person_id.represent,
orderby="pr_person.first_name",
sort=True,
error_message="Person must be specified!")
table.check_in_to_person.readable = True
table.check_in_to_person.writable = True
table.site_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent))
elif type == "site":
crud_strings.msg_record_created = T("Assigned to Facility/Site")
elif type == "organisation":
crud_strings.msg_record_created = T("Assigned to Organization")
table.organisation_id.readable = True
table.organisation_id.writable = True
table.organisation_id.requires = IS_ONE_OF(db, "org_organisation.id",
table.organisation_id.represent,
orderby="org_organisation.name",
sort=True)
table.site_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent))
elif "status" in request.get_vars:
crud_strings.msg_record_created = T("Status Updated")
table.person_id.label = T("Updated By")
field = table.status
field.readable = True
field.writable = True
field.requires = IS_IN_SET({ASSET_LOG_CHECK : T("Check"),
ASSET_LOG_REPAIR : T("Repair"),
ASSET_LOG_DONATED : T("Donated"),
ASSET_LOG_LOST : T("Lost"),
ASSET_LOG_STOLEN : T("Stolen"),
ASSET_LOG_DESTROY : T("Destroyed"),
})
# =============================================================================
def asset_rheader(r):
""" Resource Header for Assets """
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
s3 = current.response.s3
NONE = current.messages["NONE"]
if record.type == ASSET_TYPE_VEHICLE:
STAFF = current.deployment_settings.get_hrm_staff_label()
tabs = [(T("Asset Details"), None, {"native": True}),
(T("Vehicle Details"), "vehicle"),
(STAFF, "human_resource"),
(T("Assign %(staff)s") % dict(staff=STAFF), "assign"),
(T("Check-In"), "check-in"),
(T("Check-Out"), "check-out"),
(T("GPS Data"), "gps"),
]
else:
tabs = [(T("Edit Details"), None)]
#elif record.type == s3.asset.ASSET_TYPE_RADIO:
# tabs.append((T("Radio Details"), "radio"))
#elif record.type == s3.asset.ASSET_TYPE_TELEPHONE:
# tabs.append((T("Telephone Details"), "phone"))
tabs.append((T("Log"), "log"))
tabs.append((T("Documents"), "document"))
rheader_tabs = s3_rheader_tabs(r, tabs)
if current.request.controller == "vehicle":
func = "vehicle"
else:
func = "asset"
# @ToDo: Check permissions before displaying buttons
asset_action_btns = [
A(T("Set Base Facility/Site"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_SET_BASE)
),
_class = "action-btn",
)
]
current_log = asset_get_current_log(record.id)
status = current_log.status
#if record.location_id:
# A Base Site has been set
# Return functionality removed - as it doesn't set site_id & organisation_id in the logs
#if status == ASSET_LOG_ASSIGN:
# asset_action_btns += [ A( T("Return"),
# _href = URL(f=func,
# args = [record.id, "log", "create"],
# vars = dict(status = ASSET_LOG_RETURN)
# ),
# _class = "action-btn"
# )
# ]
if status < ASSET_LOG_DONATED:
# @ToDo: deployment setting to prevent assigning assets before returning them
# The Asset is available for assignment (not disposed)
asset_action_btns += [
A(T("Assign to Person"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_ASSIGN,
type = "person")
),
_class = "action-btn",
),
A(T("Assign to Facility/Site"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_ASSIGN,
type = "site")
),
_class = "action-btn",
),
A(T("Assign to Organization"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = dict(status = ASSET_LOG_ASSIGN,
type = "organisation")
),
_class = "action-btn",
),
]
asset_action_btns += [
A(T("Update Status"),
_href = URL(f=func,
args = [record.id, "log", "create"],
vars = None
),
_class = "action-btn",
),
]
table = r.table
ltable = s3db.asset_log
rheader = DIV(TABLE(TR(TH("%s: " % table.number.label),
record.number,
TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id)
),
TR(TH("%s: " % ltable.cond.label),
ltable.cond.represent(current_log.cond),
TH("%s: " % ltable.status.label),
ltable.status.represent(status),
),
TR(TH("%s: " % ltable.person_id.label),
ltable.person_id.represent(current_log.person_id),
TH("%s: " % ltable.site_id.label),
ltable.site_id.represent(current_log.site_id),
),
),
DIV(_style = "margin-top:5px", # @ToDo: Move to CSS
*asset_action_btns
),
rheader_tabs)
return rheader
return None
# =============================================================================
def asset_controller():
""" RESTful CRUD controller """
s3db = current.s3db
s3 = current.response.s3
# Pre-process
def prep(r):
# Location Filter
current.s3db.gis_location_filter(r)
if r.component_name == "log":
asset_log_prep(r)
return True
s3.prep = prep
# Import pre-process
def import_prep(data):
"""
Flag that this is an Import (to distinguish from Sync)
@ToDo: Find Person records from their email addresses
"""
current.response.s3.asset_import = True
return
# @ToDo: get this working
ctable = s3db.pr_contact
ptable = s3db.pr_person
resource, tree = data
elements = tree.getroot().xpath("/s3xml//resource[@name='pr_person']/data[@field='first_name']")
persons = {}
for element in elements:
email = element.text
if email in persons:
# Replace email with uuid
element.text = persons[email]["uuid"]
# Don't check again
continue
query = (ctable.value == email) & \
(ctable.pe_id == ptable.pe_id)
            person = current.db(query).select(ptable.uuid,
                                              limitby=(0, 1)
                                              ).first()
if person:
# Replace email with uuid
uuid = person.uuid
else:
# Blank it
uuid = ""
element.text = uuid
# Store in case we get called again with same value
persons[email] = dict(uuid=uuid)
s3.import_prep = import_prep
# Post-processor
def postp(r, output):
if r.interactive and r.method != "import":
script = "/%s/static/scripts/S3/s3.asset.js" % r.application
s3.scripts.append(script)
S3CRUD.action_buttons(r, deletable=False)
#if not r.component:
#s3.actions.append({"url" : URL(c="asset", f="asset",
# args = ["[id]", "log", "create"],
# vars = {"status" : eden.asset.asset_log_status["ASSIGN"],
# "type" : "person"}),
# "_class" : "action-btn",
# "label" : str(T("Assign"))})
return output
s3.postp = postp
output = current.rest_controller("asset", "asset",
rheader = asset_rheader,
)
return output
# =============================================================================
class asset_AssetRepresent(S3Represent):
""" Representation of Assets """
def __init__(self,
fields = ("number",), # unused
show_link = False,
translate = False,
multiple = False,
):
# Need a custom lookup
self.lookup_rows = self.custom_lookup_rows
super(asset_AssetRepresent,
self).__init__(lookup="asset_asset",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for organisation rows, does a
left join with the parent organisation. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the organisation IDs
"""
db = current.db
s3db = current.s3db
table = s3db.asset_asset
itable = db.supply_item
btable = db.supply_brand
qty = len(values)
if qty == 1:
query = (table.id == values[0])
limitby = (0, 1)
else:
query = (table.id.belongs(values))
limitby = (0, qty)
query &= (itable.id == table.item_id)
rows = db(query).select(table.id,
table.number,
table.type,
itable.name,
btable.name,
left=btable.on(itable.brand_id == btable.id),
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the asset_asset Row
"""
# Custom Row (with the item & brand left-joined)
number = row["asset_asset.number"]
item = row["supply_item.name"]
brand = row.get("supply_brand.name", None)
if not number:
return self.default
represent = "%s (%s" % (number, item)
if brand:
represent = "%s, %s)" % (represent, brand)
else:
represent = "%s)" % represent
return s3_unicode(represent)
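        # Illustrative values (editor's note): number="A-0042", item="Toyota Hilux",
        # brand="Toyota" -> "A-0042 (Toyota Hilux, Toyota)"; with no brand row the
        # result is "A-0042 (Toyota Hilux)".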
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
@param k: the key (site_id)
@param v: the representation of the key
@param row: the row with this key
"""
if row:
type = row.get("asset_asset.type", None)
if type == 1:
return A(v, _href=URL(c="vehicle", f="vehicle", args=[k],
# remove the .aaData extension in paginated views
extension=""
))
k = s3_unicode(k)
return A(v, _href=self.linkto.replace("[id]", k) \
.replace("%5Bid%5D", k))
# END =========================================================================
|
mit
| 1,738,867,762,911,502,600
| 41.161169
| 178
| 0.406131
| false
| 5.070134
| false
| false
| false
|
samw3/PyTweeps
|
pytweeps.py
|
1
|
20531
|
# PyTweeps: Simple Python program to help manage your twitter followers.
# https://github.com/samw3/PyTweeps
import pkg_resources
import tweepy
import webbrowser
import shelve
import pprint
import sys
import traceback
import time
import collections
from datetime import datetime
from datetime import timedelta
from config import *
import io
import urllib2
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def initData(data):
# Set up the data shelf
if 'following' not in data.keys():
data['following'] = set()
if 'wasFollowing' not in data.keys():
data['wasFollowing'] = set()
if 'followers' not in data.keys():
data['followers'] = set()
if 'wasFollowedBy' not in data.keys():
data['wasFollowedBy'] = set()
if 'lastTweet' not in data.keys():
data['lastTweet'] = dict()
if 'followedOn' not in data.keys():
data['followedOn'] = dict()
if 'wasFollowingOn' not in data.keys():
data['wasFollowingOn'] = dict()
data.sync()
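    # Shelf layout (summary): 'following'/'followers' hold the current ID sets,
    # 'wasFollowing'/'wasFollowedBy' accumulate every ID ever seen, and
    # 'lastTweet', 'followedOn', 'wasFollowingOn' map user IDs to datetimes.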
def follow(api, data, user):
api.create_friendship(user.id)
data['followedOn'][user.id] = datetime.now()
def authenticate(auth, data):
redirect_url = auth.get_authorization_url()
webbrowser.open(redirect_url)
try:
verifier = raw_input('Verifier:')
data['request_token'] = auth.request_token
# auth.set_request_token(auth.request_token.key, auth.request_token.secret)
try:
auth.get_access_token(verifier)
data['access_token_key'] = auth.access_token
data['access_token_secret'] = auth.access_token_secret
data.sync()
auth.set_access_token(data['access_token_key'], data['access_token_secret'])
except tweepy.TweepError:
print 'Error! Failed to get access token.'
except tweepy.TweepError:
print 'Error! Failed to get request token.'
def usageMessage():
print "Usage: python", sys.argv[0], "command [params]\n"
print "Commands:"
print " update"
print " Updates your list of followers and followed"
print " bury daysSinceLastTweet numberToUnfollow"
print " Remove any 'dead' tweeps. i.e. followers who no longer use twitter"
print " requite daysSinceFollowed numberToUnfollow"
print " Remove any tweeps who do not continue to follow you after daysSinceFollowed days"
print " shotgun user numTweeps "
print " Add numTweeps followers from a user. Doesn't follow previously followed users."
print " copycat user numTweeps"
print " Add numTweeps from the list of tweeps user is following. Doesn't follow previously followed users."
print " copykids numKids numTweeps"
print " Add numKids from *every* person you follow's following list. Stop after adding (approximately) numTweeps total."
print " ignore user"
print " Ignore a particular user, never try to follow them and unfollow if we are following."
print " follow user"
print " Follow a particular user, even if we retired them already."
print " unfollowers filename"
print " prints a list of unfollowers to filename"
def error(message):
usageMessage()
print "ERROR: %s\n" % message
sys.exit(-1)
def info(message):
print message
def update(api, data):
newUsers = 0
totalUsers = 0
stillFollowing = set()
for id in api.friends_ids():
stillFollowing.add(id)
if id not in data['following']:
newUsers += 1
totalUsers += 1
if id not in data['followedOn']:
data['followedOn'][id] = datetime.now()
data['wasFollowing'] |= data['following']
data['wasFollowing'] |= stillFollowing
removed = len(data['following'] - stillFollowing)
data['following'] = stillFollowing
noLongerFollowing = data['wasFollowing'] - stillFollowing
data.sync()
print "Following %d, new %d, removed %d" % (totalUsers, newUsers, removed)
newUsers = 0
totalUsers = 0
stillFollowedBy = set()
for id in api.followers_ids():
stillFollowedBy.add(id)
if id not in data['followers']:
newUsers += 1
totalUsers += 1
data['wasFollowedBy'] |= data['followers']
data['wasFollowedBy'] |= stillFollowedBy
removed = len(data['followers'] - stillFollowedBy)
data['followers'] = stillFollowedBy
noLongerFollowedBy = data['wasFollowedBy'] - stillFollowedBy
data.sync()
print "Followers %d, new %d, removed %d" % (totalUsers, newUsers, removed)
print "No Longer Following %d" % len(noLongerFollowing)
print "No Longer Followed by %d" % len(noLongerFollowedBy)
def copycat(api, data, copycatUser, numTweeps):
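    # Follow up to numTweeps of the accounts that copycatUser is following, skipping
    # anyone we follow or have followed before; returns the number actually followed.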
c = 0
x = 0
for f in tweepy.Cursor(api.friends, copycatUser).items():
x += 1
id = f.id
if id in data['wasFollowing']:
info("%d '%s' following or was following." % (x, f.screen_name))
elif id in data['wasFollowedBy']:
info("%d '%s' followed by or was followed." % (x, f.screen_name))
elif f.protected:
info("%d '%s' is protected." % (x, f.screen_name))
elif f.followers_count <= shotgunTargetMinFollowers:
info("%d '%s' not enough followers." % (x, f.screen_name))
elif f.friends_count <= shotgunTargetMinFollowing:
info("%d '%s' not following enough." % (x, f.screen_name))
elif f.description == "":
info("%d '%s' empty description." % (x, f.screen_name))
elif f.statuses_count <= shotgunTargetMinTweets:
info("%d '%s' not enough tweets." % (x, f.screen_name))
elif f.screen_name == username:
info("%d '%s' can't follow yourself!" % (x, f.screen_name))
else:
api.create_friendship(f.id)
c += 1
info("%d '%s' FOLLOWED(%d)." % (x, f.screen_name, c))
time.sleep(3)
if (c == numTweeps):
break;
return c
def main(argv):
pp = pprint.PrettyPrinter(indent=4)
print "\nPyTweeps v0.1 - using tweepy v%s\n" % pkg_resources.get_distribution('tweepy').version
if len(argv) == 0:
usageMessage()
sys.exit(-1)
data = shelve.open('pytweeps', writeback=True)
initData(data)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.secure = True
if ('access_token_key' not in data.keys()) or ('access_token_secret' not in data.keys()):
authenticate(auth, data)
auth.set_access_token(data['access_token_key'], data['access_token_secret'])
api = tweepy.API(auth)
command = argv[0]
if command == "update":
update(api, data)
elif command == "bury":
# Check params
if len(argv) < 3:
error("Missing params daysSinceLastTweet or numberToUnfollow")
if not isInt(argv[1]):
error("daysSinceLastTweet is not an integer")
daysSinceLastTweet = int(argv[1])
if not isInt(argv[2]):
error("numberToUnfollow is not an integer")
numberToUnfollow = int(argv[2])
delay = 0
if len(argv) >= 4 and isInt(argv[3]):
delay = argv[3]
# death date is the cut off. if they haven't tweeted since then, bury them
cutoffDate = datetime.now() - timedelta(days=daysSinceLastTweet)
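        # e.g. daysSinceLastTweet=90: anyone whose newest tweet is older than ~90 days
        # (and who is not listed in neverBury) gets unfollowed below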
# Check the lastTweet cache, if their last tweet isn't after the cutoffDate don't bother checking against twitter
last = data['lastTweet']
lastKeys = last.keys()
toScan = set()
for f in data['following']:
if f in lastKeys:
if last[f] < cutoffDate:
toScan.add(f)
# else don't bother checking
else:
# not in cache, so check
toScan.add(f)
x = 0
numUnfollowed = 0
try:
for f in toScan:
tweets = api.user_timeline(f, count=1)
if len(tweets) == 0:
# Never tweeted? bury.
user = api.get_user(f)
if user.screen_name not in neverBury:
api.destroy_friendship(f)
print ""
info("Buried '%s' R.I.P. (No Tweets)" % user.screen_name)
numUnfollowed += 1
else:
lastTweet = tweets[0]
if (lastTweet.created_at < cutoffDate):
if lastTweet.user.screen_name not in neverBury:
api.destroy_friendship(f)
print ""
info("Buried '%s' R.I.P. (Last: %s)" % (
lastTweet.user.screen_name, unicode(lastTweet.created_at)))
numUnfollowed += 1
else:
data['lastTweet'][f] = lastTweet.created_at
data.sync()
if numUnfollowed == numberToUnfollow:
break
sys.stdout.write('.')
x += 1
if x % 100 == 0:
sys.stdout.write("[" + str(x) + "]")
sys.stdout.flush()
if delay > 0:
time.sleep(float(delay))
except tweepy.error.TweepError, e:
print ""
if e.message[0]['message'] == u'Rate limit exceeded':
info("Rate limit exceeded")
else:
print traceback.format_exc()
raise e
print ""
update(api, data)
elif command == "requite":
# Check params
if len(argv) < 3:
error("Missing params daysSinceFollowed or numberToUnfollow")
if not isInt(argv[1]):
error("daysSinceFollowed is not an integer")
daysSinceFollowed = int(argv[1])
if not isInt(argv[2]):
error("numberToUnfollow is not an integer")
numberToUnfollow = int(argv[2])
delay = 0
if len(argv) >= 4 and isInt(argv[3]):
delay = argv[3]
# death date is the cut off. if they haven't tweeted since then, bury them
cutoffDate = datetime.now() - timedelta(days=daysSinceFollowed)
# Check the wasFollowingOn cache, if their last tweet isn't after the cutoffDate don't bother checking against twitter
last = data['wasFollowingOn']
lastKeys = last.keys()
followedOn = data['followedOn']
followedOnKeys = followedOn.keys()
toScan = set()
for f in data['following']:
if f in lastKeys:
if last[f] < cutoffDate:
toScan.add(f)
# else don't bother checking
elif f in followedOnKeys:
if followedOn[f] < cutoffDate:
toScan.add(f)
else:
# doesn't have a followedOn date, so check
data['followedOn'][f] = datetime.now()
data.sync()
toScan.add(f)
print "Requiting %d tweeps. %d IDs to scan" % (numberToUnfollow, len(toScan))
x = 0
numUnfollowed = 0
me = api.me()
try:
for f in toScan:
try:
user = api.get_user(f)
except tweepy.error.TweepError, e:
if isinstance(e.message, collections.Iterable):
if e.message[0]['message'] == u'User not found.':
info("User not found, skipping...")
else:
print traceback.format_exc()
raise e
ref = api.show_friendship(source_id=f, target_id=me.id)
if ref[0].following:
# User follows me
data['wasFollowingOn'][f] = datetime.now()
data.sync()
else:
# User not following me
user = api.get_user(f)
if user.screen_name not in neverBury:
api.destroy_friendship(f)
print ""
info("Requited '%s' (Followed On: %s)" % (user.screen_name, unicode(data['followedOn'][f])))
numUnfollowed += 1
# else still has time to follow
if numUnfollowed == numberToUnfollow:
break
sys.stdout.write('.')
x += 1
if x % 100 == 0:
sys.stdout.write("[" + str(x) + "]")
sys.stdout.flush()
if delay > 0:
time.sleep(float(delay))
except tweepy.error.TweepError, e:
print ""
pp.pprint(e)
if isinstance(e.message, collections.Iterable):
if e.message[0]['message'] == u'Rate limit exceeded':
info("Rate limit exceeded")
else:
print traceback.format_exc()
raise e
else:
print traceback.format_exc()
raise e
print ""
update(api, data)
elif command == "shotgun":
if len(argv) != 3:
error("Missing params shotgun user or numTweeps")
shotgunUser = argv[1]
if not isInt(argv[2]):
error("numTweeps is not an integer")
numTweeps = int(argv[2])
info("Shotgunning '%s' for %d followers" % (shotgunUser, numTweeps))
c = 0
x = 0
try:
for f in tweepy.Cursor(api.followers, shotgunUser).items():
x += 1
id = f.id
if id in data['wasFollowing']:
info("%d '%s' following or was following." % (x, f.screen_name))
elif id in data['wasFollowedBy']:
info("%d '%s' followed by or was followed." % (x, f.screen_name))
elif f.protected:
info("%d '%s' is protected." % (x, f.screen_name))
elif f.followers_count <= shotgunTargetMinFollowers:
info("%d '%s' not enough followers." % (x, f.screen_name))
elif f.friends_count <= shotgunTargetMinFollowing:
info("%d '%s' not following enough." % (x, f.screen_name))
elif f.description == "":
info("%d '%s' empty description." % (x, f.screen_name))
elif f.statuses_count <= shotgunTargetMinTweets:
info("%d '%s' not enough tweets." % (x, f.screen_name))
elif f.screen_name == username:
info("%d '%s' can't follow yourself!" % (x, f.screen_name))
else:
try:
api.create_friendship(f.id)
c += 1
info("%d '%s' FOLLOWED(%d)." % (x, f.screen_name, c))
except tweepy.error.TweepError, e:
print ""
if e.message[0]['code'] == 162:
info("%d '%s' blocked you." % (x, f.screen_name))
api.destroy_friendship(f.id)
data['wasFollowing'].add(f.id)
else:
print traceback.format_exc()
raise e
time.sleep(3)
if (c == numTweeps):
break;
except tweepy.error.TweepError, e:
print ""
if e.message[0]['message'] == u'Rate limit exceeded':
info("Rate limit exceeded.")
else:
print traceback.format_exc()
raise e
update(api, data)
elif command == "copycat":
if len(argv) != 3:
error("Missing params copycat user or numTweeps")
copycatUser = argv[1]
if not isInt(argv[2]):
error("numTweeps is not an integer")
numTweeps = int(argv[2])
info("Copycatting '%s' for %d followers" % (copycatUser, numTweeps))
try:
copycat(api, data, copycatUser, numTweeps)
except tweepy.RateLimitError as err:
print ""
info("Rate limit exceeded")
except tweepy.error.TweepError, e:
print ""
print (e.api_code)
print traceback.format_exc()
raise e
update(api, data)
elif command == "copykids":
if len(argv) != 3:
error("Missing params numKids or numTweeps")
if not isInt(argv[1]):
error("numKids is not an integer")
numKids = int(argv[1])
if not isInt(argv[2]):
error("numTweeps is not an integer")
numTweeps = int(argv[2])
info("Copykidding %d follwers from each of your followers. %d followers total." % (numKids, numTweeps))
try:
c = 0
for f in tweepy.Cursor(api.followers).items():
info("********")
print("Copying %s's kids..." % (f.screen_name))
c += copycat(api, data, f.screen_name, numKids)
if (c >= numTweeps):
break;
except tweepy.RateLimitError as err:
print ""
info("Rate limit exceeded")
except tweepy.error.TweepError, e:
print ""
print (e.api_code)
print traceback.format_exc()
raise e
update(api, data)
elif command == "ignore":
if len(argv) != 2:
error("Missing params user")
user = api.get_user(argv[1])
api.destroy_friendship(user.id)
data['wasFollowing'].add(user.id)
print "'%s' ignored." % (user.screen_name)
elif command == "follow":
if len(argv) != 2:
error("Missing params user")
user = api.get_user(argv[1])
follow(api, data, user)
if (user.id in data['wasFollowing']):
data['wasFollowing'].remove(user.id)
print "'%s' FOLLOWED." % (user.screen_name)
elif command == "unfollow":
if len(argv) != 2:
error("Missing param fileName")
with io.open(argv[1], 'r', encoding='utf8') as f:
for line in f:
s = line.split("|",3)
if s[0] == 'x':
api.destroy_friendship(s[1])
print "Unfollowed", s[2]
elif command == "unfollowers":
if len(argv) != 2:
error("Missing param fileName")
old = []
ids = set()
try:
with io.open(argv[1], 'r', encoding='utf8') as f:
for line in f:
s = line.split("|",3)
old.append(s)
ids.add(int(s[1]))
except:
pass
print "Creating a list of unfollowers to %s" % argv[1]
me = api.me()
c = 0
with io.open(argv[1], 'a', encoding='utf8') as f:
for id in api.friends_ids():
print [id], id in ids
if id not in ids:
ref = api.show_friendship(source_id=id, target_id=me.id)
if not ref[0].following:
# User doesn't follow me
user = api.get_user(id)
desc = user.description.replace("\n",'').replace("\r",'')
try:
if user.url:
req = urllib2.urlopen(user.url)
url = req.url
else:
url = ""
except:
url = ""
f.write("|%s|%s|%s|%s|%s\n" % (id, user.screen_name, user.name, desc, url))
f.flush()
time.sleep(3)
c += 1
sys.stdout.write('.')
if c % 100 == 0:
sys.stdout.write("[" + str(c) + "]")
sys.stdout.flush()
else:
error("Unknown command '%s'" % command)
#print api.me().name
rate = api.rate_limit_status()
#pp.pprint(rate)
print ""
data.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
gpl-2.0
| 3,758,599,949,243,339,000
| 35.859964
| 132
| 0.509814
| false
| 3.972717
| false
| false
| false
|
3dfxsoftware/cbss-addons
|
invoice_report_per_journal/report/invoice_report_demo.py
|
1
|
2131
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: Luis Torres (luis_t@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp import pooler
from openerp.tools.translate import _
from openerp import tools
from openerp import tests
from openerp.osv import osv
from openerp import netsvc
import openerp
from report_webkit import webkit_report
import datetime
class invoice_report_demo_html(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(invoice_report_demo_html, self).__init__(
cr, uid, name, context=context)
self.localcontext.update({
})
webkit_report.WebKitParser('report.invoice.report.demo.webkit',
'account.invoice',
'addons/invoice_report_per_journal/report/invoice_report_demo.mako',
parser=invoice_report_demo_html)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gpl-2.0
| 633,159,658,367,753,600
| 39.980769
| 80
| 0.604411
| false
| 4.322515
| false
| false
| false
|
dataplumber/edge
|
src/main/python/libraries/edge/dateutility.py
|
2
|
1665
|
from datetime import date, datetime, timedelta
import dateutil.parser
import calendar
"""
Utility class for date and time conversion.
"""
class DateUtility(object):
RFC_822_GMT_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
@staticmethod
def convertTimeLongToIso(time):
isoTime = ''
try:
isoTime = datetime.utcfromtimestamp(float(time) / 1000).isoformat() + 'Z'
except ValueError:
pass
return isoTime
@staticmethod
def convertISOToUTCTimestamp(isoTime):
try:
#parse ISO date to datetime object
dt = dateutil.parser.parse(isoTime)
#return timestamp in milliseconds
return calendar.timegm(dt.utctimetuple()) * 1000
except:
return None
@staticmethod
def pastDateRFC822(hoursAgo):
return (datetime.utcnow() - timedelta(hours=hoursAgo)).strftime(DateUtility.RFC_822_GMT_FORMAT)
@staticmethod
def convertTimeLongToRFC822(time):
return DateUtility.convertTimeLong(time, DateUtility.RFC_822_GMT_FORMAT)
@staticmethod
def convertTimeLong(time, format):
strTime = ''
try:
strTime = datetime.utcfromtimestamp(float(time) / 1000).strftime(format)
except ValueError:
pass
return strTime
@staticmethod
def convertISOTime(isoTime, format):
try:
#parse ISO date to datetime object
dt = dateutil.parser.parse(isoTime)
#return timestamp in specified format
return dt.strftime(format)
except:
return None
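# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The millisecond timestamp below corresponds to 2014-01-01T00:00:00Z; other values are examples.
if __name__ == "__main__":
    millis = DateUtility.convertISOToUTCTimestamp("2014-01-01T00:00:00Z")
    print(millis)                                           # 1388534400000
    print(DateUtility.convertTimeLongToIso(millis))         # 2014-01-01T00:00:00Z
    print(DateUtility.convertTimeLong(millis, "%Y/%m/%d"))  # 2014/01/01
    print(DateUtility.pastDateRFC822(24))                   # RFC 822 date for 24 hours ago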
|
apache-2.0
| -214,491,296,849,177,860
| 28.210526
| 103
| 0.606006
| false
| 4.5
| false
| false
| false
|
MikeFair/www.gittip.com
|
gittip/csrf.py
|
1
|
6543
|
"""Cross Site Request Forgery middleware, borrowed from Django.
See also:
https://github.com/django/django/blob/master/django/middleware/csrf.py
https://docs.djangoproject.com/en/dev/ref/contrib/csrf/
https://github.com/zetaweb/www.gittip.com/issues/88
"""
import rfc822
import re
import time
import urlparse
#from django.utils.cache import patch_vary_headers
cc_delim_re = re.compile(r'\s*,\s*')
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if 'Vary' in response.headers:
vary_headers = cc_delim_re.split(response.headers['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response.headers['Vary'] = ', '.join(vary_headers + additional_headers)
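# Worked example (editor's note): if response.headers['Vary'] is "Accept-Encoding",
# patch_vary_headers(response, ('Cookie',)) rewrites it to "Accept-Encoding, Cookie";
# calling it again with ('cookie',) leaves the header unchanged.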
#from django.utils.http import same_origin
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
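# Worked example (editor's note): same_origin("https://example.com/a", "https://example.com/b")
# is True, while same_origin("https://example.com/", "http://example.com/") is False; note the
# port is compared literally, so an explicit ":443" (port 443) differs from no port (None).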
from aspen import Response
from crypto import constant_time_compare, get_random_string
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
TOKEN_LENGTH = 32
TIMEOUT = 60 * 60 * 24 * 7 * 52  # CSRF cookie lifetime: 52 weeks, in seconds
def _get_new_csrf_key():
return get_random_string(TOKEN_LENGTH)
def _sanitize_token(token):
# Allow only alphanum, and ensure we return a 'str' for the sake
# of the post processing middleware.
if len(token) > TOKEN_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
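# Worked example (editor's note): _sanitize_token("abc$-123") returns "abc123"; a token longer
# than TOKEN_LENGTH characters, or one that strips down to the empty string, is replaced with a
# fresh random token from _get_new_csrf_key().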
def _is_secure(request):
import gittip
return gittip.canonical_scheme == 'https'
def _get_host(request):
"""Returns the HTTP host using the request headers.
"""
return request.headers.get('X-Forwarded-Host', request.headers['Host'])
def inbound(request):
"""Given a Request object, reject it if it's a forgery.
"""
try:
csrf_token = request.headers.cookie.get('csrf_token')
csrf_token = '' if csrf_token is None else csrf_token.value
csrf_token = _sanitize_token(csrf_token)
# Use same token next time
request.context['csrf_token'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's
# available to the view.
request.context['csrf_token'] = _get_new_csrf_key()
    # Assume that anything not defined as 'safe' by RFC 2616 needs protection
if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if _is_secure(request):
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.headers.get('Referer')
if referer is None:
raise Response(403, REASON_NO_REFERER)
# Note that get_host() includes the port.
good_referer = 'https://%s/' % _get_host(request)
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
raise Response(403, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
raise Response(403, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.line.method == "POST":
request_csrf_token = request.body.get('csrf_token', '')
if request_csrf_token == "":
# Fall back to X-CSRF-TOKEN, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
raise Response(403, REASON_BAD_TOKEN)
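# In practice (per the checks above) a non-safe request must send the csrf_token
# cookie back and echo its value either as a 'csrf_token' form field or, for
# AJAX and PUT/DELETE requests, in an 'X-CSRF-TOKEN' header.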
def outbound(response):
csrf_token = response.request.context.get('csrf_token')
    # If csrf_token is unset, then inbound was never called, probably because
# another inbound hook short-circuited.
if csrf_token is None:
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.headers.cookie['csrf_token'] = csrf_token
cookie = response.headers.cookie['csrf_token']
# I am not setting domain, because it is supposed to default to what we
# want: the domain of the object requested.
#cookie['domain']
cookie['path'] = '/'
cookie['expires'] = rfc822.formatdate(time.time() + TIMEOUT)
#cookie['httponly'] = "Yes, please." Want js access for this.
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
|
cc0-1.0
| -2,196,269,443,329,078,800
| 36.388571
| 81
| 0.643741
| false
| 3.810716
| false
| false
| false
|
ksang/error-extractor
|
lib/markdown.py
|
1
|
2193
|
'''
MarkDown format generator
'''
class MarkDown:
'convert raw text to markdown syntax'
def __init__(self):
self.escape_table = {"\\": "\\\\", "`": "\`",
"*": "\*", "_": "\_",
"{": "\{", "}": "\}",
"[": "\[", "]": "\]",
"(": "\(", ")": "\)",
"#": "\#", "+": "\+",
"-": "\-", ".": "\.",
"|": "\|"
}
def __escape(self, data):
return "".join(self.escape_table.get(c,c) for c in data)
def __convert_lines(self, text='', prefix='', suffix='', olist=False):
if type(text) is str:
if olist:
return '1. ' + self.__escape(text)
else:
return prefix + self.__escape(text) + suffix
elif type(text) is list:
for idx, t in enumerate(text):
if olist:
nt = str(idx+1) + '. ' + self.__escape(t)
else:
nt = prefix + self.__escape(t) + suffix
text[idx] = nt
return text
return ''
def text(self, text):
return self.__convert_lines(text)
def error(self, text):
return self.__convert_lines(text)
def title(self, text):
return self.__convert_lines(text, '##')
def subtitle(self, text):
return self.__convert_lines(text, '###')
def ssubtitle(self, text):
return self.__convert_lines(text, '####')
def bold(self, text):
return self.__convert_lines(text, '**', '**')
def line_breaker(self, count=1):
if count > 1:
ret = []
for i in range(0,count):
ret.append("-------------")
return ret
return "-------------"
def reference(self, text):
return self.__convert_lines(text, '>')
def ordered_list(self, data):
return self.__convert_lines(data, olist=True)
def unordered_list(self, data):
return self.__convert_lines(data, '- ')
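# Illustrative usage sketch (expected values shown as comments):
# md = MarkDown()
# md.title("Report")                     # -> '##Report'
# md.bold("a*b")                         # -> '**a\*b**' (special characters are escaped)
# md.ordered_list(["first", "second"])   # -> ['1. first', '2. second']
# md.unordered_list(["first", "second"]) # -> ['- first', '- second']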
|
mit
| 8,219,994,262,008,899,000
| 29.472222
| 74
| 0.401277
| false
| 4.153409
| false
| false
| false
|
levilucio/SyVOLT
|
ECore_Copier_MM/transformation-Large/HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation.py
|
1
|
5078
|
from core.himesis import Himesis
class HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation, self).__init__(name='HeoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eoperationlefteAnnotationsSolveRefEOperationEAnnotationEOperationEAnnotation"""
self["GUID__"] = 5816395996192583717
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 8044970359314201378
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 1048396254969054700
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 4558494274367220420
self.vs[3]["associationType"] = """eAnnotations"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 2115966609539548178
self.vs[4]["associationType"] = """eAnnotations"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 1458168688512188010
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EOperation"""
self.vs[5]["mm__"] = """EOperation"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 3498868833057656827
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 1307123802579665829
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EAnnotation"""
self.vs[7]["mm__"] = """EAnnotation"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 5438034355437875093
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 347179529733664915
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EOperation"""
self.vs[9]["mm__"] = """EOperation"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 2062346932891848348
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 7369516320927345833
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EAnnotation"""
self.vs[11]["mm__"] = """EAnnotation"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 8754100728367131831
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 7003937250653372044
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 1875949330034786489
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 5523630539087955496
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 2583131276534883053
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 1181219036459105099
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 1530653583095677969
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 40237161015443598
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 7359435342082954621
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 6720296362885197874
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 7435363414672850123
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 206401628991295002
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 3235173079800635441
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 7728551407519580789
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 98859355129756548
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 6740085100061687672
|
mit
| 2,488,468,065,536,911,000
| 48.300971
| 298
| 0.522844
| false
| 3.026222
| false
| false
| false
|
Travelport-Czech/apila
|
tasks/Lambda.py
|
1
|
8808
|
import zipfile
import tempfile
import shutil
import os
import os.path
import hashlib
import base64
import json
import logging
import subprocess
import re
import botocore
import tasks.name_constructor as name_constructor
import tasks.bototools as bototools
from tasks.Task import Task
class Lambda(Task):
"""Create a lambda function and upload the code from given folder"""
known_params = {
'name': 'function name',
'code': "path to the folder with function's source code",
'role': 'name of a role for the execution of the function',
'runtime': "name and a version of interpret for the execution i.e.: 'nodejs4.3'",
'handler': 'entrypoint to the function code',
'description': 'short description of the function',
'timeout': 'maximal time for the execution of the function',
'memory_size': 'amount of memory reserved for the execution of the function',
'publish': "I'm not sure, give always True ;-)",
'babelize': "flag if the source must be converted by babel (default True)",
'babelize_skip': "list of modules to be skipped by babel"
}
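  # Illustrative (hypothetical) params for a task of this type; the key names
  # follow known_params above, the values are made up:
  #   {'name': 'thumbnailer', 'code': './lambdas/thumbnailer', 'role': 'lambda-exec',
  #    'runtime': 'nodejs4.3', 'handler': 'app/index.handler', 'timeout': 30,
  #    'memory_size': 128, 'publish': True}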
required_params = ('name', 'code', 'role', 'runtime', 'handler')
required_configs = ('user', 'branch')
task_name = 'lambda'
def __str__(self):
if self.name:
return self.name
else:
return 'Create a lambda function %s' % (self.params['description'] if 'description' in self.params else self.params['name'])
def get_files(self, path, rel_part):
out = []
for root, dirs, files in os.walk(os.path.join(path, rel_part)):
rel_root = root[len(path):].lstrip('/')
for filename in files:
out.append((os.path.join(root, filename), os.path.join(rel_root, filename)))
return sorted(out)
def create_zip(self, files):
zip_name = tempfile.mkstemp(suffix='.zip', prefix='lambda_')[1]
with zipfile.ZipFile(zip_name, 'w') as myzip:
for filedef in files:
os.utime(filedef[0], (946681200, 946681200)) # date '+%s' -d '2000-01-01'
myzip.write(filedef[0], filedef[1])
zip_data = open(zip_name, 'rb').read()
os.unlink(zip_name)
return zip_data
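  # Note on the pinned mtimes above: with fixed timestamps and contents the zip
  # bytes are reproducible, so the base64-encoded SHA-256 computed in run() should
  # only differ from the deployed function's CodeSha256 when the code actually
  # changed (assuming zipfile writes entries deterministically in list order).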
def run_npm_install(self, path):
cwd = os.getcwd()
os.chdir(path)
try:
npm_out = subprocess.check_output(['npm', 'install', '--production'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logging.error(e.output)
raise e
finally:
os.chdir(cwd)
def babelize(self, base_path, clean_dir, babelized_dir):
cwd = os.getcwd()
if os.path.exists('../node_modules/.bin/babel'):
os.chdir('..')
if not os.path.exists('node_modules/.bin/babel'):
os.chdir(base_path)
preset_base = os.getcwd()
try:
babel_out = subprocess.check_output(' '.join(['node_modules/.bin/babel', '--no-babelrc --presets', os.path.join(preset_base, 'node_modules', 'babel-preset-es2015-node4'), '--copy-files', '--out-dir', babelized_dir, clean_dir]), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
logging.error('cwd: '+os.getcwd())
logging.error(e.output)
raise e
finally:
os.chdir(cwd)
def clean_packages(self, files, path_to_remove):
r_shasum = re.compile(r'"_shasum"[^,]+,')
for filename, rel in files:
if filename.endswith('package.json'):
with open(filename) as fin:
text = fin.read()
new_text = r_shasum.sub('', text.replace(path_to_remove, '/tmp'))
with open(filename, 'w') as fout:
fout.write(new_text)
def prepare_zipped_code(self, code_path, babelize):
excluded_mods = self.params['babelize_skip'] if 'babelize_skip' in self.params else set()
work_dir = tempfile.mkdtemp(prefix='lambda_')
clean_dir = os.path.join(work_dir, 'clean')
os.mkdir(clean_dir)
shutil.copytree(os.path.join(code_path, 'app'), os.path.join(clean_dir, 'app'))
shutil.copy(os.path.join(code_path, 'package.json'), os.path.join(clean_dir, 'package.json'))
self.run_npm_install(clean_dir)
if babelize:
babelized_dir = os.path.join(work_dir, 'babelized')
babelized_app_dir = os.path.join(babelized_dir, 'app')
babelized_mod_dir = os.path.join(babelized_dir, 'node_modules')
clean_mod_dir = os.path.join(clean_dir, 'node_modules')
os.mkdir(babelized_dir)
os.mkdir(babelized_app_dir)
os.mkdir(babelized_mod_dir)
self.babelize(code_path, os.path.join(clean_dir, 'app'), babelized_app_dir)
for module_name in os.listdir(clean_mod_dir):
src = os.path.join(clean_mod_dir, module_name)
dest = os.path.join(babelized_mod_dir, module_name)
if module_name in excluded_mods:
shutil.copytree(src, dest)
else:
os.mkdir(dest)
self.babelize(code_path, src, dest)
files = self.get_files(babelized_app_dir, '') + self.get_files(babelized_dir, 'node_modules')
else:
files = self.get_files(os.path.join(clean_dir, 'app'), '') + self.get_files(clean_dir, 'node_modules')
self.clean_packages(files, work_dir)
files_to_zip = [file_name for file_name in files if not file_name[0].endswith('.SAMPLE')]
zip_data = self.create_zip(files_to_zip)
shutil.rmtree(work_dir)
return zip_data
def run(self, clients, cache):
client = clients.get('lambda')
iam_client = clients.get('iam')
function_name = name_constructor.lambda_name(self.params['name'], self.config['user'], self.config['branch'])
role_arn = bototools.get_role_arn(iam_client, self.params['role'])
description = (self.params['description'] if 'description' in self.params else '') + self.get_version_description()
try:
zip_data = self.prepare_zipped_code(self.params['code'], True if 'babelize' not in self.params else self.params['babelize'])
except Exception as e:
logging.exception(str(e))
return (False, str(e))
if role_arn is None:
return (False, "Required role '%s' not found" % self.params['role'])
try:
function_conf = client.get_function_configuration(FunctionName=function_name)
except botocore.exceptions.ClientError:
return self.create(client, cache, function_name, role_arn, zip_data, description)
if role_arn == function_conf['Role'] and \
self.params['runtime'] == function_conf['Runtime'] and \
self.params['handler'] == function_conf['Handler'] and \
(description == function_conf['Description']) and \
('timeout' not in self.params or self.params['timeout'] == function_conf['Timeout']) and \
('memory_size' not in self.params or self.params['memory_size'] == function_conf['MemorySize']):
result = ''
else:
self.update(client, function_name, role_arn, description)
result = self.CHANGED
sha256_sumator = hashlib.sha256()
sha256_sumator.update(zip_data)
sha256_sum = sha256_sumator.digest()
sha256_sum_encoded = base64.b64encode(sha256_sum)
if sha256_sum_encoded != function_conf['CodeSha256']:
client.update_function_code(FunctionName=function_name, ZipFile=zip_data, Publish=self.params['publish'] if 'publish' in self.params else None)
result = self.CHANGED
cache.put('lambda', function_name, function_conf['FunctionArn'])
return (True, result)
def update(self, client, function_name, role_arn, description):
lambda_def = {
'FunctionName': function_name,
'Runtime': self.params['runtime'],
'Role': role_arn,
'Handler': self.params['handler']
}
lambda_def['Description'] = description
if 'timeout' in self.params:
lambda_def['Timeout'] = self.params['timeout']
if 'memory_size' in self.params:
lambda_def['MemorySize'] = self.params['memory_size']
client.update_function_configuration(**lambda_def)
def create(self, client, cache, function_name, role_arn, zip_data, description):
lambda_def = {
'FunctionName': function_name,
'Runtime': self.params['runtime'],
'Role': role_arn,
'Handler': self.params['handler'],
'Code': {'ZipFile': zip_data}
}
lambda_def['Description'] = description
if 'timeout' in self.params:
lambda_def['Timeout'] = self.params['timeout']
if 'memory_size' in self.params:
lambda_def['MemorySize'] = self.params['memory_size']
if 'publish' in self.params:
lambda_def['Publish'] = self.params['publish']
response = client.create_function(**lambda_def)
cache.put('lambda', function_name, response['FunctionArn'])
return (True, self.CREATED)
def get_version_description(self):
manifest_path = os.path.join(self.params['code'], 'package.json')
if os.path.exists(manifest_path):
manifest = json.load(open(manifest_path))
if 'version' in manifest:
return ' (v%s)' % manifest['version']
return ''
|
mit
| 2,048,214,956,815,787,300
| 41.346154
| 271
| 0.655654
| false
| 3.377301
| false
| false
| false
|
gcallah/Indra
|
indraV1/models/markov_attempts/two_pop_markov_model.py
|
1
|
1534
|
"""
You can clone this file and its companion two_pop_m_run.py
to easily get started on a new two pop markov model.
It also is a handy tool to have around for testing
new features added to the base system. The agents
don't move. They have 50% chance of changing color
from red to blue, or from blue to red.
"""
import indra.two_pop_markov as itpm
R = 0
B = 1
STATE_MAP = { R: "Red", B: "Blue" }
class TestFollower(itpm.Follower):
"""
An agent that prints its neighbors in preact
    and also jumps to an empty cell: default behavior
from our ancestor.
Attributes:
        state: Red or Blue ... whichever it is, the agent
            will appear as that color on the scatter plot.
ntype: node type
next_state: the next color the agent will be
"""
def __init__(self, name, goal):
super().__init__(name, goal)
self.state = R
self.ntype = STATE_MAP[R]
self.next_state = None
def postact(self):
"""
Set our type to next_state.
"""
if self.next_state is not None and self.next_state != self.state:
# print("Setting state to " + str(self.next_state))
self.set_state(self.next_state)
self.next_state = None
return self.pos
def set_state(self, new_state):
"""
Set agent's new type.
"""
old_type = self.ntype
self.state = new_state
self.ntype = STATE_MAP[new_state]
self.env.change_agent_type(self, old_type, self.ntype)
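# Sketch of the transition described in the module docstring (the actual logic is
# assumed to live in indra.two_pop_markov): each period an agent flips color with
# probability 0.5, e.g.
#   if random.random() < 0.5:
#       agent.next_state = B if agent.state == R else R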
|
gpl-3.0
| 7,503,514,805,912,917,000
| 26.890909
| 73
| 0.602999
| false
| 3.617925
| false
| false
| false
|
infojasyrc/client_dataws
|
client_dataws/lib/util/file_verificator.py
|
1
|
2208
|
'''
Created on Feb 02, 2013
@author: Jose Sal y Rosas
@contact: arturo.jasyrc@gmail.com
'''
import zlib
import hashlib
class Verificator(object):
def __init__(self):
pass
def set_parameters(self, path='', algorithm='crc32', blocksize=8192):
self.path = path
self.algorithm = algorithm
self.blocksize = blocksize
def set_algorithm(self, algorithm):
self.algorithm = algorithm
def set_file(self, path):
self.path = path
def set_block_size(self, blocksize):
self.blocksize = blocksize
def get_algorithm(self):
return self.algorithm
def get_file(self):
return self.path
def get_block_size(self):
return self.blocksize
def generatechecksum(self, path='', blocksize=8192):
resultado = 0
if path == '':
path = self.path
if blocksize == 8192:
blocksize = self.blocksize
if 'crc32' in self.algorithm:
resultado = self.executecrc(path, blocksize)
elif 'md5' in self.algorithm:
resultado = self.executemd5(path, blocksize)
return resultado
def executecrc(self, path, blocksize):
crctemp = 0
with open(path, 'rb') as f:
while True:
data = f.read(blocksize)
if not data:
break
crctemp = zlib.crc32(data, crctemp)
return crctemp
def executemd5(self, path, blocksize):
with open(path, 'rb') as f:
m = hashlib.md5()
while True:
data = f.read(blocksize)
if not data:
break
m.update(data)
return m.hexdigest()
def verify(self, path, checksum):
if checksum == self.generatechecksum(path):
return 'suceed'
else:
return 'failed'
if __name__ == "__main__":
path = '/home/developer/Documents/database/datos/iniciales/EW_Drift+Faraday/EW_Drift/d2012219/D2012213003.r'
obj = Verificator()
obj.set_parameters(path, 'md5')
checksum = obj.generatechecksum()
print checksum
print obj.verify(path, checksum)
|
mit
| 8,283,340,951,751,417,000
| 23.544444
| 112
| 0.567029
| false
| 3.992767
| false
| false
| false
|
itdxer/django-project-template
|
{{cookiecutter.project_name}}/apps/users/migrations/0001_initial.py
|
1
|
2579
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
},
bases=(models.Model,),
),
]
|
mit
| 6,700,627,795,742,435,000
| 68.702703
| 289
| 0.651028
| false
| 4.312709
| false
| false
| false
|
andrewchenshx/vnpy
|
vnpy/app/algo_trading/algos/dma_algo.py
|
1
|
2651
|
from vnpy.trader.constant import Offset, Direction, OrderType
from vnpy.trader.object import TradeData, OrderData, TickData
from vnpy.trader.engine import BaseEngine
from vnpy.app.algo_trading import AlgoTemplate
class DmaAlgo(AlgoTemplate):
""""""
display_name = "DMA 直接委托"
default_setting = {
"vt_symbol": "",
"direction": [Direction.LONG.value, Direction.SHORT.value],
"order_type": [
OrderType.MARKET.value,
OrderType.LIMIT.value,
OrderType.STOP.value,
OrderType.FAK.value,
OrderType.FOK.value
],
"price": 0.0,
"volume": 0.0,
"offset": [
Offset.NONE.value,
Offset.OPEN.value,
Offset.CLOSE.value,
Offset.CLOSETODAY.value,
Offset.CLOSEYESTERDAY.value
]
}
variables = [
"traded",
"vt_orderid",
"order_status",
]
def __init__(
self,
algo_engine: BaseEngine,
algo_name: str,
setting: dict
):
""""""
super().__init__(algo_engine, algo_name, setting)
# Parameters
self.vt_symbol = setting["vt_symbol"]
self.direction = Direction(setting["direction"])
self.order_type = OrderType(setting["order_type"])
self.price = setting["price"]
self.volume = setting["volume"]
self.offset = Offset(setting["offset"])
# Variables
self.vt_orderid = ""
self.traded = 0
self.order_status = ""
self.subscribe(self.vt_symbol)
self.put_parameters_event()
self.put_variables_event()
def on_tick(self, tick: TickData):
""""""
if not self.vt_orderid:
if self.direction == Direction.LONG:
self.vt_orderid = self.buy(
self.vt_symbol,
self.price,
self.volume,
self.order_type,
self.offset
)
else:
self.vt_orderid = self.sell(
self.vt_symbol,
self.price,
self.volume,
self.order_type,
self.offset
)
self.put_variables_event()
def on_order(self, order: OrderData):
""""""
self.traded = order.traded
self.order_status = order.status
if not order.is_active():
self.stop()
self.put_variables_event()
def on_trade(self, trade: TradeData):
""""""
pass
|
mit
| 3,550,596,912,636,014,000
| 25.69697
| 67
| 0.501703
| false
| 4.035115
| false
| false
| false
|
smartczm/python-learn
|
Old-day01-10/s13-day5/get/day5/Atm/src/crontab.py
|
1
|
2023
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import json
import time
from config import settings
from src.backend import logger
def main():
card_list = os.listdir(settings.USER_DIR_FOLDER)
for card in card_list:
basic_info = json.load(open(os.path.join(settings.USER_DIR_FOLDER, card, 'basic_info.json')))
struct_time = time.localtime()
        # Loop over the debt list: accrue interest on each month's outstanding debt and record it in that month's bill
for item in basic_info['debt']:
interest = item['total_debt'] * 0.0005
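            # e.g. a total_debt of 10000 accrues 10000 * 0.0005 = 5.0 in interest per run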
if basic_info['saving'] >= interest:
basic_info['saving'] -= interest
else:
temp = interest - basic_info['saving']
basic_info['balance'] -= temp
logger_obj = logger.get_logger(card, struct_time)
logger_obj.info("欠款利息 - %f - 备注:未还款日期%s;共欠款%f,未还款%f" % (interest, item['date'], item['total_debt'], item['balance_debt'],))
json.dump(
basic_info,
open(os.path.join(settings.USER_DIR_FOLDER, basic_info['card'], "basic_info.json"), 'w')
)
        # If the current date is the statement day (the 10th, i.e. spending before the 9th)
        # and the balance has gone negative (below the credit limit), add the difference to
        # the debt list and start accruing interest; also restore this month's available credit.
if struct_time.tm_mday == 11 and basic_info['credit'] > basic_info['balance']:
date = time.strftime("%Y-%m-%d")
dic = {'date': date,
"total_debt": basic_info['credit'] - basic_info['balance'],
"balance_debt": basic_info['credit'] - basic_info['balance'],
}
basic_info['debt'].append(dic)
            # Restore the available credit
basic_info['balance'] = basic_info['credit']
json.dump(
basic_info,
open(os.path.join(settings.USER_DIR_FOLDER, basic_info['card'], "basic_info.json"), 'w')
)
def run():
main()
|
gpl-2.0
| -718,927,813,982,822,900
| 34.134615
| 135
| 0.536946
| false
| 2.980424
| false
| false
| false
|
IsmoilovMuhriddin/allgo
|
rasp/diagnosis.py
|
1
|
3928
|
import signal
import time
import wiringpi as wp
from rasp.allgo_utils import PCA9685
from rasp.allgo_utils import ultrasonic as uls
LOW = 0
HIGH = 1
OUTPUT = wp.OUTPUT
INPUT = wp.INPUT
CAR_DIR_FW = 0
CAR_DIR_BK = 1
CAR_DIR_LF = 2
CAR_DIR_RF = 3
CAR_DIR_ST = 4
DIR_DISTANCE_ALERT = 20
preMillis = 0
keepRunning = 1
OUT = [5, 0, 1, 2, 3] # 5:front_left_led, 0:front_right_led, 1:rear_right_led, 2:rear_left_led, 3:ultra_trig
IN = [21, 22, 26, 23] # 21:left_IR, 22:center_IR, 26:right_IR, 23:ultra_echo
ULTRASONIC_TRIG = 3 # TRIG port is to use as output signal
ULTRASONIC_ECHO = 23 # ECHO port is to use as input signal
# An instance of the motor & buzzer
pca9685 = PCA9685()
#Ultrasonic ultra; # An instance of the ultrasonic sensor
ultra = uls(ULTRASONIC_TRIG,ULTRASONIC_ECHO)
# distance range: 2cm ~ 5m
# angular range: 15deg
# resolution: 3mm
"""
void setup();
void loop();
void checkUltra();
void intHandler(int dummy);
"""
def setup():
wp.wiringPiSetup() # Initialize wiringPi to load Raspbarry Pi PIN numbering scheme
"""
for(i=0; i<sizeof(OUT); i++){
pinMode(OUT[i], OUTPUT); // Set the pin as output mode
wp.digitalWrite(OUT[i], LOW); // Transmit HIGH or LOW value to the pin(5V ~ 0V)
}"""
for i in range(len(OUT)):
wp.pinMode(OUT[i],OUTPUT)
wp.digitalWrite(OUT[i], LOW)
for i in range(len(IN)):
wp.pinMode(IN[i],INPUT)
def check_ultra():
disValue=ultra.distance()
print("Distance:%.2f\t"%disValue)
def action(menu):
global curMillis
if menu==0:
pca9685.go_forward();
time.sleep(20);
pca9685.stop();
elif menu== 1:
pca9685.go_back();
time.sleep(20);
pca9685.stop();
elif menu== 2:
        # front left
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
time.sleep(20);
wp.digitalWrite(OUT[0], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[0], LOW);
elif menu== 3:
        # front right
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
time.sleep(20);
wp.digitalWrite(OUT[1], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[1], LOW);
elif menu== 4:
#// rear left
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
time.sleep(20);
wp.digitalWrite(OUT[3], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[3], LOW);
elif menu== 5:
# rear right
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
time.sleep(20);
wp.digitalWrite(OUT[2], HIGH);
time.sleep(20);
wp.digitalWrite(OUT[2], LOW);
elif menu ==6:
#ultrasonic
check_ultra();
elif menu== 9:
pca9685.go_right();
time.sleep(5);
pca9685.stop();
elif menu== 10:
pca9685.go_left();
time.sleep(5);
pca9685.stop();
elif menu== 8:
print("Beeping for 2 seconds\n");
pca9685.on_buzz();
time.sleep(2);
pca9685.off_buzz();
elif menu== 11:
print("EXIT\n");
keepRunning = 0;
else:
print("Check the list again\n")
print("\n")
menu = -1
def loop():
"""// return the cu
time(el
time since your arduino started) in milliseconds(1/1000 second)"""
llinevalue = 0
clinevalue = 0
rlinevalue = 0
print 'This is a diagnostic program for your mobile robot.\n'
print '0: go foward\n1: go backward\n2: front left led\n3: frount right led\n4: rear left led\n5: rear right led\n6: ultrasonic\n7: IR\n8: buzzer\n9:go right\n10: go left\n11: Exit the program\n'
print('Please select one of them: ')
menu = int(input())
action(menu)
menu = -1
"""// obstacle detection and move to another derection.
void checkUltra(){
float disValue = ultra.ReadDista
timeter();
printf("ultrasonic: %f\n",disValue);
"""
def signal_handler(dummy):
print("SIGNAL INTERRUPT",dummy)
time.sleep(1000)
keepRunning = 0;
#sda
def main (**kwargs):
setup()
signal.signal(signal.SIGINT, signal_handler)
while keepRunning:
loop()
return 0
main()
|
mit
| -4,003,689,042,530,230,300
| 20.944134
| 196
| 0.647912
| false
| 2.550649
| false
| false
| false
|
ppppn/twitter-bot
|
ReplyAndFav.py
|
1
|
6678
|
#!/usr/bin/python
# coding: UTF-8
from tweepy.error import TweepError
import random
import re
from const import *
from words import *
from replies import replies
import datetime
import logging
from API import GetAPI
logging.basicConfig(level=LOGLEVEL)
api = None
# Description
# Function list
# FUNCTION_NAME(args) > Returns(SUCCESS, FAILED)
# FetchHomeTL() > (TIMELINE, False)
# FormattingAndTweetForReply(status, content) > (True, False)
# CheckAndReplyToSpecialWord(account_screen_name, status) > (True, False)
# CheckAndReplyToNormalTweet(status) > (True, False)
# CheckAndCreateFav(status) > (True, False)
def UpdateAndNotifyAccountInfo():
__name__ = "UpdateAndNotifyAccountInfo()"
global api
account = api.me()
try:
if not account.name == BOT_NAME:
api.update_status(UPDATE_MSG)
api.update_profile(name=BOT_NAME)
logging.info("%s: Successfully finished.", __name__)
except TweepError, e:
logging.error("%s: %s", __name__, e.reason)
def FetchHomeTL():
__name__ = "FetchHomeTL()"
global api
since_id = api.user_timeline()[0].id
logging.debug("%s: Last post id: %d", __name__, since_id)
try:
return api.home_timeline(since_id=since_id)
except TweepError, e:
logging.error("%s: %s", __name__, e.reason)
return False
def FormattingAndTweetForReply(status, content):
__name__ = "FormattingAndTweetForReply()"
global api
  # Format the tweet into its final posted form and post it
error_counter = 0
  # Replace {name} with the recipient's display name
content = content.format(name = status.author.name)
  # Prepend the "@username" mention
formatted_tweet = "@" + status.author.screen_name + " " + content
  # Post the tweet
while error_counter < ERROR_LIMIT:
try:
api.update_status(formatted_tweet, in_reply_to_status_id = int(status.id))
logging.debug("%s: The following tweet was successfully posted> '%s'",
__name__, formatted_tweet)
return True
except TweepError, e:
logging.error(e.reason)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
def CheckAndReplyToSpecialWord(account_screen_name, status):
__name__ = "CheckAndReplyToSpecialWord()"
global api
error_counter = 0
  # Only handle mentions addressed to this bot
if status.in_reply_to_screen_name == account_screen_name:
for special_word in special_words:
if re.search(special_word, status.text):
logging.debug("%s: The special word '%s' was detected in %s's post '%s'",
__name__, special_word, status.author.screen_name, status.text)
num_max_patterns = len(special_words[special_word]) - 1
while error_counter < ERROR_LIMIT:
random.seed()
selected_num = random.randint(0, num_max_patterns)
content = special_words[special_word][selected_num]
          # Append the current time to avoid duplicate-status errors
content += " (%s)"%str(datetime.datetime.today())
logging.debug("%s: Special word reply was generated> '%s'", __name__, content)
if FormattingAndTweetForReply(status, content):
return True
else:
logging.error("%s: Reselect", __name__)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
logging.debug("%s: No special word was founded in %s's post '%s'",
__name__, status.author.screen_name, status.text)
return False
else:
return False
def CheckAndReplyToNormalTweet(status):
__name__ = "CheckAndReplyToNormalTweet()"
global api
error_counter = 0
num_max_tw = len(replies) - 1
for word in reply_words:
if re.search(word, status.text):
logging.debug("%s: The reply word '%s' was detected in %s's post '%s'",
__name__, word, status.author.screen_name, status.text)
while error_counter < ERROR_LIMIT:
random.seed()
tw_num = random.randint(0, num_max_tw)
content = replies[tw_num].format(name=status.author.name)
logging.debug("%s: Normal word reply selected> '%s'", __name__, content)
if FormattingAndTweetForReply(status, content):
return True
else:
logging.error("%s: Reselect", __name__)
error_counter += 1
logging.error("%s: Failed to post %d times. Aborted.", __name__, ERROR_LIMIT)
return False
def CheckAndCreateFav(status):
__name__ = "CheckAndCreateFav()"
global api
if status.favorited == False:
error_counter = 0
for fav_word in fav_words:
if re.search(fav_word, status.text):
logging.debug("%s: Favorite word '%s' was detected in %s's post '%s'",
__name__, fav_word, status.author.screen_name, status.text)
while error_counter < ERROR_LIMIT:
try:
api.create_favorite(status.id)
logging.debug("%s: Successfully favorited %s's post> '%s'",
__name__, status.author.screen_name, status.text)
return True
except TweepError, e:
logging.error(e.reason)
error_counter += 1
logging.error("%s: Failed to create fav %d times. Aborted.",
__name__, ERROR_LIMIT)
return False
def main():
global api
api = GetAPI()
UpdateAndNotifyAccountInfo()
account_screen_name = api.me().screen_name
tw_counter = 0
fav_counter = 0
result = False
Timeline = FetchHomeTL()
contains_excluded_word = False
if Timeline == False:
logging.critical("Failed to fetch home timeline. All processes are aborted.")
else:
for status in Timeline:
contains_excluded_word = False
if status.author.screen_name == account_screen_name:
pass
        # Tweets posted by the bot itself are ignored
else:
        # Skip processing if the tweet contains any word registered in excluded_words
for excluded_word in excluded_words:
if re.search(excluded_word, status.text):
contains_excluded_word = True
if contains_excluded_word == False:
result = CheckAndReplyToSpecialWord(account_screen_name, status)
if result == False:
result = CheckAndReplyToNormalTweet(status)
if result == True:
tw_counter += 1
result = CheckAndCreateFav(status)
if result == True:
fav_counter += 1
logging.info("Reply: %d, Fav: %d", tw_counter, fav_counter)
if __name__ == "__main__":
main()
|
mit
| -1,916,938,151,522,885,000
| 32.989418
| 89
| 0.622821
| false
| 3.33541
| false
| false
| false
|
futureshocked/RaspberryPi-FullStack
|
Complete_Python2_app/lab_app.py
|
1
|
6719
|
from flask import Flask, request, render_template
import time
import datetime
import arrow
app = Flask(__name__)
app.debug = True # Make this False if you are no longer debugging
@app.route("/")
def hello():
return "Hello World!"
@app.route("/lab_temp")
def lab_temp():
import sys
import Adafruit_DHT
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 4)
if humidity is not None and temperature is not None:
return render_template("lab_temp.html",temp=temperature,hum=humidity)
else:
return render_template("no_sensor.html")
@app.route("/lab_env_db", methods=['GET']) #Add date limits in the URL #Arguments: from=2015-03-04&to=2015-03-05
def lab_env_db():
temperatures, humidities, timezone, from_date_str, to_date_str = get_records()
# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
time_adjusted_temperatures = []
time_adjusted_humidities = []
for record in temperatures:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_adjusted_temperatures.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])
for record in humidities:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_adjusted_humidities.append([local_timedate.format('YYYY-MM-DD HH:mm'), round(record[2],2)])
print "rendering lab_env_db.html with: %s, %s, %s" % (timezone, from_date_str, to_date_str)
return render_template("lab_env_db.html", timezone = timezone,
temp = time_adjusted_temperatures,
hum = time_adjusted_humidities,
from_date = from_date_str,
to_date = to_date_str,
temp_items = len(temperatures),
hum_items = len(humidities),
query_string = request.query_string, #This query string is used by the Plotly link
)
def get_records():
import sqlite3
from_date_str = request.args.get('from',time.strftime("%Y-%m-%d 00:00")) #Get the from date value from the URL
to_date_str = request.args.get('to',time.strftime("%Y-%m-%d %H:%M")) #Get the to date value from the URL
timezone = request.args.get('timezone','Etc/UTC');
range_h_form = request.args.get('range_h',''); #This will return a string, if field range_h exists in the request
range_h_int = "nan" #initialise this variable with not a number
print "REQUEST:"
print request.args
try:
range_h_int = int(range_h_form)
except:
print "range_h_form not a number"
print "Received from browser: %s, %s, %s, %s" % (from_date_str, to_date_str, timezone, range_h_int)
if not validate_date(from_date_str): # Validate date before sending it to the DB
from_date_str = time.strftime("%Y-%m-%d 00:00")
if not validate_date(to_date_str):
to_date_str = time.strftime("%Y-%m-%d %H:%M") # Validate date before sending it to the DB
print '2. From: %s, to: %s, timezone: %s' % (from_date_str,to_date_str,timezone)
# Create datetime object so that we can convert to UTC from the browser's local time
from_date_obj = datetime.datetime.strptime(from_date_str,'%Y-%m-%d %H:%M')
to_date_obj = datetime.datetime.strptime(to_date_str,'%Y-%m-%d %H:%M')
# If range_h is defined, we don't need the from and to times
if isinstance(range_h_int,int):
arrow_time_from = arrow.utcnow().replace(hours=-range_h_int)
arrow_time_to = arrow.utcnow()
from_date_utc = arrow_time_from.strftime("%Y-%m-%d %H:%M")
to_date_utc = arrow_time_to.strftime("%Y-%m-%d %H:%M")
from_date_str = arrow_time_from.to(timezone).strftime("%Y-%m-%d %H:%M")
to_date_str = arrow_time_to.to(timezone).strftime("%Y-%m-%d %H:%M")
else:
#Convert datetimes to UTC so we can retrieve the appropriate records from the database
from_date_utc = arrow.get(from_date_obj, timezone).to('Etc/UTC').strftime("%Y-%m-%d %H:%M")
to_date_utc = arrow.get(to_date_obj, timezone).to('Etc/UTC').strftime("%Y-%m-%d %H:%M")
conn = sqlite3.connect('/var/www/lab_app/lab_app.db')
curs = conn.cursor()
curs.execute("SELECT * FROM temperatures WHERE rDateTime BETWEEN ? AND ?", (from_date_utc.format('YYYY-MM-DD HH:mm'), to_date_utc.format('YYYY-MM-DD HH:mm')))
temperatures = curs.fetchall()
curs.execute("SELECT * FROM humidities WHERE rDateTime BETWEEN ? AND ?", (from_date_utc.format('YYYY-MM-DD HH:mm'), to_date_utc.format('YYYY-MM-DD HH:mm')))
humidities = curs.fetchall()
conn.close()
return [temperatures, humidities, timezone, from_date_str, to_date_str]
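# Illustrative request sketches (hypothetical values):
#   /lab_env_db?from=2015-03-04%2000:00&to=2015-03-05%2000:00&timezone=Etc/UTC
#   /lab_env_db?range_h=24&timezone=Etc/UTC   (range_h overrides the from/to window)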
@app.route("/to_plotly", methods=['GET']) #This method will send the data to ploty.
def to_plotly():
import plotly.plotly as py
from plotly.graph_objs import *
temperatures, humidities, timezone, from_date_str, to_date_str = get_records()
# Create new record tables so that datetimes are adjusted back to the user browser's time zone.
time_series_adjusted_tempreratures = []
time_series_adjusted_humidities = []
time_series_temprerature_values = []
time_series_humidity_values = []
for record in temperatures:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_series_adjusted_tempreratures.append(local_timedate.format('YYYY-MM-DD HH:mm'))
time_series_temprerature_values.append(round(record[2],2))
for record in humidities:
local_timedate = arrow.get(record[0], "YYYY-MM-DD HH:mm").to(timezone)
time_series_adjusted_humidities.append(local_timedate.format('YYYY-MM-DD HH:mm')) #Best to pass datetime in text
#so that Plotly respects it
time_series_humidity_values.append(round(record[2],2))
temp = Scatter(
x=time_series_adjusted_tempreratures,
y=time_series_temprerature_values,
name='Temperature'
)
hum = Scatter(
x=time_series_adjusted_humidities,
y=time_series_humidity_values,
name='Humidity',
yaxis='y2'
)
data = Data([temp, hum])
layout = Layout(
title="Temperature and humidity in Peter's lab",
xaxis=XAxis(
type='date',
autorange=True
),
yaxis=YAxis(
            title='Celsius',
type='linear',
autorange=True
),
yaxis2=YAxis(
title='Percent',
type='linear',
autorange=True,
overlaying='y',
side='right'
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='lab_temp_hum')
return plot_url
def validate_date(d):
try:
datetime.datetime.strptime(d, '%Y-%m-%d %H:%M')
return True
except ValueError:
return False
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
|
mit
| 111,837,944,110,198,480
| 37.843931
| 159
| 0.651883
| false
| 2.865245
| false
| false
| false
|
michaelhkw/incubator-impala
|
tests/comparison/query_profile.py
|
1
|
30490
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from logging import getLogger
from random import choice, randint, random, shuffle
from tests.comparison.db_types import (
Boolean,
Char,
Decimal,
Float,
Int,
TYPES,
Timestamp)
from tests.comparison.query import (
InsertClause,
InsertStatement,
Query,
StatementExecutionMode,
ValuesClause)
from tests.comparison.funcs import (
AnalyticAvg,
AnalyticCount,
AnalyticFirstValue,
AnalyticLag,
AnalyticLastValue,
AnalyticLead,
AnalyticMax,
AnalyticMin,
AnalyticSum,
And,
Coalesce,
Equals,
GreaterThan,
GreaterThanOrEquals,
If,
In,
IsDistinctFrom,
IsNotDistinctFrom,
IsNotDistinctFromOp,
LessThan,
LessThanOrEquals,
NotEquals,
NotIn,
Or,
WindowBoundary)
from tests.comparison.random_val_generator import RandomValGenerator
UNBOUNDED_PRECEDING = WindowBoundary.UNBOUNDED_PRECEDING
PRECEDING = WindowBoundary.PRECEDING
CURRENT_ROW = WindowBoundary.CURRENT_ROW
FOLLOWING = WindowBoundary.FOLLOWING
UNBOUNDED_FOLLOWING = WindowBoundary.UNBOUNDED_FOLLOWING
LOG = getLogger()
class DefaultProfile(object):
def __init__(self):
# Bounds are (min, max) values, the actual value used will be selected from the
# bounds and each value within the range has an equal probability of being selected.
self._bounds = {
'MAX_NESTED_QUERY_COUNT': (0, 2),
'MAX_NESTED_EXPR_COUNT': (0, 2),
'SELECT_ITEM_COUNT': (1, 5),
'WITH_TABLE_COUNT': (1, 3),
'TABLE_COUNT': (1, 2),
'ANALYTIC_LEAD_LAG_OFFSET': (1, 100),
'ANALYTIC_WINDOW_OFFSET': (1, 100),
'INSERT_VALUES_ROWS': (1, 10)}
# Below are interdependent weights used to determine probabilities. The probability
# of any item being selected should be (item weight) / sum(weights). A weight of
# zero means the item will never be selected.
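    # Worked example using SELECT_ITEM_CATEGORY below: with weights
    # {AGG: 3, ANALYTIC: 1, BASIC: 10}, BASIC is chosen with probability
    # 10 / (3 + 1 + 10), i.e. roughly 0.71.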
self._weights = {
'SELECT_ITEM_CATEGORY': {
'AGG': 3,
'ANALYTIC': 1,
'BASIC': 10},
'TYPES': {
Boolean: 1,
Char: 1,
Decimal: 1,
Float: 1,
Int: 10,
Timestamp: 1},
'RELATIONAL_FUNCS': {
# The weights below are "best effort" suggestions. Because QueryGenerator
# prefers to set column types first, and some functions are "supported" only
# by some types, it means functions can be pruned off from this dictionary,
        # and that will shift the probabilities. A quick example is that if a Char
# column is chosen: LessThan may not have a pre-defined signature for Char
# comparison, so LessThan shouldn't be chosen with Char columns. The
# tendency to prune will shift as the "funcs" module is adjusted to
# add/remove signatures.
And: 2,
Coalesce: 2,
Equals: 40,
GreaterThan: 2,
GreaterThanOrEquals: 2,
In: 2,
If: 2,
IsDistinctFrom: 2,
IsNotDistinctFrom: 1,
IsNotDistinctFromOp: 1,
LessThan: 2,
LessThanOrEquals: 2,
NotEquals: 2,
NotIn: 2,
Or: 2},
'CONJUNCT_DISJUNCTS': {
# And and Or appear both under RELATIONAL_FUNCS and CONJUNCT_DISJUNCTS for the
# following reasons:
# 1. And and Or are considered "relational" by virtue of taking two arguments
# and returning a Boolean. The crude signature selection means they could be
# selected, so we describe weights there.
# 2. They are set here explicitly as well so that
# QueryGenerator._create_bool_func_tree() can create a "more realistic"
# expression that has a Boolean operator at the top of the tree by explicitly
# asking for an And or Or.
# IMPALA-3896 tracks a better way to do this.
And: 5,
Or: 1},
'ANALYTIC_WINDOW': {
('ROWS', UNBOUNDED_PRECEDING, None): 1,
('ROWS', UNBOUNDED_PRECEDING, PRECEDING): 2,
('ROWS', UNBOUNDED_PRECEDING, CURRENT_ROW): 1,
('ROWS', UNBOUNDED_PRECEDING, FOLLOWING): 2,
('ROWS', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', PRECEDING, None): 1,
('ROWS', PRECEDING, PRECEDING): 2,
('ROWS', PRECEDING, CURRENT_ROW): 1,
('ROWS', PRECEDING, FOLLOWING): 2,
('ROWS', PRECEDING, UNBOUNDED_FOLLOWING): 2,
('ROWS', CURRENT_ROW, None): 1,
('ROWS', CURRENT_ROW, CURRENT_ROW): 1,
('ROWS', CURRENT_ROW, FOLLOWING): 2,
('ROWS', CURRENT_ROW, UNBOUNDED_FOLLOWING): 2,
('ROWS', FOLLOWING, FOLLOWING): 2,
('ROWS', FOLLOWING, UNBOUNDED_FOLLOWING): 2,
# Ranges not yet supported
('RANGE', UNBOUNDED_PRECEDING, None): 0,
('RANGE', UNBOUNDED_PRECEDING, PRECEDING): 0,
('RANGE', UNBOUNDED_PRECEDING, CURRENT_ROW): 0,
('RANGE', UNBOUNDED_PRECEDING, FOLLOWING): 0,
('RANGE', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', PRECEDING, None): 0,
('RANGE', PRECEDING, PRECEDING): 0,
('RANGE', PRECEDING, CURRENT_ROW): 0,
('RANGE', PRECEDING, FOLLOWING): 0,
('RANGE', PRECEDING, UNBOUNDED_FOLLOWING): 0,
('RANGE', CURRENT_ROW, None): 0,
('RANGE', CURRENT_ROW, CURRENT_ROW): 0,
('RANGE', CURRENT_ROW, FOLLOWING): 0,
('RANGE', CURRENT_ROW, UNBOUNDED_FOLLOWING): 0,
('RANGE', FOLLOWING, FOLLOWING): 0,
('RANGE', FOLLOWING, UNBOUNDED_FOLLOWING): 0},
'JOIN': {
'INNER': 90,
'LEFT': 30,
'RIGHT': 10,
'FULL_OUTER': 3,
'CROSS': 1},
'SUBQUERY_PREDICATE': {
('Exists', 'AGG', 'CORRELATED'): 0, # Not supported
('Exists', 'AGG', 'UNCORRELATED'): 1,
('Exists', 'NON_AGG', 'CORRELATED'): 1,
('Exists', 'NON_AGG', 'UNCORRELATED'): 1,
('NotExists', 'AGG', 'CORRELATED'): 0, # Not supported
('NotExists', 'AGG', 'UNCORRELATED'): 0, # Not supported
('NotExists', 'NON_AGG', 'CORRELATED'): 1,
('NotExists', 'NON_AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'AGG', 'CORRELATED'): 0, # Not supported
('In', 'AGG', 'UNCORRELATED'): 0, # Not supported
('In', 'NON_AGG', 'CORRELATED'): 1,
('In', 'NON_AGG', 'UNCORRELATED'): 1,
('NotIn', 'AGG', 'CORRELATED'): 0, # Not supported
('NotIn', 'AGG', 'UNCORRELATED'): 1,
('NotIn', 'NON_AGG', 'CORRELATED'): 1,
('NotIn', 'NON_AGG', 'UNCORRELATED'): 1,
('Scalar', 'AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'AGG', 'UNCORRELATED'): 1,
('Scalar', 'NON_AGG', 'CORRELATED'): 0, # Not supported
('Scalar', 'NON_AGG', 'UNCORRELATED'): 1},
'QUERY_EXECUTION': { # Used by the discrepancy searcher
StatementExecutionMode.CREATE_TABLE_AS: 1,
StatementExecutionMode.CREATE_VIEW_AS: 1,
StatementExecutionMode.SELECT_STATEMENT: 10},
'STATEMENT': {
# TODO: Eventually make this a mix of DML and SELECT (IMPALA-4601)
Query: 1},
'INSERT_SOURCE_CLAUSE': {
Query: 3,
ValuesClause: 1},
'INSERT_COLUMN_LIST': {
'partial': 3,
'none': 1},
'VALUES_ITEM_EXPR': {
'constant': 1,
'function': 2},
'INSERT_UPSERT': {
InsertClause.CONFLICT_ACTION_IGNORE: 1,
InsertClause.CONFLICT_ACTION_UPDATE: 3}}
# On/off switches
self._flags = {
'ANALYTIC_DESIGNS': {
'TOP_LEVEL_QUERY_WITHOUT_LIMIT': True,
'DETERMINISTIC_ORDER_BY': True,
'NO_ORDER_BY': True,
'ONLY_SELECT_ITEM': True,
'UNBOUNDED_WINDOW': True,
'RANK_FUNC': True}}
# Independent probabilities where 1 means 100%. These values may be ignored depending
# on the context. For example, GROUP_BY is almost always ignored and instead
# determined by the SELECT item weights above, since mixing aggregate and
# non-aggregate items requires the use of a GROUP BY. The GROUP_BY option below is
# only applied if all of the SELECT items are non-aggregate.
self._probabilities = {
'OPTIONAL_QUERY_CLAUSES': {
'WITH': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'FROM': 1,
'WHERE': 0.5,
'GROUP_BY': 0.1, # special case, doesn't really do much, see comment above
'HAVING': 0.25,
'UNION': 0.1,
'ORDER_BY': 0.1},
'OPTIONAL_ANALYTIC_CLAUSES': {
'PARTITION_BY': 0.5,
'ORDER_BY': 0.5,
'WINDOW': 0.5}, # will only be used if ORDER BY is chosen
'MISC': {
'INLINE_VIEW': 0.1, # MAX_NESTED_QUERY_COUNT bounds take precedence
'SELECT_DISTINCT': 0.1,
'SCALAR_SUBQUERY': 0.1,
'ONLY_USE_EQUALITY_JOIN_PREDICATES': 0.8,
'ONLY_USE_AGGREGATES_IN_HAVING_CLAUSE': 0.7,
'UNION_ALL': 0.5}} # Determines use of "ALL" but not "UNION"
self.__type_weights = {}
self.constant_generator = RandomValGenerator()
def _get_config_value(self, start_config, *keys):
value = start_config
for key in keys:
value = value[key]
return value
def weights(self, *keys):
'''Convenience method for getting the values of named weights'''
return self._get_config_value(self._weights, *keys)
def bounds(self, *keys):
'''Convenience method for getting the values of named bounds'''
return self._get_config_value(self._bounds, *keys)
def probability(self, *keys):
'''Convenience method for getting the value of named probabilities'''
return self._get_config_value(self._probabilities, *keys)
def _choose_from_bounds(self, *bounds):
'''Returns a value that is within the given bounds. Each value has an equal chance
of being chosen.
'''
if isinstance(bounds[0], str):
lower, upper = self.bounds(*bounds)
else:
lower, upper = bounds
return randint(lower, upper)
def _choose_from_weights(self, *weight_args):
'''Returns a value that is selected from the keys of weights with the probability
determined by the values of weights.
'''
if isinstance(weight_args[0], str):
weights = self.weights(*weight_args)
else:
weights = weight_args[0]
total_weight = sum(weights.itervalues())
numeric_choice = randint(1, total_weight)
for choice_, weight in weights.iteritems():
if weight <= 0:
continue
if numeric_choice <= weight:
return choice_
numeric_choice -= weight
def _choose_from_filtered_weights(self, filter, *weights):
'''Convenience method, apply the given filter before choosing a value.'''
if isinstance(weights[0], str):
weights = self.weights(*weights)
else:
weights = weights[0]
return self._choose_from_weights(dict((choice_, weight) for choice_, weight
in weights.iteritems() if filter(choice_)))
def _decide_from_probability(self, *keys):
return random() < self.probability(*keys)
def get_max_nested_query_count(self):
'''Return the maximum number of queries the top level query may contain.'''
return self._choose_from_bounds('MAX_NESTED_QUERY_COUNT')
def use_with_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WITH')
def only_use_equality_join_predicates(self):
return self._decide_from_probability('MISC', 'ONLY_USE_EQUALITY_JOIN_PREDICATES')
def only_use_aggregates_in_having_clause(self):
return self._decide_from_probability('MISC', 'ONLY_USE_AGGREGATES_IN_HAVING_CLAUSE')
def get_with_clause_table_ref_count(self):
'''Return the number of table ref entries a WITH clause should contain.'''
return self._choose_from_bounds('WITH_TABLE_COUNT')
def get_select_item_count(self):
return self._choose_from_bounds('SELECT_ITEM_COUNT')
def choose_nested_expr_count(self):
return self._choose_from_bounds('MAX_NESTED_EXPR_COUNT')
def allowed_analytic_designs(self):
return [design for design, is_enabled in self._flags['ANALYTIC_DESIGNS'].iteritems()
if is_enabled]
def use_partition_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'PARTITION_BY')
def use_order_by_clause_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'ORDER_BY')
def use_window_in_analytic(self):
return self._decide_from_probability('OPTIONAL_ANALYTIC_CLAUSES', 'WINDOW')
def choose_window_type(self):
return self._choose_from_weights('ANALYTIC_WINDOW')
def get_window_offset(self):
return self._choose_from_bounds('ANALYTIC_WINDOW_OFFSET')
def get_offset_for_analytic_lead_or_lag(self):
return self._choose_from_bounds('ANALYTIC_LEAD_LAG_OFFSET')
def get_table_count(self):
return self._choose_from_bounds('TABLE_COUNT')
def use_inline_view(self):
return self._decide_from_probability('MISC', 'INLINE_VIEW')
def choose_table(self, table_exprs):
return choice(table_exprs)
def choose_join_type(self, join_types):
return self._choose_from_filtered_weights(
lambda join_type: join_type in join_types, 'JOIN')
def choose_join_condition_count(self):
return max(1, self._choose_from_bounds('MAX_NESTED_EXPR_COUNT'))
def use_where_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'WHERE')
def use_scalar_subquery(self):
return self._decide_from_probability('MISC', 'SCALAR_SUBQUERY')
def choose_subquery_predicate_category(self, func_name, allow_correlated):
weights = self.weights('SUBQUERY_PREDICATE')
func_names = set(name for name, _, _ in weights.iterkeys())
if func_name not in func_names:
func_name = 'Scalar'
allow_agg = self.weights('SELECT_ITEM_CATEGORY').get('AGG', 0)
if allow_correlated and self.bounds('TABLE_COUNT')[1] == 0:
allow_correlated = False
weights = dict(((name, use_agg, use_correlated), weight)
for (name, use_agg, use_correlated), weight in weights.iteritems()
if name == func_name and
(allow_agg or use_agg == 'NON_AGG') and
weight)
if weights:
return self._choose_from_weights(weights)
def use_distinct(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_distinct_in_func(self):
return self._decide_from_probability('MISC', 'SELECT_DISTINCT')
def use_group_by_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'GROUP_BY')
def use_having_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'HAVING')
def use_union_clause(self):
return self._decide_from_probability('OPTIONAL_QUERY_CLAUSES', 'UNION')
def use_union_all(self):
return self._decide_from_probability('MISC', 'UNION_ALL')
def get_query_execution(self):
return self._choose_from_weights('QUERY_EXECUTION')
def use_having_without_groupby(self):
return True
def use_nested_with(self):
return True
def use_lateral_join(self):
return False
def use_boolean_expr_for_lateral_join(self):
return False
def get_num_boolean_exprs_for_lateral_join(self):
return False
# Workaround for Hive null ordering differences, and lack of 'NULL FIRST', 'NULL LAST'
# specifications. The ref db will order nulls as specified for ASC sorting to make it
  # identical to Hive. Valid return values are: 'BEFORE', 'AFTER', or 'DEFAULT',
# the latter means no specification needed.
def nulls_order_asc(self):
return 'DEFAULT'
def choose_val_expr(self, val_exprs, types=TYPES):
if not val_exprs:
      raise Exception('At least one value is required')
if not types:
raise Exception('At least one type is required')
available_types = set(types) & set(val_exprs.by_type)
if not available_types:
raise Exception('None of the provided values return any of the required types')
val_type = self.choose_type(available_types)
return choice(val_exprs.by_type[val_type])
def choose_constant(self, return_type=None, allow_null=True):
if not return_type:
return_type = self.choose_type()
while True:
val = self.constant_generator.generate_val(return_type)
if val is None and not allow_null:
continue
return return_type(val)
def choose_type(self, types=TYPES):
type_weights = self.weights('TYPES')
weights = dict((type_, type_weights[type_]) for type_ in types)
if not weights:
raise Exception('None of the requested types are enabled')
return self._choose_from_weights(weights)
def choose_conjunct_disjunct_fill_ratio(self):
'''Return the ratio of ANDs and ORs to use in a boolean function tree. For example,
when creating a WHERE condition that consists of 10 nested functions, a ratio of
0.1 means 1 out of the 10 functions in the WHERE clause will be an AND or OR.
'''
return random() * random()
def choose_relational_func_fill_ratio(self):
'''Return the ratio of relational functions to use in a boolean function tree. This
ratio is applied after 'choose_conjunct_disjunct_fill_ratio()'.
'''
return random() * random()
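  # Note that both fill-ratio methods above return random() * random() rather than a
  # single uniform draw, which biases the ratio toward small values: the expected value
  # of the product of two independent uniform(0, 1) draws is 0.25. A minimal sketch to
  # check that empirically (illustrative only, not part of the generator):
  #
  #   from random import random
  #   samples = [random() * random() for _ in xrange(100000)]
  #   print(sum(samples) / len(samples))  # roughly 0.25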
def choose_conjunct_disjunct(self):
return self._choose_from_weights('CONJUNCT_DISJUNCTS')
def choose_relational_func_signature(self, signatures):
'''Return a relational signature chosen from "signatures". A signature is considered
to be relational if it returns a boolean and accepts more than one argument.
'''
if not signatures:
raise Exception('At least one signature is required')
filtered_signatures = filter(
lambda s: s.return_type == Boolean \
and len(s.args) > 1 \
and not any(a.is_subquery for a in s.args),
signatures)
if not filtered_signatures:
raise Exception(
'None of the provided signatures corresponded to a relational function')
func_weights = self.weights('RELATIONAL_FUNCS')
missing_funcs = set(s.func for s in filtered_signatures) - set(func_weights)
if missing_funcs:
raise Exception("Weights are missing for functions: {0}".format(missing_funcs))
return self.choose_func_signature(filtered_signatures,
self.weights('RELATIONAL_FUNCS'))
def choose_func_signature(self, signatures, _func_weights=None):
'''Return a signature chosen from "signatures".'''
if not signatures:
raise Exception('At least one signature is required')
type_weights = self.weights('TYPES')
func_weights = _func_weights
if func_weights:
distinct_funcs_in_signatures = set([s.func for s in signatures])
pruned_func_weights = {f: func_weights[f] for f in distinct_funcs_in_signatures}
func_weights = pruned_func_weights
else:
# First a function will be chosen then a signature. This is done so that the number
# of signatures a function has doesn't influence its likelihood of being chosen.
# Functions will be weighted based on the weight of the types in their arguments.
# The weights will be normalized by the number of arguments in the signature. The
      # weight of a function will be the maximum weight out of all of its signatures.
# If any signature has a type with a weight of zero, the signature will not be used.
#
# Example: type_weights = {Int: 10, Float: 1},
# funcs = [foo(Int), foo(Float), bar(Int, Float)]
#
# max signature length = 2 # from bar(Int, Float)
# weight of foo(Int) = (10 * 2)
# weight of foo(Float) = (1 * 2)
# weight of bar(Int, Float) = ((10 + 1) * 1)
# func_weights = {foo: 20, bar: 11}
#
      # Note that this only selects a function; the function signature will be selected
      # later. This is done to prevent functions with a greater number of signatures from
      # being selected more frequently.
func_weights = dict()
# The length of the signature in func_weights
signature_length_by_func = dict()
for signature in signatures:
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if not signature_weight:
continue
if (signature.func not in func_weights or
signature_weight > func_weights[signature.func]):
func_weights[signature.func] = signature_weight
signature_length_by_func[signature.func] = signature_length
if not func_weights:
raise Exception('All functions disallowed based on signature types')
distinct_signature_lengths = set(signature_length_by_func.values())
for func, weight in func_weights.iteritems():
signature_length = signature_length_by_func[func]
func_weights[func] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
func_weights[func])
func = self._choose_from_weights(func_weights)
# Same idea as above but for the signatures of the selected function.
signature_weights = dict()
signature_lengths = dict()
for idx, signature in enumerate(func.signatures()):
if signature not in signatures:
continue
signature_weight = type_weights[signature.return_type]
signature_length = 1
for arg in signature.args:
if arg.is_subquery:
for subtype in arg.type:
signature_weight *= type_weights[subtype]
signature_length += 1
else:
signature_weight *= type_weights[arg.type]
signature_length += 1
if signature_weight:
signature_weights[idx] = signature_weight
signature_lengths[idx] = signature_length
distinct_signature_lengths = set(signature_lengths.values())
for idx, weight in signature_weights.iteritems():
signature_length = signature_lengths[idx]
signature_weights[idx] = reduce(
lambda x, y: x * y,
distinct_signature_lengths - set([signature_length]),
signature_weights[idx])
idx = self._choose_from_weights(signature_weights)
return func.signatures()[idx]
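  # To make the normalization step above concrete: suppose the distinct signature
  # lengths encountered are {2, 3}. A function whose heaviest signature has length 2
  # gets its weight multiplied by 3, and one whose heaviest signature has length 3 is
  # multiplied by 2, i.e. each weight is scaled by the product of the *other* distinct
  # lengths. A minimal sketch with hypothetical weights (illustrative only):
  #
  #   distinct_lengths = set([2, 3])
  #   weights = {'foo': 20, 'bar': 11}   # max signature weight per function
  #   lengths = {'foo': 2, 'bar': 3}     # length of that heaviest signature
  #   for name in weights:
  #     for other in distinct_lengths - set([lengths[name]]):
  #       weights[name] *= other
  #   # weights == {'foo': 60, 'bar': 22}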
def allow_func_signature(self, signature):
weights = self.weights('TYPES')
if not weights[signature.return_type]:
return False
for arg in signature.args:
if arg.is_subquery:
if not all(weights[subtype] for subtype in arg.type):
return False
elif not weights[arg.type]:
return False
return True
def get_allowed_join_signatures(self, signatures):
"""
Returns all the function signatures that are allowed inside a JOIN clause. This
    method is mutually exclusive with only_use_equality_join_predicates. The results of
    this method are ignored if only_use_equality_join_predicates returns True.
"""
return signatures
def is_non_equality_join_predicate(self, func):
"""
Returns True if the given func is considered a non-equality join condition.
"""
return func in (GreaterThan, GreaterThanOrEquals, In,
IsNotDistinctFrom, IsNotDistinctFromOp, LessThan,
LessThanOrEquals, NotEquals, NotIn)
def get_analytic_funcs_that_cannot_contain_aggs(self):
"""
Returns a list of analytic functions that should not contain aggregate functions
"""
return None
def choose_statement(self):
return self._choose_from_weights('STATEMENT')
def choose_insert_source_clause(self):
"""
Returns whether we generate an INSERT/UPSERT SELECT or an INSERT/UPSERT VALUES
"""
return self._choose_from_weights('INSERT_SOURCE_CLAUSE')
def choose_insert_column_list(self, table):
"""
Decide whether or not an INSERT/UPSERT will be in the form of:
INSERT/UPSERT INTO table SELECT|VALUES ...
or
INSERT/UPSERT INTO table (col1, col2, ...) SELECT|VALUES ...
If the second form, the column list is shuffled. The column list will always contain
the primary key columns and between 0 and all additional columns.
"""
if 'partial' == self._choose_from_weights('INSERT_COLUMN_LIST'):
columns_to_insert = list(table.primary_keys)
min_additional_insert_cols = 0 if columns_to_insert else 1
remaining_columns = [col for col in table.cols if not col.is_primary_key]
shuffle(remaining_columns)
additional_column_count = randint(min_additional_insert_cols,
len(remaining_columns))
columns_to_insert.extend(remaining_columns[:additional_column_count])
shuffle(columns_to_insert)
return columns_to_insert
else:
return None
def choose_insert_values_row_count(self):
"""
Choose the number of rows to insert in an INSERT/UPSERT VALUES
"""
return self._choose_from_bounds('INSERT_VALUES_ROWS')
def choose_values_item_expr(self):
"""
    For a VALUES clause, choose whether a particular item in a particular row will be a
constant or a function.
"""
return self._choose_from_weights('VALUES_ITEM_EXPR')
def choose_insert_vs_upsert(self):
"""
Choose whether a particular insertion-type statement will be INSERT or UPSERT.
"""
return self._choose_from_weights('INSERT_UPSERT')
class ImpalaNestedTypesProfile(DefaultProfile):
def __init__(self):
super(ImpalaNestedTypesProfile, self).__init__()
self._probabilities['OPTIONAL_QUERY_CLAUSES']['WITH'] = 0.3
self._probabilities['MISC']['INLINE_VIEW'] = 0.3
def use_lateral_join(self):
return random() < 0.5
def use_boolean_expr_for_lateral_join(self):
return random() < 0.2
def get_num_boolean_exprs_for_lateral_join(self):
if random() < 0.8:
return 0
result = 1
while random() < 0.6:
result += 1
return result
def get_table_count(self):
num = 1
while random() < (0.85 ** num):
num += 1
return num
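# ImpalaNestedTypesProfile.get_table_count() above draws the table count from a
# decaying scheme: starting at 1, the count is incremented from n to n + 1 while a
# uniform draw stays below 0.85 ** n. That gives P(count >= 2) = 0.85 and
# P(count >= 3) = 0.85 * 0.85 ** 2 ~= 0.61, so multi-table queries dominate while very
# large joins quickly become rare. A quick empirical check (illustrative only):
#
#   counts = [ImpalaNestedTypesProfile().get_table_count() for _ in xrange(10000)]
#   print(sum(1 for c in counts if c >= 3) / float(len(counts)))  # roughly 0.61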
# This profile was added for ad-hoc testing.
class TestFunctionProfile(DefaultProfile):
def choose_func_signature(self, signatures):
if not signatures:
raise Exception('At least one signature is required')
preferred_signatures = filter(lambda s: "DistinctFrom" in s.func._NAME, signatures)
if preferred_signatures:
signatures = preferred_signatures
return super(TestFunctionProfile, self).choose_func_signature(signatures)
class HiveProfile(DefaultProfile):
def __init__(self):
super(HiveProfile, self).__init__()
self._probabilities['MISC']['ONLY_USE_EQUALITY_JOIN_PREDICATES'] = 0
def use_having_without_groupby(self):
return False
def use_nested_with(self):
return False
def nulls_order_asc(self):
return 'BEFORE'
def allow_func_signature(self, signature):
if signature.func._NAME.startswith('DateAdd'):
return False
if signature.func._NAME in ('Greatest', 'Least'):
type = signature.return_type
argtypes = [arg.type for arg in signature.args]
for argtype in argtypes:
if type is None:
type = argtype
continue
else:
if type != argtype:
return False
return DefaultProfile.allow_func_signature(self, signature)
def get_allowed_join_signatures(self, signatures):
"""
Restricts the function signatures inside a JOIN clause to either be an Equals
operator, an And operator, or any operator that only takes in one argument. The reason
is that Hive only supports equi-joins, does not allow OR operators inside a JOIN, and
does not allow any other operator that operates over multiple columns.
The reason ONLY_USE_EQUALITY_JOIN_PREDICATES is not sufficient to guarantee this is
that Hive needs to restrict the functions used based on the argument size of a
function.
"""
return [signature for signature in signatures if
signature.func in (Equals, And) or len(signature.args) == 1]
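  # For example, under this filter Equals(a, b) and And(x, y) remain usable join
  # predicates, while Or(x, y) or LessThan(a, b) would be filtered out, since Hive
  # only supports equi-joins and rejects OR inside a JOIN condition.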
def get_analytic_funcs_that_cannot_contain_aggs(self):
"""
Hive does not support aggregate functions inside AVG, COUNT, FIRSTVALUE, LAG,
LASTVALUE, LEAD, MAX, MIN, or SUM functions
"""
return (AnalyticAvg, AnalyticCount, AnalyticFirstValue, AnalyticLag,
AnalyticLastValue, AnalyticLead, AnalyticMax, AnalyticMin, AnalyticSum)
class DMLOnlyProfile(DefaultProfile):
"""
Profile that only executes DML statements
TODO: This will be useful for testing DML; eventually this should be folded into the
default profile. (IMPALA-4601)
"""
def __init__(self):
super(DMLOnlyProfile, self).__init__()
self._weights.update({
'STATEMENT': {
InsertStatement: 1}})
PROFILES = [var for var in locals().values()
if isinstance(var, type) and var.__name__.endswith('Profile')]
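# The PROFILES list above collects every *Profile class defined in this module. A
# caller could map profile names onto these classes with something like the following
# sketch (illustrative only; the actual lookup used by the query generator may differ):
#
#   profiles_by_name = dict((cls.__name__, cls) for cls in PROFILES)
#   profile = profiles_by_name['HiveProfile']()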
|
apache-2.0
| 5,076,192,376,453,511,000
| 37.546144
| 90
| 0.638373
| false
| 3.812203
| false
| false
| false
|
karlch/vimiv
|
vimiv/thumbnail_manager.py
|
1
|
10586
|
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Provides classes to store and load thumbnails from a shared cache.
The ThumbnailStore transparently creates and loads thumbnails according to the
freedesktop.org thumbnail management standard.
The ThumbnailManager provides an asynchronous mechanism to load thumbnails from
the store.
If possible, you should avoid using the store directly but use the manager
instead.
"""
import collections
import hashlib
import os
import tempfile
from multiprocessing.pool import ThreadPool as Pool
from gi._error import GError
from gi.repository import GdkPixbuf, GLib, Gtk
from gi.repository.GdkPixbuf import Pixbuf
from vimiv.helpers import get_user_cache_dir
ThumbTuple = collections.namedtuple('ThumbTuple', ['original', 'thumbnail'])
class ThumbnailManager:
"""Provides an asynchronous mechanism to load thumbnails.
Attributes:
thumbnail_store: ThumbnailStore class with the loading mechanism.
        large: The thumbnail managing standard specifies two thumbnail sizes:
            256x256 (large) and 128x128 (normal).
default_icon: Default icon if thumbnails are not yet loaded.
        error_icon: The path to the icon which is used when thumbnail creation
fails.
"""
_cpu_count = os.cpu_count()
if _cpu_count is None:
_cpu_count = 1
elif _cpu_count > 1:
_cpu_count -= 1
_thread_pool = Pool(_cpu_count)
_cache = {}
def __init__(self, large=True):
"""Construct a new ThumbnailManager.
Args:
large: Size of thumbnails that are created. If true 256x256 else
128x128.
"""
super(ThumbnailManager, self).__init__()
self.thumbnail_store = ThumbnailStore(large=large)
# Default icon if thumbnail creation fails
icon_theme = Gtk.IconTheme.get_default()
self.error_icon = icon_theme.lookup_icon("dialog-error", 256,
0).get_filename()
self.default_icon = icon_theme.lookup_icon("image-x-generic", 256,
0).get_filename()
def _do_get_thumbnail_at_scale(self, source_file, size, callback, index,
ignore_cache=False):
if not ignore_cache and source_file in self._cache:
pixbuf = self._cache[source_file]
else:
thumbnail_path = self.thumbnail_store.get_thumbnail(source_file,
ignore_cache)
if thumbnail_path is None:
thumbnail_path = self.error_icon
pixbuf = Pixbuf.new_from_file(thumbnail_path)
self._cache[source_file] = pixbuf
        if pixbuf.get_height() != size and pixbuf.get_width() != size:
pixbuf = self.scale_pixbuf(pixbuf, size)
return callback, pixbuf, index
@staticmethod
def scale_pixbuf(pixbuf, size):
"""Scale the pixbuf to the given size keeping the aspect ratio.
        Either the width or the height of the returned pixbuf is `size` pixels,
depending on the aspect ratio.
Args:
pixbuf: The pixbuf to scale
size: The size of the new width or height
Return:
The scaled pixbuf.
"""
width = size
height = size
ratio = pixbuf.get_width() / pixbuf.get_height()
if ratio > 1:
height /= ratio
else:
width *= ratio
pixbuf = pixbuf.scale_simple(width, height,
GdkPixbuf.InterpType.BILINEAR)
return pixbuf
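    # Worked example for scale_pixbuf(): a 400x200 source scaled with size=256 has
    # ratio 2.0, so the result is 256x128; a 200x400 source (ratio 0.5) becomes
    # 128x256. In both cases the longer edge ends up at `size` and the aspect ratio
    # is preserved, e.g. (illustrative only):
    #
    #   thumb = ThumbnailManager.scale_pixbuf(pixbuf, 256)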
@staticmethod
def _do_callback(result):
GLib.idle_add(*result)
def get_thumbnail_at_scale_async(self, filename, size, callback, index,
ignore_cache=False):
"""Create the thumbnail for 'filename' and return it via 'callback'.
Creates the thumbnail for the given filename at the given size and
then calls the given callback function with the resulting pixbuf.
Args:
filename: The filename to get the thumbnail for
size: The size the returned pixbuf is scaled to
            callback: A callable of form callback(pixbuf, index)
            index: The index that is passed through unchanged to callback
ignore_cache: If true, the builtin in-memory cache is bypassed and
the thumbnail file is loaded from disk
"""
self._thread_pool.apply_async(self._do_get_thumbnail_at_scale,
(filename, size, callback, index,
ignore_cache),
callback=self._do_callback)
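# A minimal usage sketch for ThumbnailManager (illustrative only; the file path and
# the callback body are hypothetical). The callback is delivered on the GTK main loop
# via GLib.idle_add with the scaled pixbuf and the index passed through:
#
#   manager = ThumbnailManager(large=False)
#   def on_thumbnail(pixbuf, index):
#       liststore[index][0] = pixbuf  # e.g. update a Gtk.ListStore cell
#   manager.get_thumbnail_at_scale_async("/tmp/image.jpg", 128, on_thumbnail, 0)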
class ThumbnailStore(object):
"""Implements freedesktop.org's Thumbnail Managing Standard."""
KEY_URI = "Thumb::URI"
KEY_MTIME = "Thumb::MTime"
KEY_SIZE = "Thumb::Size"
KEY_WIDTH = "Thumb::Image::Width"
KEY_HEIGHT = "Thumb::Image::Height"
def __init__(self, large=True):
"""Construct a new ThumbnailStore.
Args:
large: Size of thumbnails that are created. If true 256x256 else
128x128.
"""
super(ThumbnailStore, self).__init__()
import vimiv
self.base_dir = os.path.join(get_user_cache_dir(), "thumbnails")
self.fail_dir = os.path.join(
self.base_dir, "fail", "vimiv-" + vimiv.__version__)
self.thumbnail_dir = ""
self.thumb_size = 0
self.use_large_thumbnails(large)
self._ensure_dirs_exist()
def use_large_thumbnails(self, enabled=True):
"""Specify whether this thumbnail store uses large thumbnails.
Large thumbnails have 256x256 pixels and non-large thumbnails 128x128.
Args:
enabled: If true large thumbnails will be used.
"""
if enabled:
self.thumbnail_dir = os.path.join(self.base_dir, "large")
self.thumb_size = 256
else:
self.thumbnail_dir = os.path.join(self.base_dir, "normal")
self.thumb_size = 128
def get_thumbnail(self, filename, ignore_current=False):
"""Get the path of the thumbnail of the given filename.
If the requested thumbnail does not yet exist, it will first be created
before returning its path.
Args:
filename: The filename to get the thumbnail for.
ignore_current: If True, ignore saved thumbnails and force a
                recreation. Needed as transforming images from within thumbnail
                mode may happen in under a second, which is below the resolution
                of the mtime comparison done by _is_current.
Return:
The path of the thumbnail file or None if thumbnail creation failed.
"""
# Don't create thumbnails for thumbnail cache
if filename.startswith(self.base_dir):
return filename
thumbnail_filename = self._get_thumbnail_filename(filename)
thumbnail_path = self._get_thumbnail_path(thumbnail_filename)
if os.access(thumbnail_path, os.R_OK) \
and self._is_current(filename, thumbnail_path) \
and not ignore_current:
return thumbnail_path
fail_path = self._get_fail_path(thumbnail_filename)
if os.path.exists(fail_path):
# We already tried to create a thumbnail for the given file but
# failed; don't try again.
return None
if self._create_thumbnail(filename, thumbnail_filename):
return thumbnail_path
return None
def _ensure_dirs_exist(self):
os.makedirs(self.thumbnail_dir, 0o700, exist_ok=True)
os.makedirs(self.fail_dir, 0o700, exist_ok=True)
def _is_current(self, source_file, thumbnail_path):
source_mtime = str(self._get_source_mtime(source_file))
thumbnail_mtime = self._get_thumbnail_mtime(thumbnail_path)
return source_mtime == thumbnail_mtime
def _get_thumbnail_filename(self, filename):
uri = self._get_source_uri(filename)
return hashlib.md5(bytes(uri, "UTF-8")).hexdigest() + ".png"
@staticmethod
def _get_source_uri(filename):
return "file://" + os.path.abspath(os.path.expanduser(filename))
def _get_thumbnail_path(self, thumbnail_filename):
return os.path.join(self.thumbnail_dir, thumbnail_filename)
def _get_fail_path(self, thumbnail_filename):
return os.path.join(self.fail_dir, thumbnail_filename)
@staticmethod
def _get_source_mtime(src):
return int(os.path.getmtime(src))
def _get_thumbnail_mtime(self, thumbnail_path):
pixbuf = Pixbuf.new_from_file(thumbnail_path)
mtime = pixbuf.get_options()["tEXt::" + self.KEY_MTIME]
return mtime
def _create_thumbnail(self, source_file, thumbnail_filename):
# Cannot access source; create neither thumbnail nor fail file
if not os.access(source_file, os.R_OK):
return False
try:
image = Pixbuf.new_from_file_at_scale(source_file, self.thumb_size,
self.thumb_size, True)
dest_path = self._get_thumbnail_path(thumbnail_filename)
success = True
except GError:
image = Pixbuf.new(GdkPixbuf.Colorspace.RGB, False, 8, 1, 1)
dest_path = self._get_fail_path(thumbnail_filename)
success = False
width = 0
height = 0
try:
_, width, height = GdkPixbuf.Pixbuf.get_file_info(source_file)
except IOError:
pass
options = {
"tEXt::" + self.KEY_URI: str(self._get_source_uri(source_file)),
"tEXt::" + self.KEY_MTIME: str(self._get_source_mtime(source_file)),
"tEXt::" + self.KEY_SIZE: str(os.path.getsize(source_file))
}
if width > 0 and height > 0:
options["tEXt::" + self.KEY_WIDTH] = str(width)
options["tEXt::" + self.KEY_HEIGHT] = str(height)
# First create temporary file and then move it. This avoids problems
# with concurrent access of the thumbnail cache, since "move" is an
# atomic operation
handle, tmp_filename = tempfile.mkstemp(dir=self.base_dir)
os.close(handle)
os.chmod(tmp_filename, 0o600)
image.savev(tmp_filename, "png", list(options.keys()),
list(options.values()))
os.replace(tmp_filename, dest_path)
return success
|
mit
| 974,612,254,628,089,200
| 35.885017
| 80
| 0.601833
| false
| 4.171001
| false
| false
| false
|
OpenKMIP/PyKMIP
|
kmip/pie/objects.py
|
1
|
68704
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import abstractmethod
import sqlalchemy
from sqlalchemy import Column, event, ForeignKey, Integer, String, VARBINARY
from sqlalchemy import Boolean
from sqlalchemy.ext.associationproxy import association_proxy
import binascii
import six
from kmip.core import enums
from kmip.pie import sqltypes as sql
app_specific_info_map = sqlalchemy.Table(
"app_specific_info_map",
sql.Base.metadata,
sqlalchemy.Column(
"managed_object_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"managed_objects.uid",
ondelete="CASCADE"
)
),
sqlalchemy.Column(
"app_specific_info_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"app_specific_info.id",
ondelete="CASCADE"
)
)
)
object_group_map = sqlalchemy.Table(
"object_group_map",
sql.Base.metadata,
sqlalchemy.Column(
"managed_object_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"managed_objects.uid",
ondelete="CASCADE"
)
),
sqlalchemy.Column(
"object_group_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
"object_groups.id",
ondelete="CASCADE"
)
)
)
class ManagedObject(sql.Base):
"""
The abstract base class of the simplified KMIP object hierarchy.
A ManagedObject is a core KMIP object that is the subject of key
management operations. It contains various attributes that are common to
all types of ManagedObjects, including keys, certificates, and various
types of secret or sensitive data.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
value: The value of the ManagedObject. Type varies, usually bytes.
unique_identifier: The string ID of the ManagedObject.
names: A list of names associated with the ManagedObject.
object_type: An enumeration associated with the type of ManagedObject.
"""
__tablename__ = 'managed_objects'
unique_identifier = Column('uid', Integer, primary_key=True)
_object_type = Column('object_type', sql.EnumType(enums.ObjectType))
_class_type = Column('class_type', String(50))
value = Column('value', VARBINARY(1024))
name_index = Column(Integer, default=0)
_names = sqlalchemy.orm.relationship(
"ManagedObjectName",
back_populates="mo",
cascade="all, delete-orphan",
order_by="ManagedObjectName.id"
)
names = association_proxy('_names', 'name')
operation_policy_name = Column(
'operation_policy_name',
String(50),
default='default'
)
sensitive = Column("sensitive", Boolean, default=False)
initial_date = Column(Integer, default=0)
_owner = Column('owner', String(50), default=None)
app_specific_info = sqlalchemy.orm.relationship(
"ApplicationSpecificInformation",
secondary=app_specific_info_map,
back_populates="managed_objects",
order_by="ApplicationSpecificInformation.id",
passive_deletes=True
)
object_groups = sqlalchemy.orm.relationship(
"ObjectGroup",
secondary=object_group_map,
back_populates="managed_objects",
order_by="ObjectGroup.id",
passive_deletes=True
)
__mapper_args__ = {
'polymorphic_identity': 'ManagedObject',
'polymorphic_on': _class_type
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self):
"""
Create a ManagedObject.
"""
self.value = None
self.unique_identifier = None
self.name_index = 0
self.names = list()
self.operation_policy_name = None
self.initial_date = 0
self.sensitive = False
self._object_type = None
self._owner = None
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._application_specific_informations = list()
self._contact_information = None
self._object_groups = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._archive_date = None
self._last_change_date = None
@property
def object_type(self):
"""
Accessor and property definition for the object type attribute.
Returns:
ObjectType: An ObjectType enumeration that corresponds to the
class of the object.
"""
return self._object_type
@object_type.setter
def object_type(self, value):
"""
Set blocker for the object type attribute.
Raises:
AttributeError: Always raised to block setting of attribute.
"""
raise AttributeError("object type cannot be set")
@abstractmethod
def validate(self):
"""
Verify that the contents of the ManagedObject are valid.
"""
pass
@abstractmethod
def __repr__(self):
pass
@abstractmethod
def __str__(self):
pass
@abstractmethod
def __eq__(self, other):
pass
@abstractmethod
def __ne__(self, other):
pass
class CryptographicObject(ManagedObject):
"""
The abstract base class of all ManagedObjects related to cryptography.
A CryptographicObject is a core KMIP object that is the subject of key
management operations. It contains various attributes that are common to
all types of CryptographicObjects, including keys and certificates.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_usage_masks: A list of usage mask enumerations
describing how the CryptographicObject will be used.
"""
__tablename__ = 'crypto_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('managed_objects.uid'),
primary_key=True)
cryptographic_usage_masks = Column('cryptographic_usage_mask',
sql.UsageMaskType)
state = Column('state', sql.EnumType(enums.State))
__mapper_args__ = {
'polymorphic_identity': 'CryptographicObject'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self):
"""
Create a CryptographicObject.
"""
super(CryptographicObject, self).__init__()
self.cryptographic_usage_masks = list()
self.state = enums.State.PRE_ACTIVE
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._digests = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._activation_date = None
self._compromise_date = None
self._compromise_occurrence_date = None
self._deactivation_date = None
self._destroy_date = None
self._fresh = None
self._lease_time = None
self._links = list()
self._revocation_reason = None
class Key(CryptographicObject):
"""
The abstract base class of all ManagedObjects that are cryptographic keys.
A Key is a core KMIP object that is the subject of key management
operations. It contains various attributes that are common to all types of
Keys, including symmetric and asymmetric keys.
For more information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_algorithm: A CryptographicAlgorithm enumeration defining
the algorithm the key should be used with.
cryptographic_length: An int defining the length of the key in bits.
key_format_type: A KeyFormatType enumeration defining the format of
the key value.
key_wrapping_data: A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'keys'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
cryptographic_algorithm = Column(
'cryptographic_algorithm', sql.EnumType(enums.CryptographicAlgorithm))
cryptographic_length = Column('cryptographic_length', Integer)
key_format_type = Column(
'key_format_type', sql.EnumType(enums.KeyFormatType))
# Key wrapping data fields
_kdw_wrapping_method = Column(
'_kdw_wrapping_method',
sql.EnumType(enums.WrappingMethod),
default=None
)
_kdw_eki_unique_identifier = Column(
'_kdw_eki_unique_identifier',
String,
default=None
)
_kdw_eki_cp_block_cipher_mode = Column(
'_kdw_eki_cp_block_cipher_mode',
sql.EnumType(enums.BlockCipherMode),
default=None
)
_kdw_eki_cp_padding_method = Column(
'_kdw_eki_cp_padding_method',
sql.EnumType(enums.PaddingMethod),
default=None
)
_kdw_eki_cp_hashing_algorithm = Column(
'_kdw_eki_cp_hashing_algorithm',
sql.EnumType(enums.HashingAlgorithm),
default=None
)
_kdw_eki_cp_key_role_type = Column(
'_kdw_eki_cp_key_role_type',
sql.EnumType(enums.KeyRoleType),
default=None
)
_kdw_eki_cp_digital_signature_algorithm = Column(
'_kdw_eki_cp_digital_signature_algorithm',
sql.EnumType(enums.DigitalSignatureAlgorithm),
default=None
)
_kdw_eki_cp_cryptographic_algorithm = Column(
'_kdw_eki_cp_cryptographic_algorithm',
sql.EnumType(enums.CryptographicAlgorithm),
default=None
)
_kdw_eki_cp_random_iv = Column(
'_kdw_eki_cp_random_iv',
Boolean,
default=None
)
_kdw_eki_cp_iv_length = Column(
'_kdw_eki_cp_iv_length',
Integer,
default=None
)
_kdw_eki_cp_tag_length = Column(
'_kdw_eki_cp_tag_length',
Integer,
default=None
)
_kdw_eki_cp_fixed_field_length = Column(
'_kdw_eki_cp_fixed_field_length',
Integer,
default=None
)
_kdw_eki_cp_invocation_field_length = Column(
'_kdw_eki_cp_invocation_field_length',
Integer
)
_kdw_eki_cp_counter_length = Column(
'_kdw_eki_cp_counter_length',
Integer,
default=None
)
_kdw_eki_cp_initial_counter_value = Column(
'_kdw_eki_cp_initial_counter_value',
Integer,
default=None
)
_kdw_mski_unique_identifier = Column(
'_kdw_mski_unique_identifier',
String,
default=None
)
_kdw_mski_cp_block_cipher_mode = Column(
'_kdw_mski_cp_block_cipher_mode',
sql.EnumType(enums.BlockCipherMode),
default=None
)
_kdw_mski_cp_padding_method = Column(
'_kdw_mski_cp_padding_method',
sql.EnumType(enums.PaddingMethod),
default=None
)
_kdw_mski_cp_hashing_algorithm = Column(
'_kdw_mski_cp_hashing_algorithm',
sql.EnumType(enums.HashingAlgorithm),
default=None
)
_kdw_mski_cp_key_role_type = Column(
'_kdw_mski_cp_key_role_type',
sql.EnumType(enums.KeyRoleType),
default=None
)
_kdw_mski_cp_digital_signature_algorithm = Column(
'_kdw_mski_cp_digital_signature_algorithm',
sql.EnumType(enums.DigitalSignatureAlgorithm),
default=None
)
_kdw_mski_cp_cryptographic_algorithm = Column(
'_kdw_mski_cp_cryptographic_algorithm',
sql.EnumType(enums.CryptographicAlgorithm),
default=None
)
_kdw_mski_cp_random_iv = Column(
'_kdw_mski_cp_random_iv',
Boolean,
default=None
)
_kdw_mski_cp_iv_length = Column(
'_kdw_mski_cp_iv_length',
Integer,
default=None
)
_kdw_mski_cp_tag_length = Column(
'_kdw_mski_cp_tag_length',
Integer,
default=None
)
_kdw_mski_cp_fixed_field_length = Column(
'_kdw_mski_cp_fixed_field_length',
Integer,
default=None
)
_kdw_mski_cp_invocation_field_length = Column(
'_kdw_mski_cp_invocation_field_length',
Integer,
default=None
)
_kdw_mski_cp_counter_length = Column(
'_kdw_mski_cp_counter_length',
Integer,
default=None
)
_kdw_mski_cp_initial_counter_value = Column(
'_kdw_mski_cp_initial_counter_value',
Integer,
default=None
)
_kdw_mac_signature = Column(
'_kdw_mac_signature',
VARBINARY(1024),
default=None
)
_kdw_iv_counter_nonce = Column(
'_kdw_iv_counter_nonce',
VARBINARY(1024),
default=None
)
_kdw_encoding_option = Column(
'_kdw_encoding_option',
sql.EnumType(enums.EncodingOption),
default=None
)
__mapper_args__ = {
'polymorphic_identity': 'Key'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self, key_wrapping_data=None):
"""
Create a Key object.
Args:
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(Key, self).__init__()
self.cryptographic_algorithm = None
self.cryptographic_length = None
self.key_format_type = None
self.key_wrapping_data = key_wrapping_data
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._cryptographic_parameters = list()
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._usage_limits = None
@property
def key_wrapping_data(self):
"""
Retrieve all of the relevant key wrapping data fields and return them
as a dictionary.
"""
key_wrapping_data = {}
encryption_key_info = {
'unique_identifier': self._kdw_eki_unique_identifier,
'cryptographic_parameters': {
'block_cipher_mode': self._kdw_eki_cp_block_cipher_mode,
'padding_method': self._kdw_eki_cp_padding_method,
'hashing_algorithm': self._kdw_eki_cp_hashing_algorithm,
'key_role_type': self._kdw_eki_cp_key_role_type,
'digital_signature_algorithm':
self._kdw_eki_cp_digital_signature_algorithm,
'cryptographic_algorithm':
self._kdw_eki_cp_cryptographic_algorithm,
'random_iv': self._kdw_eki_cp_random_iv,
'iv_length': self._kdw_eki_cp_iv_length,
'tag_length': self._kdw_eki_cp_tag_length,
'fixed_field_length': self._kdw_eki_cp_fixed_field_length,
'invocation_field_length':
self._kdw_eki_cp_invocation_field_length,
'counter_length': self._kdw_eki_cp_counter_length,
'initial_counter_value':
self._kdw_eki_cp_initial_counter_value
}
}
if not any(encryption_key_info['cryptographic_parameters'].values()):
encryption_key_info['cryptographic_parameters'] = {}
if not any(encryption_key_info.values()):
encryption_key_info = {}
mac_sign_key_info = {
'unique_identifier': self._kdw_mski_unique_identifier,
'cryptographic_parameters': {
'block_cipher_mode': self._kdw_mski_cp_block_cipher_mode,
'padding_method': self._kdw_mski_cp_padding_method,
'hashing_algorithm': self._kdw_mski_cp_hashing_algorithm,
'key_role_type': self._kdw_mski_cp_key_role_type,
'digital_signature_algorithm':
self._kdw_mski_cp_digital_signature_algorithm,
'cryptographic_algorithm':
self._kdw_mski_cp_cryptographic_algorithm,
'random_iv': self._kdw_mski_cp_random_iv,
'iv_length': self._kdw_mski_cp_iv_length,
'tag_length': self._kdw_mski_cp_tag_length,
'fixed_field_length': self._kdw_mski_cp_fixed_field_length,
'invocation_field_length':
self._kdw_mski_cp_invocation_field_length,
'counter_length': self._kdw_mski_cp_counter_length,
'initial_counter_value':
self._kdw_mski_cp_initial_counter_value
}
}
if not any(mac_sign_key_info['cryptographic_parameters'].values()):
mac_sign_key_info['cryptographic_parameters'] = {}
if not any(mac_sign_key_info.values()):
mac_sign_key_info = {}
key_wrapping_data['wrapping_method'] = self._kdw_wrapping_method
key_wrapping_data['encryption_key_information'] = encryption_key_info
key_wrapping_data['mac_signature_key_information'] = mac_sign_key_info
key_wrapping_data['mac_signature'] = self._kdw_mac_signature
key_wrapping_data['iv_counter_nonce'] = self._kdw_iv_counter_nonce
key_wrapping_data['encoding_option'] = self._kdw_encoding_option
if not any(key_wrapping_data.values()):
key_wrapping_data = {}
return key_wrapping_data
@key_wrapping_data.setter
def key_wrapping_data(self, value):
"""
Set the key wrapping data attributes using a dictionary.
"""
if value is None:
value = {}
elif not isinstance(value, dict):
raise TypeError("Key wrapping data must be a dictionary.")
self._kdw_wrapping_method = value.get('wrapping_method')
eki = value.get('encryption_key_information')
if eki is None:
eki = {}
self._kdw_eki_unique_identifier = eki.get('unique_identifier')
eki_cp = eki.get('cryptographic_parameters')
if eki_cp is None:
eki_cp = {}
self._kdw_eki_cp_block_cipher_mode = eki_cp.get('block_cipher_mode')
self._kdw_eki_cp_padding_method = eki_cp.get('padding_method')
self._kdw_eki_cp_hashing_algorithm = eki_cp.get('hashing_algorithm')
self._kdw_eki_cp_key_role_type = eki_cp.get('key_role_type')
self._kdw_eki_cp_digital_signature_algorithm = \
eki_cp.get('digital_signature_algorithm')
self._kdw_eki_cp_cryptographic_algorithm = \
eki_cp.get('cryptographic_algorithm')
self._kdw_eki_cp_random_iv = eki_cp.get('random_iv')
self._kdw_eki_cp_iv_length = eki_cp.get('iv_length')
self._kdw_eki_cp_tag_length = eki_cp.get('tag_length')
self._kdw_eki_cp_fixed_field_length = eki_cp.get('fixed_field_length')
self._kdw_eki_cp_invocation_field_length = \
eki_cp.get('invocation_field_length')
self._kdw_eki_cp_counter_length = eki_cp.get('counter_length')
self._kdw_eki_cp_initial_counter_value = \
eki_cp.get('initial_counter_value')
mski = value.get('mac_signature_key_information')
if mski is None:
mski = {}
self._kdw_mski_unique_identifier = mski.get('unique_identifier')
mski_cp = mski.get('cryptographic_parameters')
if mski_cp is None:
mski_cp = {}
self._kdw_mski_cp_block_cipher_mode = mski_cp.get('block_cipher_mode')
self._kdw_mski_cp_padding_method = mski_cp.get('padding_method')
self._kdw_mski_cp_hashing_algorithm = mski_cp.get('hashing_algorithm')
self._kdw_mski_cp_key_role_type = mski_cp.get('key_role_type')
self._kdw_mski_cp_digital_signature_algorithm = \
mski_cp.get('digital_signature_algorithm')
self._kdw_mski_cp_cryptographic_algorithm = \
mski_cp.get('cryptographic_algorithm')
self._kdw_mski_cp_random_iv = mski_cp.get('random_iv')
self._kdw_mski_cp_iv_length = mski_cp.get('iv_length')
self._kdw_mski_cp_tag_length = mski_cp.get('tag_length')
self._kdw_mski_cp_fixed_field_length = \
mski_cp.get('fixed_field_length')
self._kdw_mski_cp_invocation_field_length = \
mski_cp.get('invocation_field_length')
self._kdw_mski_cp_counter_length = mski_cp.get('counter_length')
self._kdw_mski_cp_initial_counter_value = \
mski_cp.get('initial_counter_value')
self._kdw_mac_signature = value.get('mac_signature')
self._kdw_iv_counter_nonce = value.get('iv_counter_nonce')
self._kdw_encoding_option = value.get('encoding_option')
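# The key_wrapping_data property above reassembles the per-column fields into a nested
# dictionary. A minimal sketch of the expected shape (illustrative only; the
# identifier and enum values are hypothetical examples, not required choices):
#
#   key_wrapping_data = {
#       'wrapping_method': enums.WrappingMethod.ENCRYPT,
#       'encryption_key_information': {
#           'unique_identifier': '42',
#           'cryptographic_parameters': {
#               'block_cipher_mode': enums.BlockCipherMode.NIST_KEY_WRAP
#           }
#       },
#       'encoding_option': enums.EncodingOption.NO_ENCODING
#   }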
class SymmetricKey(Key):
"""
The SymmetricKey class of the simplified KMIP object hierarchy.
A SymmetricKey is a core KMIP object that is the subject of key
management operations. For more information, see Section 2.2 of the KMIP
1.1 specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the SymmetricKey.
cryptographic_length: The length in bits of the SymmetricKey value.
value: The bytes of the SymmetricKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for
SymmetricKey application.
names: The string names of the SymmetricKey.
key_wrapping_data: A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'symmetric_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'SymmetricKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value, masks=None,
name='Symmetric Key', key_wrapping_data=None):
"""
Create a SymmetricKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
masks(list): A list of CryptographicUsageMask enumerations defining
how the key will be used. Optional, defaults to None.
name(string): The string name of the key. Optional, defaults to
'Symmetric Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(SymmetricKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.SYMMETRIC_KEY
self.key_format_type = enums.KeyFormatType.RAW
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.names = [name]
if masks:
self.cryptographic_usage_masks.extend(masks)
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._process_start_date = None
self._protect_stop_date = None
self.validate()
def validate(self):
"""
Verify that the contents of the SymmetricKey object are valid.
Raises:
TypeError: if the types of any SymmetricKey attributes are invalid
ValueError: if the key length and key value length do not match
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
if not self.key_wrapping_data:
if (len(self.value) * 8) != self.cryptographic_length:
msg = "key length ({0}) not equal to key value length ({1})"
msg = msg.format(
self.cryptographic_length,
len(self.value) * 8
)
raise ValueError(msg)
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "SymmetricKey({0}, {1}, {2}, {3})".format(
algorithm,
length,
value,
key_wrapping_data
)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SymmetricKey):
if self.value != other.value:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SymmetricKey):
return not (self == other)
else:
return NotImplemented
event.listen(SymmetricKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
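# A minimal construction sketch for SymmetricKey (illustrative only; the key bytes are
# a dummy value). Note that validate() requires the value length in bits to match
# cryptographic_length unless key wrapping data is supplied:
#
#   key = SymmetricKey(
#       enums.CryptographicAlgorithm.AES,
#       128,
#       b'\x00' * 16,
#       masks=[enums.CryptographicUsageMask.ENCRYPT,
#              enums.CryptographicUsageMask.DECRYPT],
#       name='example-key'
#   )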
class PublicKey(Key):
"""
The PublicKey class of the simplified KMIP object hierarchy.
A PublicKey is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the PublicKey.
cryptographic_length: The length in bits of the PublicKey.
value: The bytes of the PublicKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for PublicKey
application.
names: The list of string names of the PublicKey.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'public_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'PublicKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value,
format_type=enums.KeyFormatType.X_509, masks=None,
name='Public Key', key_wrapping_data=None):
"""
Create a PublicKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
format_type(KeyFormatType): An enumeration defining the format of
the key value. Optional, defaults to enums.KeyFormatType.X_509.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used. Optional, defaults to None.
name(string): The string name of the key. Optional, defaults to
'Public Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(PublicKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.PUBLIC_KEY
self._valid_formats = [
enums.KeyFormatType.RAW,
enums.KeyFormatType.X_509,
enums.KeyFormatType.PKCS_1]
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.key_format_type = format_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_domain_parameters = list()
self.validate()
def validate(self):
"""
Verify that the contents of the PublicKey object are valid.
Raises:
TypeError: if the types of any PublicKey attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
elif not isinstance(self.key_format_type, enums.KeyFormatType):
raise TypeError("key format type must be a KeyFormatType "
"enumeration")
elif self.key_format_type not in self._valid_formats:
raise ValueError("key format type must be one of {0}".format(
self._valid_formats))
# TODO (peter-hamilton) Verify that the key bytes match the key format
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
format_type = "format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "PublicKey({0}, {1}, {2}, {3}, {4})".format(
algorithm, length, value, format_type, key_wrapping_data)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, PublicKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, PublicKey):
return not (self == other)
else:
return NotImplemented
event.listen(PublicKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class PrivateKey(Key):
"""
The PrivateKey class of the simplified KMIP object hierarchy.
A PrivateKey is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
cryptographic_algorithm: The type of algorithm for the PrivateKey.
cryptographic_length: The length in bits of the PrivateKey.
value: The bytes of the PrivateKey.
key_format_type: The format of the key value.
cryptographic_usage_masks: The list of usage mask flags for PrivateKey
application. Optional, defaults to None.
names: The list of string names of the PrivateKey. Optional, defaults
to 'Private Key'.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
"""
__tablename__ = 'private_keys'
unique_identifier = Column('uid', Integer,
ForeignKey('keys.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'PrivateKey'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, algorithm, length, value, format_type, masks=None,
name='Private Key', key_wrapping_data=None):
"""
Create a PrivateKey.
Args:
algorithm(CryptographicAlgorithm): An enumeration identifying the
type of algorithm for the key.
length(int): The length in bits of the key.
value(bytes): The bytes representing the key.
format_type(KeyFormatType): An enumeration defining the format of
the key value.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used.
name(string): The string name of the key.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the key value has been wrapped.
Optional, defaults to None.
"""
super(PrivateKey, self).__init__(
key_wrapping_data=key_wrapping_data
)
self._object_type = enums.ObjectType.PRIVATE_KEY
self._valid_formats = [
enums.KeyFormatType.RAW,
enums.KeyFormatType.PKCS_1,
enums.KeyFormatType.PKCS_8]
self.value = value
self.cryptographic_algorithm = algorithm
self.cryptographic_length = length
self.key_format_type = format_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_domain_parameters = list()
self.validate()
def validate(self):
"""
Verify that the contents of the PrivateKey object are valid.
Raises:
TypeError: if the types of any PrivateKey attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("key value must be bytes")
elif not isinstance(self.cryptographic_algorithm,
enums.CryptographicAlgorithm):
raise TypeError("key algorithm must be a CryptographicAlgorithm "
"enumeration")
elif not isinstance(self.cryptographic_length, six.integer_types):
raise TypeError("key length must be an integer")
elif not isinstance(self.key_format_type, enums.KeyFormatType):
raise TypeError("key format type must be a KeyFormatType "
"enumeration")
elif self.key_format_type not in self._valid_formats:
raise ValueError("key format type must be one of {0}".format(
self._valid_formats))
# TODO (peter-hamilton) Verify that the key bytes match the key format
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"key mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("key name {0} must be a string".format(
position))
def __repr__(self):
algorithm = "algorithm={0}".format(self.cryptographic_algorithm)
length = "length={0}".format(self.cryptographic_length)
value = "value={0}".format(binascii.hexlify(self.value))
format_type = "format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
return "PrivateKey({0}, {1}, {2}, {3}, {4})".format(
algorithm, length, value, format_type, key_wrapping_data)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, PrivateKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, PrivateKey):
return not (self == other)
else:
return NotImplemented
event.listen(PrivateKey._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class SplitKey(Key):
"""
"""
__mapper_args__ = {"polymorphic_identity": "SplitKey"}
__table_args__ = {"sqlite_autoincrement": True}
__tablename__ = "split_keys"
unique_identifier = sqlalchemy.Column(
"uid",
sqlalchemy.Integer,
sqlalchemy.ForeignKey("keys.uid"),
primary_key=True
)
# Split Key object fields
_split_key_parts = sqlalchemy.Column(
"_split_key_parts",
sqlalchemy.Integer,
default=None
)
_key_part_identifier = sqlalchemy.Column(
"_key_part_identifier",
sqlalchemy.Integer,
default=None
)
_split_key_threshold = sqlalchemy.Column(
"_split_key_threshold",
sqlalchemy.Integer,
default=None
)
_split_key_method = sqlalchemy.Column(
"_split_key_method",
sql.EnumType(enums.SplitKeyMethod),
default=None
)
_prime_field_size = sqlalchemy.Column(
"_prime_field_size",
sqlalchemy.BigInteger,
default=None
)
def __init__(self,
cryptographic_algorithm=None,
cryptographic_length=None,
key_value=None,
cryptographic_usage_masks=None,
name="Split Key",
key_format_type=enums.KeyFormatType.RAW,
key_wrapping_data=None,
split_key_parts=None,
key_part_identifier=None,
split_key_threshold=None,
split_key_method=None,
prime_field_size=None):
"""
Create a SplitKey.
Args:
cryptographic_algorithm(enum): A CryptographicAlgorithm enumeration
identifying the type of algorithm for the split key. Required.
cryptographic_length(int): The length in bits of the split key.
Required.
key_value(bytes): The bytes representing the split key. Required.
cryptographic_usage_masks(list): A list of CryptographicUsageMask
enumerations defining how the split key will be used. Optional,
defaults to None.
name(string): The string name of the split key. Optional, defaults
to "Split Key".
key_format_type (enum): A KeyFormatType enumeration specifying the
format of the split key. Optional, defaults to Raw.
key_wrapping_data(dict): A dictionary containing key wrapping data
settings, describing how the split key has been wrapped.
Optional, defaults to None.
split_key_parts (int): An integer specifying the total number of
parts of the split key. Required.
key_part_identifier (int): An integer specifying which key part
of the split key this key object represents. Required.
split_key_threshold (int): An integer specifying the minimum
number of key parts required to reconstruct the split key.
Required.
split_key_method (enum): A SplitKeyMethod enumeration specifying
how the key was split. Required.
prime_field_size (int): A big integer specifying the prime field
size used for the Polynomial Sharing Prime Field split key
method. Optional, defaults to None.
"""
super(SplitKey, self).__init__(key_wrapping_data=key_wrapping_data)
self._object_type = enums.ObjectType.SPLIT_KEY
self.key_format_type = key_format_type
self.value = key_value
self.cryptographic_algorithm = cryptographic_algorithm
self.cryptographic_length = cryptographic_length
self.names = [name]
if cryptographic_usage_masks:
self.cryptographic_usage_masks.extend(cryptographic_usage_masks)
self.split_key_parts = split_key_parts
self.key_part_identifier = key_part_identifier
self.split_key_threshold = split_key_threshold
self.split_key_method = split_key_method
self.prime_field_size = prime_field_size
@property
def split_key_parts(self):
return self._split_key_parts
@split_key_parts.setter
def split_key_parts(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._split_key_parts = value
else:
raise TypeError("The split key parts must be an integer.")
@property
def key_part_identifier(self):
return self._key_part_identifier
@key_part_identifier.setter
def key_part_identifier(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._key_part_identifier = value
else:
raise TypeError("The key part identifier must be an integer.")
@property
def split_key_threshold(self):
return self._split_key_threshold
@split_key_threshold.setter
def split_key_threshold(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._split_key_threshold = value
else:
raise TypeError("The split key threshold must be an integer.")
@property
def split_key_method(self):
return self._split_key_method
@split_key_method.setter
def split_key_method(self, value):
if (value is None) or (isinstance(value, enums.SplitKeyMethod)):
self._split_key_method = value
else:
raise TypeError(
"The split key method must be a SplitKeyMethod enumeration."
)
@property
def prime_field_size(self):
return self._prime_field_size
@prime_field_size.setter
def prime_field_size(self, value):
if (value is None) or (isinstance(value, six.integer_types)):
self._prime_field_size = value
else:
raise TypeError("The prime field size must be an integer.")
def __repr__(self):
cryptographic_algorithm = "cryptographic_algorithm={0}".format(
self.cryptographic_algorithm
)
cryptographic_length = "cryptographic_length={0}".format(
self.cryptographic_length
)
key_value = "key_value={0}".format(binascii.hexlify(self.value))
key_format_type = "key_format_type={0}".format(self.key_format_type)
key_wrapping_data = "key_wrapping_data={0}".format(
self.key_wrapping_data
)
cryptographic_usage_masks = "cryptographic_usage_masks={0}".format(
self.cryptographic_usage_masks
)
names = "name={0}".format(self.names)
split_key_parts = "split_key_parts={0}".format(self.split_key_parts)
key_part_identifier = "key_part_identifier={0}".format(
self.key_part_identifier
)
split_key_threshold = "split_key_threshold={0}".format(
self.split_key_threshold
)
split_key_method = "split_key_method={0}".format(self.split_key_method)
prime_field_size = "prime_field_size={0}".format(self.prime_field_size)
return "SplitKey({0})".format(
", ".join(
[
cryptographic_algorithm,
cryptographic_length,
key_value,
key_format_type,
key_wrapping_data,
cryptographic_usage_masks,
names,
split_key_parts,
key_part_identifier,
split_key_threshold,
split_key_method,
prime_field_size
]
)
)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SplitKey):
if self.value != other.value:
return False
elif self.key_format_type != other.key_format_type:
return False
elif self.cryptographic_algorithm != other.cryptographic_algorithm:
return False
elif self.cryptographic_length != other.cryptographic_length:
return False
elif self.key_wrapping_data != other.key_wrapping_data:
return False
elif self.cryptographic_usage_masks != \
other.cryptographic_usage_masks:
return False
elif self.names != other.names:
return False
elif self.split_key_parts != other.split_key_parts:
return False
elif self.key_part_identifier != other.key_part_identifier:
return False
elif self.split_key_threshold != other.split_key_threshold:
return False
elif self.split_key_method != other.split_key_method:
return False
elif self.prime_field_size != other.prime_field_size:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SplitKey):
return not (self == other)
else:
return NotImplemented
event.listen(
SplitKey._names,
"append",
sql.attribute_append_factory("name_index"),
retval=False
)
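# Illustrative sketch (not part of the original module, kept as a comment so
# the module still imports cleanly): constructing a SplitKey. The keyword
# names mirror the attributes set in __init__; the exact signature is an
# assumption.
#
#     part = SplitKey(
#         cryptographic_algorithm=enums.CryptographicAlgorithm.AES,
#         cryptographic_length=128,
#         key_value=b'\x00' * 16,
#         key_format_type=enums.KeyFormatType.RAW,
#         split_key_parts=3,
#         key_part_identifier=1,
#         split_key_threshold=2,
#         split_key_method=enums.SplitKeyMethod.XOR
#     )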
class Certificate(CryptographicObject):
"""
The Certificate class of the simplified KMIP object hierarchy.
A Certificate is a core KMIP object that is the subject of key management
operations. For more information, see Section 2.2 of the KMIP 1.1
specification.
Attributes:
certificate_type: The type of the Certificate.
value: The bytes of the Certificate.
cryptographic_usage_masks: The list of usage mask flags for
Certificate application.
names: The list of string names of the Certificate.
"""
__tablename__ = 'certificates'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
certificate_type = Column(
'certificate_type', sql.EnumType(enums.CertificateType))
__mapper_args__ = {
'polymorphic_identity': 'Certificate'
}
__table_args__ = {
'sqlite_autoincrement': True
}
@abstractmethod
def __init__(self, certificate_type, value, masks=None,
name='Certificate'):
"""
Create a Certificate.
Args:
certificate_type(CertificateType): An enumeration defining the
type of the certificate.
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate.
"""
super(Certificate, self).__init__()
self._object_type = enums.ObjectType.CERTIFICATE
self.value = value
self.certificate_type = certificate_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._cryptographic_algorithm = None
self._cryptographic_length = None
self._certificate_length = None
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._cryptographic_parameters = list()
self._digital_signature_algorithm = list()
self.validate()
def validate(self):
"""
Verify that the contents of the Certificate object are valid.
Raises:
TypeError: if the types of any Certificate attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("certificate value must be bytes")
elif not isinstance(self.certificate_type,
enums.CertificateType):
raise TypeError("certificate type must be a CertificateType "
"enumeration")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"certificate mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("certificate name {0} must be a string".format(
position))
def __str__(self):
return str(binascii.hexlify(self.value))
class X509Certificate(Certificate):
"""
The X509Certificate class of the simplified KMIP object hierarchy.
An X509Certificate is a core KMIP object that is the subject of key
management operations. For more information, see Section 2.2 of the KMIP
1.1 specification.
Attributes:
value: The bytes of the Certificate.
cryptographic_usage_masks: The list of usage mask flags for
Certificate application.
names: The list of string names of the Certificate.
"""
__tablename__ = 'x509_certificates'
unique_identifier = Column('uid', Integer,
ForeignKey('certificates.uid'),
primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'X509Certificate'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, masks=None, name='X.509 Certificate'):
"""
Create an X509Certificate.
Args:
value(bytes): The bytes representing the certificate.
masks(list): A list of CryptographicUsageMask enumerations
defining how the certificate will be used.
name(string): The string name of the certificate.
"""
super(X509Certificate, self).__init__(
enums.CertificateType.X_509, value, masks, name)
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._x509_certificate_identifier = None
self._x509_certificate_subject = None
self._x509_certificate_issuer = None
self.validate()
def __repr__(self):
certificate_type = "certificate_type={0}".format(self.certificate_type)
value = "value={0}".format(binascii.hexlify(self.value))
return "X509Certificate({0}, {1})".format(certificate_type, value)
def __eq__(self, other):
if isinstance(other, X509Certificate):
if self.value != other.value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, X509Certificate):
return not (self == other)
else:
return NotImplemented
event.listen(X509Certificate._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
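# Illustrative sketch (not part of the original module): an X509Certificate
# only requires the encoded certificate bytes; masks and name are optional.
#
#     cert = X509Certificate(
#         value=b'...DER-encoded certificate bytes...',
#         masks=[enums.CryptographicUsageMask.VERIFY]
#     )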
class SecretData(CryptographicObject):
"""
The SecretData class of the simplified KMIP object hierarchy.
SecretData is one of several CryptographicObjects and is one of the core
KMIP objects that are the subject of key management operations. For more
information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
cryptographic_usage_masks: A list of usage mask enumerations
describing how the CryptographicObject will be used.
data_type: The type of the secret value.
"""
__tablename__ = 'secret_data_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('crypto_objects.uid'),
primary_key=True)
data_type = Column('data_type', sql.EnumType(enums.SecretDataType))
__mapper_args__ = {
'polymorphic_identity': 'SecretData'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, data_type, masks=None, name='Secret Data'):
"""
Create a SecretData object.
Args:
value(bytes): The bytes representing secret data.
data_type(SecretDataType): An enumeration defining the type of the
secret value.
            masks(list): A list of CryptographicUsageMask enumerations
                defining how the secret data will be used.
            name(string): The string name of the secret data.
"""
super(SecretData, self).__init__()
self._object_type = enums.ObjectType.SECRET_DATA
self.value = value
self.data_type = data_type
self.names = [name]
if masks:
self.cryptographic_usage_masks = masks
# All remaining attributes are not considered part of the public API
# and are subject to change.
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self.validate()
def validate(self):
"""
Verify that the contents of the SecretData object are valid.
Raises:
TypeError: if the types of any SecretData attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("secret value must be bytes")
elif not isinstance(self.data_type, enums.SecretDataType):
raise TypeError("secret data type must be a SecretDataType "
"enumeration")
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if not isinstance(mask, enums.CryptographicUsageMask):
position = "({0} in list)".format(i)
raise TypeError(
"secret data mask {0} must be a CryptographicUsageMask "
"enumeration".format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("secret data name {0} must be a string".format(
position))
def __repr__(self):
value = "value={0}".format(binascii.hexlify(self.value))
data_type = "data_type={0}".format(self.data_type)
return "SecretData({0}, {1})".format(value, data_type)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, SecretData):
if self.value != other.value:
return False
elif self.data_type != other.data_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, SecretData):
return not (self == other)
else:
return NotImplemented
event.listen(SecretData._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
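# Illustrative sketch (not part of the original module): SecretData pairs
# arbitrary secret bytes with a SecretDataType.
#
#     secret = SecretData(
#         value=b'password',
#         data_type=enums.SecretDataType.PASSWORD
#     )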
class OpaqueObject(ManagedObject):
"""
The OpaqueObject class of the simplified KMIP object hierarchy.
OpaqueObject is one of several ManagedObjects and is one of the core KMIP
objects that are the subject of key management operations. For more
information, see Section 2.2 of the KMIP 1.1 specification.
Attributes:
opaque_type: The type of the opaque value.
"""
__tablename__ = 'opaque_objects'
unique_identifier = Column('uid', Integer,
ForeignKey('managed_objects.uid'),
primary_key=True)
opaque_type = Column('opaque_type', sql.EnumType(enums.OpaqueDataType))
__mapper_args__ = {
'polymorphic_identity': 'OpaqueData'
}
__table_args__ = {
'sqlite_autoincrement': True
}
def __init__(self, value, opaque_type, name='Opaque Object'):
"""
        Create an OpaqueObject.
Args:
value(bytes): The bytes representing opaque data.
opaque_type(OpaqueDataType): An enumeration defining the type of
the opaque value.
name(string): The string name of the opaque object.
"""
super(OpaqueObject, self).__init__()
self._object_type = enums.ObjectType.OPAQUE_DATA
self.value = value
self.opaque_type = opaque_type
self.names.append(name)
# All remaining attributes are not considered part of the public API
# and are subject to change.
self._digest = None
self._revocation_reason = None
# The following attributes are placeholders for attributes that are
# unsupported by kmip.core
self._destroy_date = None
self._compromise_occurrence_date = None
self._compromise_date = None
self.validate()
def validate(self):
"""
Verify that the contents of the OpaqueObject are valid.
Raises:
TypeError: if the types of any OpaqueObject attributes are invalid.
"""
if not isinstance(self.value, bytes):
raise TypeError("opaque value must be bytes")
elif not isinstance(self.opaque_type, enums.OpaqueDataType):
raise TypeError("opaque data type must be an OpaqueDataType "
"enumeration")
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if not isinstance(name, six.string_types):
position = "({0} in list)".format(i)
raise TypeError("opaque data name {0} must be a string".format(
position))
def __repr__(self):
value = "value={0}".format(binascii.hexlify(self.value))
opaque_type = "opaque_type={0}".format(self.opaque_type)
return "OpaqueObject({0}, {1})".format(value, opaque_type)
def __str__(self):
return str(binascii.hexlify(self.value))
def __eq__(self, other):
if isinstance(other, OpaqueObject):
if self.value != other.value:
return False
elif self.opaque_type != other.opaque_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, OpaqueObject):
return not (self == other)
else:
return NotImplemented
event.listen(OpaqueObject._names, 'append',
sql.attribute_append_factory("name_index"), retval=False)
class ApplicationSpecificInformation(sql.Base):
__tablename__ = "app_specific_info"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_application_namespace = sqlalchemy.Column(
"application_namespace",
sqlalchemy.String
)
_application_data = sqlalchemy.Column(
"application_data",
sqlalchemy.String
)
managed_objects = sqlalchemy.orm.relationship(
"ManagedObject",
secondary=app_specific_info_map,
back_populates="app_specific_info"
)
def __init__(self,
application_namespace=None,
application_data=None):
"""
Create an ApplicationSpecificInformation attribute.
Args:
application_namespace (str): A string specifying the application
namespace. Required.
application_data (str): A string specifying the application data.
Required.
"""
super(ApplicationSpecificInformation, self).__init__()
self.application_namespace = application_namespace
self.application_data = application_data
@property
def application_namespace(self):
return self._application_namespace
@application_namespace.setter
def application_namespace(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._application_namespace = value
else:
raise TypeError("The application namespace must be a string.")
@property
def application_data(self):
return self._application_data
@application_data.setter
def application_data(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._application_data = value
else:
raise TypeError("The application data must be a string.")
def __repr__(self):
application_namespace = "application_namespace='{}'".format(
self.application_namespace
)
application_data = "application_data='{}'".format(
self.application_data
)
return "ApplicationSpecificInformation({})".format(
", ".join(
[
application_namespace,
application_data
]
)
)
def __str__(self):
return str(
{
"application_namespace": self.application_namespace,
"application_data": self.application_data
}
)
def __eq__(self, other):
if isinstance(other, ApplicationSpecificInformation):
if self.application_namespace != other.application_namespace:
return False
elif self.application_data != other.application_data:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ApplicationSpecificInformation):
return not (self == other)
else:
return NotImplemented
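# Illustrative sketch (not part of the original module): both fields are
# plain strings, an application namespace plus application-defined data.
#
#     info = ApplicationSpecificInformation(
#         application_namespace="ssl",
#         application_data="www.example.com"
#     )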
class ObjectGroup(sql.Base):
__tablename__ = "object_groups"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
_object_group = sqlalchemy.Column(
"object_group",
sqlalchemy.String,
nullable=False
)
managed_objects = sqlalchemy.orm.relationship(
"ManagedObject",
secondary=object_group_map,
back_populates="object_groups"
)
def __init__(self, object_group=None):
"""
Create an ObjectGroup attribute.
Args:
object_group (str): A string specifying the object group. Required.
"""
super(ObjectGroup, self).__init__()
self.object_group = object_group
@property
def object_group(self):
return self._object_group
@object_group.setter
def object_group(self, value):
if (value is None) or (isinstance(value, six.string_types)):
self._object_group = value
else:
raise TypeError("The object group must be a string.")
def __repr__(self):
object_group = "object_group='{}'".format(self.object_group)
return "ObjectGroup({})".format(object_group)
def __str__(self):
return str({"object_group": self.object_group})
def __eq__(self, other):
if isinstance(other, ObjectGroup):
if self.object_group != other.object_group:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ObjectGroup):
return not (self == other)
else:
return NotImplemented
|
apache-2.0
| 5,590,796,173,671,300,000
| 34.45098
| 79
| 0.593939
| false
| 4.302605
| false
| false
| false
|
rackerlabs/marconi
|
marconi/queues/transport/wsgi/v1_0/homedoc.py
|
1
|
4656
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
#------------------------------------------------------------------
# Queues
#------------------------------------------------------------------
'rel/queues': {
'href-template': '/v1/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v1/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET', 'HEAD', 'PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue-metadata': {
'href-template': '/v1/queues/{queue_name}/metadata',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET', 'PUT'],
'formats': {
'application/json': {},
},
},
},
'rel/queue-stats': {
'href-template': '/v1/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
#------------------------------------------------------------------
# Messages
#------------------------------------------------------------------
'rel/messages': {
'href-template': ('/v1/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post-messages': {
'href-template': '/v1/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
#------------------------------------------------------------------
# Claims
#------------------------------------------------------------------
'rel/claim': {
'href-template': '/v1/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
}
}
class Resource(object):
def __init__(self):
document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4)
self.document_utf8 = document.encode('utf-8')
def on_get(self, req, resp, project_id):
resp.data = self.document_utf8
resp.content_type = 'application/json-home'
resp.cache_control = ['max-age=86400']
# status defaults to 200
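# Illustrative sketch (not part of the original module): wiring the home
# document into a Falcon application. The route template is an assumption.
#
#     import falcon
#     app = falcon.API()
#     app.add_route('/v1/{project_id}', Resource())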
|
apache-2.0
| 9,216,185,514,893,187,000
| 31.788732
| 79
| 0.391323
| false
| 4.656
| false
| false
| false
|
dex4er/django-pyc
|
django_pyc/management/commands/clearpyc.py
|
1
|
2097
|
import argparse
import os
import re
import sys
from django.core.management import base
class Command(base.BaseCommand):
help = \
"""
Clears .pyc files from the project.
"""
pattern = r'^.+\.pyc$'
def add_arguments(self, parser):
parser.add_argument(
'--noinput', dest='noinput', action='store_true', default=False,
help="Do NOT prompt the user for input of any kind."
)
parser.add_argument(
'-f', '--force', dest='force', action='store_true', default=False,
help="Force the removing files without user interaction."
)
parser.add_argument(
'-p', '--with-pythonpath', dest='with_pythonpath', action='store_true', default=False,
help="Remove also PYTHONPATH libraries."
)
parser.add_argument(
'path', nargs=argparse.REMAINDER,
help="Directories with libraries"
)
def handle(self, *args, **options):
dirs = options['path'] or sys.path[:1]
if options['with_pythonpath']:
dirs += sys.path[1:]
for d in dirs:
d = d or '.'
if os.path.isdir(d) and os.access(d, os.W_OK):
for dirname, _, filenames in os.walk(d):
for filename in filenames:
fullname = os.path.join(dirname, filename)
if re.search(self.pattern, fullname):
if not options['force'] and not options['noinput']:
confirm_action = input(
"Do you want to delete '%s'? [y/N] " % fullname)
if confirm_action != 'y':
continue
os.remove(fullname)
if int(options['verbosity']) >= 2:
self.stdout.write("Removed %s" % fullname)
else:
if int(options['verbosity']) >= 2:
self.stdout.write("Skipped %s" % d)
|
lgpl-3.0
| 5,796,214,227,565,749,000
| 36.446429
| 98
| 0.48784
| false
| 4.558696
| false
| false
| false
|
daite/textparser
|
analyze_japanese.py
|
1
|
5424
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015 daite
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bs4 import BeautifulSoup as BS
from urlparse import urljoin
import requests
import argparse
import setting
import codecs
import os
class JapTextParser:
def __init__(self, mode='editorial'):
'''
:: init function of basic class
'''
self.mode = mode
self.text_encoding = setting.text_encoding
self.output_file = setting.output_file
self.dedupe_output_file = setting.dedupe_output_file
def analyze_japanese_text(self, search_filter=setting.word_filter):
'''
:: analyze japanese text when given search_filter
:: need to add error handling
'''
for sentence in self.get_text_from_url():
url = setting.app_url(setting.app_id,
search_filter,
sentence)
r = requests.get(url)
status_code = r.status_code
if status_code == 200:
print '[%s] ===> [%s] OK' %(self.mode, status_code)
self.save_text(r.text)
else:
raise RuntimeError("check it")
self.dedupe() # deduping text
def get_text_from_url(self):
'''
:: get text from url
'''
pass
def dedupe(self):
'''
:: dedupe text data
'''
print('deduping.....')
text_list = set()
with codecs.open(self.output_file, 'r',
encoding=self.text_encoding) as f:
for x in f.readlines() :
text_list.add(x)
for text in text_list:
with codecs.open(self.dedupe_output_file, 'a',
encoding=self.text_encoding) as g:
g.write(text)
print('cleaning up...')
os.remove(self.output_file)
def save_text(self, res_text):
'''
:: save useful information to txt file
:: returned by yahoo japanese analyze server
'''
for word in BS(res_text).findAll('word'):
category = word.find('pos').text
kanji = word.find('surface').text
hiragana = word.find('reading').text
try:
with codecs.open(self.output_file, 'a',
encoding=self.text_encoding) as f:
text = '%s\t%s\t%s' %(category, kanji, hiragana)
f.write(text + '\r\n')
except Exception as e:
os.remove(self.output_file)
raise RuntimeError("Error", e)
@staticmethod
def get_japanese_meaning(kanji):
'''
:: get japanese meaning from kotobank
'''
		url = 'https://kotobank.jp/word/%s' % kanji
try:
japanese_meaning = BS(requests.get(url).text).\
find('meta', {'property':'og:description'})['content']
except:
japanese_meaning = 'errors!'
return japanese_meaning
@staticmethod
def get_response(url):
'''
:: staticmethod -> get BS response from url
'''
return BS(requests.get(url).content)
class AsahiParser(JapTextParser):
'''
:: AsahiParser class
'''
def get_text_from_url(self):
'''
:: override function from base class
'''
if self.mode == 'editorial':
url = setting.asahi_editorial_url
else:
url = setting.asahi_tensheng_url
soup = self.get_response(url)
div_tag = soup.find('div', {'class': 'ArticleText'})
for p_tag in div_tag.findAll('p'):
yield p_tag.text
class NikkeiParser(JapTextParser):
'''
:: NikkeiParser class
'''
def get_text_from_url(self):
'''
:: override function from base class
:: get the lastest 2 editorial pages
'''
nikkei_main_url = setting.nikkei_main_url
soup_main = self.get_response(nikkei_main_url).\
findAll('h4', {'class': 'cmn-article_title'})[:2]
for s in soup_main:
nikkei_editorial_url = urljoin(setting.nikkei_host_url, s.find('a')['href'])
soup_editorial = self.get_response(nikkei_editorial_url).\
find('div', {'class': 'cmn-article_text JSID_key_fonttxt'})
for text in soup_editorial.findAll('p'):
yield text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t','--tensheng',
help='fetch asahi tensheng',
action="store_true")
parser.add_argument('-e','--editorial',
help='fetch asahi editorial',
action="store_true")
parser.add_argument('-n','--nikkei',
help='fetch nikkei editorial',
action="store_true")
args = parser.parse_args()
if args.tensheng:
a = AsahiParser(mode='tensheng')
a.analyze_japanese_text()
elif args.editorial:
a = AsahiParser()
a.analyze_japanese_text()
elif args.nikkei:
n = NikkeiParser()
n.analyze_japanese_text()
else:
parser.print_help()
exit(1)
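# Example invocations (the flags mirror the argparse definitions above):
#
#     ./analyze_japanese.py --editorial   # Asahi editorial
#     ./analyze_japanese.py -t            # Asahi tensheng
#     ./analyze_japanese.py --nikkei      # Nikkei editorial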
|
mit
| -3,033,339,715,961,497,000
| 29.133333
| 80
| 0.665007
| false
| 3.147998
| false
| false
| false
|
znick/anytask
|
anytask/tasks/models.py
|
1
|
16245
|
# coding: utf-8
import copy
import sys
import json
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q, Max
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.utils.html import escape
from courses.models import Course
from groups.models import Group
def check_json(text):
try:
text_to_json = json.loads(text, strict=False)
if not isinstance(text_to_json, dict):
raise ValueError
return text_to_json
except (ValueError, TypeError):
return False
def get_lang_text(text, lang):
text_ = check_json(text)
if text_:
lang = lang if lang in text_ else settings.LANGUAGE_CODE
return text_[lang]
return unicode(text)
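# Illustrative sketch (not part of the original module): get_lang_text falls
# back to settings.LANGUAGE_CODE when the requested language key is missing
# and returns plain text unchanged when it is not a JSON object.
#
#     get_lang_text('{"ru": "Задача", "en": "Task"}', 'en')  # -> u'Task'
#     get_lang_text('plain title', 'en')                      # -> u'plain title'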
class Task(models.Model):
title = models.CharField(max_length=191, db_index=True, null=True, blank=True)
short_title = models.CharField(max_length=15, db_index=True, null=True, blank=True)
course = models.ForeignKey(Course, db_index=True, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=False, related_name='groups_set')
weight = models.IntegerField(db_index=True, null=False, blank=False, default=0)
is_hidden = models.BooleanField(db_index=True, null=False, blank=False, default=False)
parent_task = models.ForeignKey('self', db_index=True, null=True, blank=True, related_name='children')
task_text = models.TextField(null=True, blank=True, default=None)
score_max = models.IntegerField(db_index=True, null=False, blank=False, default=0)
max_students = models.IntegerField(null=False, blank=False, default=0)
contest_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
rb_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
TYPE_FULL = 'All'
TYPE_SIMPLE = 'Only mark'
TYPE_SEMINAR = 'Seminar'
TYPE_MATERIAL = 'Material'
TYPE_IPYNB = 'Jupyter Notebook'
TASK_TYPE_CHOICES = (
(TYPE_FULL, _('s_obsuzhdeniem')),
(TYPE_SIMPLE, _('tolko_ocenka')),
(TYPE_MATERIAL, _('material')),
(TYPE_SEMINAR, _('seminar')),
(TYPE_IPYNB, _('jupyter notebook'))
)
type = models.CharField(db_index=False, max_length=128, choices=TASK_TYPE_CHOICES, default=TYPE_FULL)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
deadline_time = models.DateTimeField(auto_now=False, blank=True, null=True, default=None)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
contest_id = models.IntegerField(db_index=True, null=False, blank=False, default=0)
problem_id = models.CharField(max_length=128, db_index=True, null=True, blank=True)
send_to_users = models.BooleanField(db_index=False, null=False, blank=False, default=False)
sended_notify = models.BooleanField(db_index=True, null=False, blank=False, default=True)
one_file_upload = models.BooleanField(db_index=False, null=False, blank=False, default=False)
accepted_after_contest_ok = models.BooleanField(db_index=False, null=False, blank=False, default=False)
score_after_deadline = models.BooleanField(db_index=False, null=False, blank=False, default=True)
nb_assignment_name = models.CharField(max_length=255, null=True, blank=True)
def __unicode__(self):
return unicode(self.title)
def get_title(self, lang=settings.LANGUAGE_CODE):
return escape(get_lang_text(self.title, lang))
def get_description(self, lang=settings.LANGUAGE_CODE):
return get_lang_text(self.task_text, lang)
def is_text_json(self):
return check_json(self.task_text)
@property
def max_students_on_task(self):
return self.max_students or self.course.max_students_per_task or settings.PYTHONTASK_MAX_USERS_PER_TASK
def user_can_take_task(self, user):
for task_taken in TaskTaken.objects.filter(task=self):
task_taken.update_status()
if user.is_anonymous():
            return (False, u'Необходимо залогиниться')
        if self.is_hidden:
            return (False, u'Задача скрыта')
if not self.course.groups.filter(students=user).count():
return (False, u'Необходимо числиться в одной из групп курса')
if Task.objects.filter(parent_task=self).count() > 0:
return (False, u'')
if TaskTaken.objects.filter(task=self).filter(user=user).filter(
Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED))).count() != 0:
return (False, u'')
if self.parent_task is not None:
tasks = Task.objects.filter(parent_task=self.parent_task)
if TaskTaken.objects.filter(user=user).filter(task__in=tasks) \
.exclude(status=TaskTaken.STATUS_CANCELLED) \
.exclude(status=TaskTaken.STATUS_DELETED) \
.count() > 0:
return (False, u'Вы уже взяли другую подзадачу из этой задачи')
max_not_scored_tasks = self.course.max_not_scored_tasks or \
settings.PYTHONTASK_MAX_TASKS_WITHOUT_SCORE_PER_STUDENT
if max_not_scored_tasks:
if TaskTaken.objects.filter(user=user) \
.filter(task__course=self.course) \
.filter(status=TaskTaken.STATUS_TAKEN).count() >= max_not_scored_tasks:
return (False, u'У вас слишком много неоцененных задач')
max_incomplete_tasks = self.course.max_incomplete_tasks or settings.PYTHONTASK_MAX_INCOMPLETE_TASKS
if max_incomplete_tasks:
all_scored = TaskTaken.objects.filter(user=user).filter(task__course=self.course) \
.filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(
status=TaskTaken.STATUS_SCORED)))
if sum(t.score != t.task.score_max for t in all_scored) + 1 > max_incomplete_tasks:
return (False, u'У вас слишком много не до конца доделанных задач')
max_students = self.max_students_on_task or settings.PYTHONTASK_MAX_USERS_PER_TASK
if max_students:
if TaskTaken.objects.filter(task=self).filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(
status=TaskTaken.STATUS_SCORED))).count() >= max_students:
return (
False,
u'Задача не может быть взята более чем %d студентами' % max_students)
try:
task_taken = TaskTaken.objects.filter(task=self).filter(user=user).get(status=TaskTaken.STATUS_BLACKLISTED)
blacklist_expired_date = task_taken.blacklisted_till
if blacklist_expired_date:
return (False, u'Вы сможете взять эту задачу с %s' % blacklist_expired_date.strftime("%d.%m.%Y"))
except TaskTaken.DoesNotExist:
pass
return (True, u'')
def user_can_cancel_task(self, user):
if user.is_anonymous() or self.is_hidden:
return False
if TaskTaken.objects.filter(task=self).filter(user=user).filter(status=TaskTaken.STATUS_TAKEN).count() != 0:
return True
return False
def user_can_score_task(self, user):
if user.is_anonymous():
return False
return self.course.user_is_teacher(user)
def user_can_pass_task(self, user):
if user.is_anonymous():
return False
if not self.course.is_python_task:
if self.user_can_take_task(user):
return True
try:
task_taken = self.get_task_takens().get(user=user)
return (task_taken.status == TaskTaken.STATUS_TAKEN or task_taken.status == TaskTaken.STATUS_SCORED)
except TaskTaken.DoesNotExist:
return False
return False
def has_parent(self):
return self.parent_task is not None
def has_subtasks(self):
return Task.objects.filter(parent_task=self).count() > 0
def get_subtasks(self):
return Task.objects.filter(parent_task=self)
def get_task_takens(self):
return TaskTaken.objects.filter(task=self).filter(
Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED)))
def add_user_properties(self, user):
self.can_take = self.user_can_take_task(user)
self.can_cancel = self.user_can_cancel_task(user)
self.can_score = self.user_can_score_task(user)
self.can_pass = self.user_can_pass_task(user)
self.is_shown = not self.is_hidden or self.course.user_is_teacher(user)
def has_issue_access(self):
return self.type not in [self.TYPE_SIMPLE, self.TYPE_MATERIAL, self.TYPE_SEMINAR]
def set_position_in_new_group(self, groups=None):
if not groups:
groups = self.course.groups.all()
else:
for task_related in TaskGroupRelations.objects.filter(task=self).exclude(group__in=groups):
task_related.deleted = True
task_related.save()
for group in list(groups):
task_related, created = TaskGroupRelations.objects.get_or_create(task=self, group=group)
if created:
max_position = TaskGroupRelations.objects.filter(group=group).exclude(id=task_related.id) \
.aggregate(Max('position'))['position__max']
task_related.position = max_position + 1 if max_position is not None else 0
else:
task_related.deleted = False
task_related.save()
def get_url_in_course(self):
return reverse('courses.views.seminar_page', kwargs={'course_id': self.course_id, 'task_id': self.id})
class TaskLog(models.Model):
title = models.CharField(max_length=191, db_index=True, null=True, blank=True)
course = models.ForeignKey(Course, db_index=False, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=False, related_name='groups_log_set')
weight = models.IntegerField(db_index=False, null=False, blank=False, default=0)
parent_task = models.ForeignKey('self', db_index=True, null=True, blank=True, related_name='parent_task_set')
task_text = models.TextField(null=True, blank=True, default=None)
score_max = models.IntegerField(db_index=False, null=False, blank=False, default=0)
contest_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
rb_integrated = models.BooleanField(db_index=False, null=False, blank=False, default=False)
TYPE_FULL = 'All'
TYPE_SIMPLE = 'Only mark'
TASK_TYPE_CHOICES = (
(TYPE_FULL, _(u's_obsuzhdeniem')),
(TYPE_SIMPLE, _(u'tolko_ocenka')),
)
type = models.CharField(db_index=False, max_length=128, choices=TASK_TYPE_CHOICES, default=TYPE_FULL)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
deadline_time = models.DateTimeField(auto_now=False, null=True, default=None)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
contest_id = models.IntegerField(db_index=True, null=False, blank=False, default=0)
problem_id = models.CharField(max_length=128, db_index=True, null=True, blank=True)
def __unicode__(self):
return unicode(self.title)
class TaskTaken(models.Model):
STATUS_TAKEN = 0
STATUS_CANCELLED = 1
STATUS_BLACKLISTED = 2
STATUS_SCORED = 3
STATUS_DELETED = 4
user = models.ForeignKey(User, db_index=True, null=False, blank=False)
task = models.ForeignKey(Task, db_index=True, null=False, blank=False)
issue = models.ForeignKey('issues.Issue', db_index=True, null=True, blank=False)
TASK_TAKEN_STATUSES = (
(STATUS_TAKEN, u'Task taken'),
(STATUS_CANCELLED, u'Task cancelled'),
(STATUS_BLACKLISTED, u'Task blacklisted'),
(STATUS_SCORED, u'Task scored'),
(STATUS_DELETED, u'TaskTaken deleted')
)
status = models.IntegerField(choices=TASK_TAKEN_STATUSES, db_index=True, blank=False, default=0)
EDIT = 'EDIT'
QUEUE = 'QUEUE'
OK = 'OK'
STATUS_CHECK_CHOICES = (
(EDIT, u'Дорешивание'),
(QUEUE, u'Ожидает проверки'),
(OK, u'Задача зачтена и/или больше не принимается'),
)
status_check = models.CharField(db_index=True, max_length=5, choices=STATUS_CHECK_CHOICES, default=EDIT)
taken_time = models.DateTimeField(blank=True, null=True)
blacklisted_till = models.DateTimeField(blank=True, null=True)
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
@property
def score(self):
self.update_status()
if not self.issue:
return 0
return self.issue.mark
def update_status(self):
if self.issue and abs(self.issue.mark) > sys.float_info.epsilon and self.status != self.STATUS_SCORED:
self.scored()
if not self.issue.get_byname('responsible_name'):
group = self.task.course.get_user_group(self.user)
if group:
default_teacher = self.task.course.get_default_teacher(group)
if default_teacher:
self.issue.set_byname('responsible_name', default_teacher, author=None)
def take(self):
self.status = self.STATUS_TAKEN
if self.taken_time is None:
self.taken_time = timezone.now()
self.save()
def cancel(self):
dt_from_taken_delta = timezone.now() - self.taken_time
if (dt_from_taken_delta.days) <= settings.PYTHONTASK_MAX_DAYS_TO_FULL_CANCEL:
self.taken_time = None
self.status = self.STATUS_CANCELLED
self.save()
def blacklist(self):
self.status = self.STATUS_BLACKLISTED
self.blacklisted_till = timezone.now() + timedelta(days=settings.PYTHONTASK_DAYS_DROP_FROM_BLACKLIST)
self.save()
def scored(self):
self.status = self.STATUS_SCORED
self.save()
def mark_deleted(self):
self.status = self.STATUS_DELETED
self.taken_time = None
self.blacklisted_till = None
self.save()
class Meta:
unique_together = (("user", "task"),)
def __unicode__(self):
return unicode(self.task) + " (" + unicode(self.user) + ")"
class TaskGroupRelations(models.Model):
task = models.ForeignKey(Task, db_index=False, null=False, blank=False)
group = models.ForeignKey(Group, db_index=False, null=False, blank=False)
position = models.IntegerField(db_index=False, null=False, blank=False, default=0)
deleted = models.BooleanField(db_index=False, null=False, blank=False, default=False)
class Meta:
unique_together = ("task", "group")
def __unicode__(self):
return ' '.join([unicode(self.task), unicode(self.group), unicode(self.position)])
def task_save_to_log_post_save(sender, instance, created, **kwargs):
task_log = TaskLog()
task_log_dict = copy.deepcopy(instance.__dict__)
task_log_dict['id'] = None
task_log.__dict__ = task_log_dict
task_log.sended_notify = False
task_log.save()
task_log.groups.add(*instance.groups.all())
# post_save.connect(task_save_to_log_post_save, sender=Task)
|
mit
| 2,597,945,272,377,163,300
| 39.148615
| 119
| 0.650166
| false
| 3.371906
| false
| false
| false
|
elishowk/django-poser
|
poser/utils/page.py
|
1
|
2014
|
# -*- coding: utf-8 -*-
from django.conf import settings
import re
APPEND_TO_SLUG = "-copy"
COPY_SLUG_REGEX = re.compile(r'^.*-copy(?:-(\d+))?$')
def is_valid_page_slug(page, slug, site, path=None):
"""Validates given slug depending on settings.
"""
# Exclude the page with the publisher_state == page.PUBLISHER_STATE_DELETE
from poser.models.pagemodel import Page
qs = Page.objects.filter(site=site)
## Check for slugs
if qs.filter(slug=slug).count():
return False
## Check for path
if path and qs.filter(path=path).count():
return False
return True
def get_available_slug(page, new_slug=None):
"""Smart function generates slug for title if current title slug cannot be
used. Appends APPEND_TO_SLUG to slug and checks it again.
(Used in page copy function)
Returns: slug
"""
slug = new_slug or page.slug
# We need the full path for the title to check for conflicting urls
page.slug = slug
page.update_path()
path = page.path
# This checks for conflicting slugs/overwrite_url, for both published and unpublished pages
# This is a simpler check than in page_resolver.is_valid_url which
    # takes into account the actual page URL
if not is_valid_page_slug(page, slug, page.site, path):
# add nice copy attribute, first is -copy, then -copy-2, -copy-3, ....
match = COPY_SLUG_REGEX.match(slug)
if match:
try:
next = int(match.groups()[0]) + 1
slug = "-".join(slug.split('-')[:-1]) + "-%d" % next
except TypeError:
slug = slug + "-2"
else:
slug = slug + APPEND_TO_SLUG
return get_available_slug(page, slug)
else:
return slug
def check_title_slugs(page):
"""Checks page slugs for duplicity if required, used after page move/
cut/paste.
"""
old_slug = page.slug
page.slug = get_available_slug(page)
if page.slug != old_slug:
page.save()
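# Illustrative behaviour (assumes another page already uses the same slug on
# the site): the copy suffix is appended and incremented until the slug and
# path are free.
#
#     get_available_slug(page)                  # 'about' -> 'about-copy'
#     get_available_slug(page, 'about-copy')    # -> 'about-copy-2'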
|
agpl-3.0
| 1,039,450,840,049,646,100
| 31.483871
| 95
| 0.618669
| false
| 3.628829
| false
| false
| false
|
donkawechico/arguman.org
|
web/premises/views.py
|
1
|
23259
|
# -*- coding:utf-8 -*-
import json
from datetime import timedelta
from markdown2 import markdown
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db.models import Max, Sum
from django.utils.timezone import now
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.generic import DetailView, TemplateView, CreateView, View
from django.views.generic.edit import UpdateView
from django.utils.translation import get_language
from django.db.models import Count
from blog.models import Post
from premises.models import Contention, Premise
from premises.forms import (ArgumentCreationForm, PremiseCreationForm,
PremiseEditForm, ReportForm)
from premises.signals import (added_premise_for_premise,
added_premise_for_contention,
reported_as_fallacy,
supported_a_premise)
from premises.templatetags.premise_tags import check_content_deletion
from premises.mixins import PaginationMixin, NextURLMixin
from newsfeed.models import Entry
from profiles.mixins import LoginRequiredMixin
from profiles.models import Profile
def get_ip_address(request):
return (request.META.get('HTTP_X_FORWARDED_FOR') or
request.META.get('REMOTE_ADDR'))
class ContentionDetailView(DetailView):
queryset = (Contention.objects
.select_related('user')
.prefetch_related('premises'))
context_object_name = 'contention'
def get_template_names(self):
view = self.request.GET.get("view")
name = ("list_view" if view == "list" else "tree_view")
return ["premises/%s.html" % name]
def get_parent(self):
premise_id = self.kwargs.get("premise_id")
if premise_id:
return get_object_or_404(Premise, id=premise_id)
def get_premises(self):
contention = self.get_parent() or self.get_object()
return contention.published_children()
def get_context_data(self, **kwargs):
contention = self.get_object()
edit_mode = (
self.request.user.is_superuser or
self.request.user.is_staff or
contention.user == self.request.user)
return super(ContentionDetailView, self).get_context_data(
premises=self.get_premises(),
parent_premise=self.get_parent(),
path=contention.get_absolute_url(),
edit_mode=edit_mode,
serialized=contention.serialize(),
**kwargs)
class ContentionJsonView(DetailView):
model = Contention
def render_to_response(self, context, **response_kwargs):
contention = self.get_object(self.get_queryset())
return HttpResponse(json.dumps({
"nodes": self.build_tree(contention, self.request.user),
}), content_type="application/json")
def build_tree(self, contention, user):
return {
"name": contention.title,
"parent": None,
"pk": contention.pk,
"owner": contention.owner,
"sources": contention.sources,
"is_singular": self.is_singular(contention),
"children": self.get_premises(contention, user)
}
def get_premises(self, contention, user, parent=None):
children = [{
"pk": premise.pk,
"name": premise.text,
"parent": parent.text if parent else None,
"reportable_by_authenticated_user": self.user_can_report(
premise, user),
"report_count": premise.reports.count(),
"user": {
"id": premise.user.id,
"username": premise.user.username,
"absolute_url": reverse("auth_profile",
args=[premise.user.username])
},
"sources": premise.sources,
"premise_type": premise.premise_class(),
"children": (self.get_premises(contention, user, parent=premise)
if premise.published_children().exists() else [])
} for premise in contention.published_premises(parent)]
return children
def user_can_report(self, premise, user):
if user.is_authenticated() and user != premise.user:
return not premise.reported_by(user)
return False
def is_singular(self, contention):
result = contention.premises.all().aggregate(
max_sibling=Max('sibling_count'))
return result['max_sibling'] <= 1
class HomeView(TemplateView, PaginationMixin):
template_name = "index.html"
tab_class = "featured"
paginate_by = 20
def get_context_data(self, **kwargs):
contentions = self.get_contentions()
if self.request.user.is_authenticated():
notifications_qs = self.get_unread_notifications()
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
else:
notifications = None
return super(HomeView, self).get_context_data(
next_page_url=self.get_next_page_url(),
tab_class=self.tab_class,
notifications=notifications,
has_next_page=self.has_next_page(),
announcements=self.get_announcements(),
contentions=contentions, **kwargs)
def get_announcements(self):
return Post.objects.filter(is_announcement=True)
def get_unread_notifications(self):
return (self.request.user
.notifications
.filter(is_read=False)[:5])
def mark_as_read(self, notifications):
pks = notifications.values_list("id", flat=True)
(self.request.user
.notifications
.filter(id__in=pks)
.update(is_read=True))
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.language()
.filter(is_featured=True)
.order_by("-date_modification"))
if paginate:
contentions = (contentions[self.get_offset(): self.get_limit()])
return contentions
class NotificationsView(LoginRequiredMixin, HomeView):
template_name = "notifications.html"
def get_context_data(self, **kwargs):
notifications_qs = self.request.user.notifications.all()[:40]
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
return super(HomeView, self).get_context_data(
notifications=notifications,
**kwargs)
class SearchView(HomeView):
tab_class = 'search'
template_name = 'search/search.html'
partial_templates = {
'contentions': 'search/contention.html',
'users': 'search/profile.html',
'premises' : 'search/premise.html'
}
method_mapping = {'contentions': "get_contentions",
'users': "get_users",
'premises': "get_premises"}
def dispatch(self, request, *args, **kwargs):
self.type = request.GET.get('type', 'contentions')
if not self.method_mapping.get(self.type):
raise Http404()
return super(SearchView, self).dispatch(request, *args, **kwargs)
def get_keywords(self):
return self.request.GET.get('keywords') or ""
def has_next_page(self):
method = getattr(self, self.method_mapping[self.type])
total = method().count()
return total > (self.get_offset() + self.paginate_by)
def get_search_bundle(self):
method = getattr(self, self.method_mapping[self.type])
return [{'template': self.partial_templates[self.type],
'object': item} for item in method()]
def get_context_data(self, **kwargs):
return super(SearchView, self).get_context_data(
results=self.get_search_bundle(),
**kwargs)
def get_next_page_url(self):
offset = self.get_offset() + self.paginate_by
return '?offset=%(offset)s&keywords=%(keywords)s&type=%(type)s' % {
"offset": offset,
"type": self.type,
"keywords": self.get_keywords()
}
def get_premises(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 3:
result = Premise.objects.none()
else:
result = (Premise.objects.filter(
argument__language=get_language(),
text__contains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_users(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Profile.objects.none()
else:
result = (Profile.objects.filter(
username__icontains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
def get_contentions(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Contention.objects.none()
else:
result = (Contention
.objects
.filter(title__icontains=keywords,
language=get_language()))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
class NewsView(HomeView):
tab_class = "news"
def get_contentions(self, paginate=True):
contentions = (
Contention
.objects
.language()
.filter(is_published=True)
)
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class StatsView(HomeView):
tab_class = "stats"
template_name = "stats.html"
partial_templates = {
Profile: "stats/profile.html",
Contention: "stats/contention.html",
Premise: "stats/premise.html",
}
method_mapping = {
"active_users": "get_active_users",
"supported_users": "get_supported_users",
"disgraced_users": "get_disgraced_users",
"supported_premises": "get_supported_premises",
"fallacy_premises": "get_fallacy_premises",
"crowded_contentions": "get_crowded_contentions",
}
time_ranges = [7, 30]
def get_context_data(self, **kwargs):
return super(StatsView, self).get_context_data(
stats=self.get_stats_bundle(),
stats_type=self.get_stats_type(),
days=self.days,
**kwargs)
def get_stats_type(self):
return self.request.GET.get("what")
def build_time_filters(self, date_field="date_creation"):
days = self.request.GET.get("days")
if not days or days == "all":
self.days = None
return {}
try:
days = int(days)
except (TypeError, ValueError):
days = None
if not days or days not in self.time_ranges:
raise Http404()
self.days = days
field_expression = "%s__gt" % date_field
return {
field_expression: timezone.now() - timedelta(days=days)
}
def get_stats_bundle(self):
stat_type = self.get_stats_type()
if stat_type not in self.method_mapping:
raise Http404()
method = getattr(self, self.method_mapping[stat_type])
return [
{
"template": self.partial_templates[type(item)],
"object": item
} for item in method()
]
def get_active_users(self):
return Profile.objects.annotate(
premise_count=Sum("premise"),
).filter(
premise_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-premise_count")[:10]
def get_supported_users(self):
return Profile.objects.annotate(
supporter_count=Sum("premise__supporters"),
).filter(
supporter_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-supporter_count")[:10]
def get_disgraced_users(self):
return Profile.objects.annotate(
report_count=Sum("premise__reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="premise__date_creation")
).order_by("-report_count")[:10]
def get_supported_premises(self):
return Premise.objects.annotate(
supporter_count=Sum("supporters")
).filter(
argument__language=get_language(),
supporter_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-supporter_count")[:50]
def get_fallacy_premises(self):
return Premise.objects.annotate(
report_count=Sum("reports"),
).filter(
report_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-report_count")[:10]
def get_crowded_contentions(self):
return Contention.objects.annotate(
premise_count=Sum("premises"),
).filter(
language=get_language(),
premise_count__gt=0,
**self.build_time_filters(date_field="date_creation")
).order_by("-premise_count")[:10]
class UpdatedArgumentsView(HomeView):
tab_class = "updated"
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.filter(is_published=True)
.order_by('-date_modification'))
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class ControversialArgumentsView(HomeView):
tab_class = "controversial"
def get_contentions(self, paginate=True):
last_week = now() - timedelta(days=3)
contentions = (Contention
.objects
.annotate(num_children=Count('premises'))
.order_by('-num_children')
.filter(date_modification__gte=last_week))
if paginate:
return contentions[self.get_offset():self.get_limit()]
return contentions
class AboutView(TemplateView):
template_name = "about.html"
def get_text_file(self):
language = get_language()
return render_to_string("about-%s.md" % language)
def get_context_data(self, **kwargs):
content = markdown(self.get_text_file())
return super(AboutView, self).get_context_data(
content=content, **kwargs)
class TosView(TemplateView):
template_name = "tos.html"
def get_context_data(self, **kwargs):
content = markdown(render_to_string("tos.md"))
return super(TosView, self).get_context_data(
content=content, **kwargs)
class ArgumentCreationView(LoginRequiredMixin, CreateView):
template_name = "premises/new_contention.html"
form_class = ArgumentCreationForm
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.ip_address = get_ip_address(self.request)
form.instance.language = get_language()
form.instance.is_published = True
response = super(ArgumentCreationView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentUpdateView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_contention.html"
form_class = ArgumentCreationForm
def get_queryset(self):
contentions = Contention.objects.all()
if self.request.user.is_superuser:
return contentions
return contentions.filter(user=self.request.user)
def form_valid(self, form):
form.instance.user = self.request.user
response = super(ArgumentUpdateView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentPublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = True
contention.save()
messages.info(request, u"Argument is published now.")
return redirect(contention)
class ArgumentUnpublishView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = False
contention.save()
messages.info(request, u"Argüman yayından kaldırıldı.")
return redirect(contention)
class ArgumentDeleteView(LoginRequiredMixin, DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
if check_content_deletion(contention):
# remove notification
Entry.objects.delete(contention.get_newsfeed_type(), contention.id)
contention.delete()
messages.info(request, u"Argument has been removed.")
return redirect("home")
else:
messages.info(request, u"Argument cannot be deleted.")
return redirect(contention)
delete = post
class PremiseEditView(LoginRequiredMixin, UpdateView):
template_name = "premises/edit_premise.html"
form_class = PremiseEditForm
def get_queryset(self):
premises = Premise.objects.all()
if self.request.user.is_superuser:
return premises
return premises.filter(user=self.request.user)
def form_valid(self, form):
response = super(PremiseEditView, self).form_valid(form)
form.instance.argument.update_sibling_counts()
return response
def get_context_data(self, **kwargs):
return super(PremiseEditView, self).get_context_data(**kwargs)
class PremiseCreationView(NextURLMixin, LoginRequiredMixin, CreateView):
template_name = "premises/new_premise.html"
form_class = PremiseCreationForm
def get_context_data(self, **kwargs):
return super(PremiseCreationView, self).get_context_data(
contention=self.get_contention(),
view=self.get_view_name(),
parent=self.get_parent(),
**kwargs)
def form_valid(self, form):
contention = self.get_contention()
form.instance.user = self.request.user
form.instance.argument = contention
form.instance.parent = self.get_parent()
form.instance.is_approved = True
form.instance.ip_address = get_ip_address(self.request)
form.save()
contention.update_sibling_counts()
if form.instance.parent:
added_premise_for_premise.send(sender=self,
premise=form.instance)
else:
added_premise_for_contention.send(sender=self,
premise=form.instance)
contention.date_modification = timezone.now()
contention.save()
return redirect(
form.instance.get_parent().get_absolute_url() +
self.get_next_parameter()
)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_parent(self):
parent_pk = self.kwargs.get("pk")
if parent_pk:
return get_object_or_404(Premise, pk=parent_pk)
class PremiseSupportView(NextURLMixin, LoginRequiredMixin, View):
def get_premise(self):
premises = Premise.objects.exclude(user=self.request.user)
return get_object_or_404(premises, pk=self.kwargs['pk'])
def post(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.add(self.request.user)
supported_a_premise.send(sender=self, premise=premise,
user=self.request.user)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class PremiseUnsupportView(PremiseSupportView):
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.remove(self.request.user)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
post = delete
class PremiseDeleteView(LoginRequiredMixin, View):
def get_premise(self):
if self.request.user.is_staff:
premises = Premise.objects.all()
else:
premises = Premise.objects.filter(user=self.request.user)
return get_object_or_404(premises,
pk=self.kwargs['pk'])
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.delete()
premise.update_sibling_counts()
contention = self.get_contention()
if not contention.premises.exists():
contention.is_published = False
contention.save()
return redirect(contention)
post = delete
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class ReportView(NextURLMixin, LoginRequiredMixin, CreateView):
form_class = ReportForm
template_name = "premises/report.html"
def get_context_data(self, **kwargs):
return super(ReportView, self).get_context_data(
premise=self.get_premise(),
view=self.get_view_name(),
**kwargs)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_premise(self):
return get_object_or_404(Premise, pk=self.kwargs['pk'])
def get_initial(self):
return {
'contention': self.get_contention(),
'premise': self.get_premise(),
'reporter': self.request.user
}
def form_valid(self, form):
contention = self.get_contention()
premise = self.get_premise()
form.instance.contention = contention
form.instance.premise = premise
form.instance.reporter = self.request.user
form.save()
reported_as_fallacy.send(sender=self, report=form.instance)
return redirect(
premise.get_parent().get_absolute_url() +
self.get_next_parameter() +
"#%s" % premise.pk
)
|
mit
| -2,597,903,517,027,247,000
| 32.458993
| 79
| 0.603122
| false
| 3.853828
| false
| false
| false
|
jnosal/seth
|
seth/tests/test_authentication.py
|
1
|
2947
|
from seth import auth
from seth.tests import IntegrationTestBase
from seth.classy.rest import generics
class DefaultAuthenticatedResource(generics.GenericApiView):
authentication_policy = None
def get(self, **kwargs):
return {}
class BaseAuthenticatedTestCase(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
config.register_resource(DefaultAuthenticatedResource, '/test_basic')
def test_default_setup(self):
r = self.app.get('/test_basic')
self.assertEqual(r.status_int, 200)
class TokenAuthenticationPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class CheckQueryParamsResource(generics.GenericApiView):
authentication_policy = auth.SecretTokenAuthenticationPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResource, '/test_token')
def test_no_token_in_params(self):
r = self.app.get('/test_token', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_wrong_token_in_params(self):
r = self.app.get('/test_token?token=wrong_token', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_token_in_params_wrong_param_name(self):
r = self.app.get('/test_token?tokennamewrong=secret', expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_token_param_name_and_value(self):
r = self.app.get('/test_token?token=secret')
self.assertEqual(r.status_int, 200)
class CheckHeaderAuthenticationPolicy(IntegrationTestBase):
def extend_app_configuration(self, config):
config.include('seth')
class AllowHeaderAuthPolicy(auth.HeaderAuthenticationPolicy):
header_name = 'My-Header'
header_secret = 'My-Value'
class CheckQueryParamsResourceSecond(generics.GenericApiView):
authentication_policy = AllowHeaderAuthPolicy
def get(self, **kwargs):
return {}
config.register_resource(CheckQueryParamsResourceSecond, '/test_header')
def test_no_header_in_request(self):
r = self.app.get('/test_header', headers={}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_header_in_request_but_incorrect_value(self):
r = self.app.get('/test_header', headers={'My-Header': '123'}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_value_in_header_but_wrong_header_name(self):
r = self.app.get('/test_header', headers={'Wrong': 'My-Value'}, expect_errors=True)
self.assertEqual(r.status_int, 401)
def test_correct_header_name_and_value(self):
r = self.app.get('/test_header', headers={'My-Header': 'My-Value'}, expect_errors=True)
self.assertEqual(r.status_int, 200)
|
mit
| 4,533,596,743,129,733,600
| 33.682353
| 95
| 0.673906
| false
| 3.79768
| true
| false
| false
|
foursquare/pants
|
tests/python/pants_test/java/test_nailgun_integration.py
|
1
|
1196
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class TestNailgunIntegration(PantsRunIntegrationTest):
def test_scala_repl_helloworld_input(self):
"""Integration test to exercise possible closed-loop breakages in NailgunClient, NailgunSession
and InputReader.
"""
target = 'examples/src/scala/org/pantsbuild/example/hello/welcome'
pants_run = self.run_pants(
command=['repl', target, '--quiet'],
stdin_data=(
'import org.pantsbuild.example.hello.welcome.WelcomeEverybody\n'
'println(WelcomeEverybody("World" :: Nil).head)\n'
),
# Override the PANTS_CONFIG_FILES="pants.travis-ci.ini" used within TravisCI to enable
# nailgun usage for the purpose of exercising that stack in the integration test.
config={'DEFAULT': {'execution_strategy': 'nailgun'}}
)
self.assert_success(pants_run)
self.assertIn('Hello, World!', pants_run.stdout_data.splitlines())
|
apache-2.0
| 4,745,170,059,667,853,000
| 43.296296
| 99
| 0.72408
| false
| 3.761006
| true
| false
| false
|
rustyhowell/raytracer_py
|
hitable.py
|
1
|
2340
|
from collections import namedtuple
from vector3 import Vec3, dot
from math import sqrt
from ray import Ray
HitRecord = namedtuple("HitRecord", ['t', 'p', 'normal', 'material'])
class Hitable:
def hit(self, ray_, t_min, t_max):
"""
Determine if the ray will hit the object
:param ray_:
:param t_min:
:param t_max:
:return: Return a tuple: true/hitrecord or False, None
"""
        raise NotImplementedError("Override in subclass")
class Sphere(Hitable):
def __init__(self, center, radius, material):
self.center = center
self.radius = radius
self.material = material
def hit(self, ray_, t_min, t_max):
assert isinstance(ray_, Ray)
oc = ray_.origin - self.center
a = dot(ray_.direction, ray_.direction)
b = dot(oc, ray_.direction)
c = dot(oc, oc) - self.radius * self.radius
discriminant = b * b - a * c
if discriminant > 0.0:
temp = (-b - sqrt(b*b - a * c)) / a
if t_min < temp < t_max:
p = ray_.point_at_parameter(temp)
rec = HitRecord(t=temp,
p=p,
normal=(p - self.center) / self.radius,
material=self.material
)
return True, rec
temp = (-b + sqrt(b*b - a * c)) / a
if t_min < temp < t_max:
p = ray_.point_at_parameter(temp)
rec = HitRecord(t=temp,
p=p,
normal=(p - self.center) / self.radius,
material=self.material
)
return True, rec
return False, None
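# Usage sketch (illustrative only, assuming Vec3(x, y, z) and Ray(origin,
# direction) constructors from vector3.py / ray.py): intersect a ray with a
# sphere and read back the hit record fields.
#   s = Sphere(Vec3(0.0, 0.0, -1.0), 0.5, material=None)
#   ok, rec = s.hit(Ray(Vec3(0.0, 0.0, 0.0), Vec3(0.0, 0.0, -1.0)), 0.001, 1e9)
#   if ok:
#       print(rec.t, rec.p, rec.normal)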
class HitableList(Hitable):
def __init__(self):
self.shapes = []
def append(self, shape):
self.shapes.append(shape)
def hit(self, ray_, t_min, t_max):
hit_anything = False
closest_so_far = t_max
rec = None
for shape in self.shapes:
hit, tmprec = shape.hit(ray_, t_min, closest_so_far)
if hit:
hit_anything = True
closest_so_far = tmprec.t
rec = tmprec
return hit_anything, rec
|
mit
| 6,299,270,001,141,219,000
| 29
| 71
| 0.478205
| false
| 4.034483
| false
| false
| false
|
PaddlePaddle/models
|
PaddleRec/dssm/infer.py
|
1
|
1407
|
import paddle.fluid as fluid
import numpy as np
import sys
import args
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
def infer(args):
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(fluid.Scope()):
infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(args.model_dir, exe)
        # Construct test data
sample_size = 100
l_Qs = []
pos_l_Ds = []
for i in range(sample_size):
l_Q = np.random.rand(1, args.TRIGRAM_D)
l_Qs.append(l_Q)
l_D = np.random.rand(1, args.TRIGRAM_D)
pos_l_Ds.append(l_D)
res = []
for i in range(sample_size):
con_sim = exe.run(infer_program,
feed={"query": l_Qs[i].astype('float32').reshape(1,args.TRIGRAM_D),
"doc_pos": pos_l_Ds[i].astype('float32').reshape(1,args.TRIGRAM_D)},
fetch_list=fetch_vars,
return_numpy=True)
logger.info("query_doc_sim: {:.5f}".format(np.array(con_sim).reshape(-1,1)[0][0]))
if __name__ == "__main__":
import paddle
paddle.enable_static()
args = args.parse_args()
infer(args)
|
apache-2.0
| -6,892,686,518,800,901,000
| 31.465116
| 105
| 0.55914
| false
| 3.266979
| false
| false
| false
|
andres-liiver/IAPB13_suvendatud
|
Kodutoo_16/Kodutoo_16_Andres.py
|
1
|
2985
|
'''
Homework 16
14.11.2014
Andres Liiver
'''
import time
from matplotlib import pyplot as plt
from Tund16gen import *
def timeFunc(func, *args):
start = time.clock()
func(*args)
return time.clock() - start
def linear_search(lst, num):
for item in lst:
if item == num:
return True
return False
def binary_search(lst, num, sort=False):
if sort:
lst = sorted(lst)
imin = 0
imax = len(lst)-1
while imax >= imin:
imid = (imin+imax) // 2
if lst[imid] == num:
return True
elif lst[imid] < num:
imin = imid + 1
else:
imax = imid - 1
return False
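# Quick check (sketch): both searches agree on membership; pass sort=True to
# binary_search when the list is not already sorted.
#   data = [7, 3, 5, 1]
#   assert linear_search(data, 5)
#   assert binary_search(sorted(data), 5)
#   assert binary_search(data, 5, sort=True)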
def main():
linearTimes = []
binary1Times = []
binary2Times = []
ns = [2**i for i in range(1, 13)]
for n in ns:
lst, gen = gimme_my_input(n, "blah")
times = []
# linear search test
for i in range(len(lst)):
times.append(timeFunc(linear_search, lst, next(gen)))
avg_time = sum(times) / len(times)
linearTimes.append(avg_time)
# binary search test 1
times = []
sortedList = sorted(lst)
for i in range(len(lst)):
times.append(timeFunc(binary_search, sortedList, next(gen)))
avg_time = sum(times) / len(times)
binary1Times.append(avg_time)
# binary search test 2
times = []
for i in range(len(lst)):
times.append(timeFunc(binary_search, lst, next(gen), True))
avg_time = sum(times) / len(times)
binary2Times.append(avg_time)
# print table of results
print("| algorithm \t| n \t\t| time (s)")
print()
# print Linear Search
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Linear", n, linearTimes[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Linear", n, linearTimes[i]))
print()
# print Binary Search (presorted)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} | {1} \t\t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
else:
print("| {0} | {1} \t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
print()
# print Binary Search (sort)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
# plot the times
ax = plt.subplot()
ax.set_xlabel("n")
ax.set_xscale("log")
ax.set_ylabel("Time (s)")
ax.set_yscale("log")
ax.plot(ns, linearTimes, "r", label="Linear Search")
ax.plot(ns, binary1Times, "g", label="Binary Search (presorted)")
ax.plot(ns, binary2Times, "b", label="Binary Search (sort)")
    ax.legend(loc="upper left", shadow=True)
plt.show()
if __name__ == "__main__":
main()
|
mit
| -7,427,043,884,688,275,000
| 23.883333
| 90
| 0.529313
| false
| 3.213132
| false
| false
| false
|
shouya/thinking-dumps
|
automata/homework/project2/CYK.py
|
1
|
4714
|
'''
CYK algorithm for Context Free Language
Author: Chenguang Zhu
CS154, Stanford University
'''
import sys,traceback
import os
import string
maxProductionNum = 100 #max number of productions
VarNum = 4
production = [[0] * 3 for i in range(maxProductionNum+1)]
'''Prouductions in Chomsky Normal Form (CNF)
production[i][0] is the number for the variable (0~3; 0: S, 1: A, 2: B, 3: C)
If this production is A->BC (two variables), then production[i][1] and production[i][2] will contain the numbers for these two variables
If this production is A->a (a single terminal), then production[i][1] will contain the number for the terminal (0 or 1, 0: a, 1: b), production[i][2]=-1'''
X = [[[False]*3 for i in range(10)] for j in range(10)]
'''X[i][j][s]=true if and only if variable s (0~3; 0: S, 1: A, 2: B, 3: C) is in X_ij defined in CYK
Suppose the length of string to be processed is L, then 0<=i<=j<L '''
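# Encoding sketch, following the conventions above: the CNF rule S -> AB is
# stored as a production row [0, 1, 2], while the terminal rule C -> b is
# stored as [3, 1, -1] (terminal 'b' is encoded as 1, -1 marks the unused slot).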
#check whether (a,b,c) exists in production
def existProd(a, b, c):
global production
for i in range(len(production)):
if ((production[i][0]==a) and
(production[i][1]==b) and
(production[i][2]==c)):
return True
return False
'''CYK algorithm
Calculate the array X
w is the string to be processed'''
def calcCYK(w):
global X
global VarNum
L=len(w)
X=[[[False]*VarNum for i in range(L)] for j in range(L)]
# X=[[[] for i in range(L)] for j in range(L)]
for x in range(L):
calc_cell_basic(x, w)
for dist in range(1,L):
calc_row(dist, L)
tmp = [[lengthify(i) for i in j] for j in X]
X = tmp
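# Membership note (sketch): once calcCYK(w) has filled X, the input w is
# generated by the grammar exactly when the start variable S (number 0) is in
# the cell covering the whole string, i.e. X[0][len(w) - 1][0] is True.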
def calc_row(dist, l):
global X
for i in range(l - dist):
head = i
tail = i + dist
calc_cell(head, tail)
def lengthify(xs):
global VarNum
result = [False] * VarNum
i = 0
for x in xs:
result[i] = x
i += 1
return result
def calc_cell_basic(col, w):
global X
ww = w[col]
poss = [False] * VarNum
for i in range(7):
if existProd(i,ww,-1):
poss[i] = True
X[col][col] = poss
def prod(xs, ys):
result = []
for x in range(len(xs)):
for y in range(len(ys)):
if xs[x] and ys[y]:
for i in range(7):
if existProd(i, x, y):
result.append(i)
return result
def calc_cell(head, tail):
global X
poss = [False] * VarNum
for i in range(tail - head):
xs = X[head][head + i]
ys = X[head + i + 1][tail]
for i in prod(xs, ys):
poss[i] = True
X[head][tail] = poss
def Start(filename):
global X
global VarNum
global production
result=''
#read data case line by line from file
try:
br=open(filename,'r')
#example on Page 8 of lecture 15_CFL5
production=[[0]*3 for i in range(7)]
production[0][0]=0; production[0][1]=1; production[0][2]=2 #S->AB
production[1][0]=1; production[1][1]=2; production[1][2]=3 #A->BC
production[2][0]=1; production[2][1]=0; production[2][2]=-1 #A->a
production[3][0]=2; production[3][1]=1; production[3][2]=3 #B->AC
production[4][0]=2; production[4][1]=1; production[4][2]=-1 #B->b
production[5][0]=3; production[5][1]=0; production[5][2]=-1 #C->a
production[6][0]=3; production[6][1]=1; production[6][2]=-1 #C->b
result=''
#Read File Line By Line
for string in br:
string=string.strip()
print 'Processing '+string+'...'
length=len(string)
w=[0]*length
for i in range(length):
w[i]=ord(string[i])-ord('a') #convert 'a' to 0 and 'b' to 1
#Use CYK algorithm to calculate X
calcCYK(w)
#Get/print the full table X
for step in range(length-1,-1,-1):
for i in range(length-step):
j=i+step
for k in range(VarNum):
if (X[i][j][k]):
result=result+str(k)
result=result+' '
result=result+'\n'
#Close the input stream
br.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout)
result=result+'error'
return result
def main(filepath):
return Start(filepath)
if __name__ == '__main__':
main(sys.argv[1])
|
mit
| 6,106,430,936,488,291,000
| 27.098765
| 157
| 0.530972
| false
| 3.317382
| false
| false
| false
|
nexusriot/cinder
|
cinder/volume/drivers/remotefs.py
|
1
|
57137
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import inspect
import json
import os
import re
import tempfile
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import compute
from cinder import db
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
LOG = logging.getLogger(__name__)
nas_opts = [
# TODO(eharney): deprecate nas_ip and change this to nas_host
cfg.StrOpt('nas_ip',
default='',
help='IP address or Hostname of NAS system.'),
cfg.StrOpt('nas_login',
default='admin',
help='User name to connect to NAS system.'),
cfg.StrOpt('nas_password',
default='',
help='Password to connect to NAS system.',
secret=True),
cfg.IntOpt('nas_ssh_port',
default=22,
min=1, max=65535,
help='SSH port to use to connect to NAS system.'),
cfg.StrOpt('nas_private_key',
default='',
help='Filename of private key to use for SSH authentication.'),
cfg.StrOpt('nas_secure_file_operations',
default='auto',
help=('Allow network-attached storage systems to operate in a '
'secure environment where root level access is not '
'permitted. If set to False, access is as the root user '
'and insecure. If set to True, access is not as root. '
'If set to auto, a check is done to determine if this is '
'a new installation: True is used if so, otherwise '
'False. Default is auto.')),
cfg.StrOpt('nas_secure_file_permissions',
default='auto',
help=('Set more secure file permissions on network-attached '
'storage volume files to restrict broad other/world '
'access. If set to False, volumes are created with open '
'permissions. If set to True, volumes are created with '
'permissions for the cinder user and group (660). If '
'set to auto, a check is done to determine if '
'this is a new installation: True is used if so, '
'otherwise False. Default is auto.')),
cfg.StrOpt('nas_share_path',
default='',
help=('Path to the share to use for storing Cinder volumes. '
'For example: "/srv/export1" for an NFS server export '
'available at 10.0.5.10:/srv/export1 .')),
cfg.StrOpt('nas_mount_options',
default=None,
help=('Options used to mount the storage backend file system '
'where Cinder volumes are stored.')),
]
old_vol_type_opts = [cfg.DeprecatedOpt('glusterfs_sparsed_volumes'),
cfg.DeprecatedOpt('glusterfs_qcow2_volumes')]
volume_opts = [
cfg.StrOpt('nas_volume_prov_type',
default='thin',
choices=['thin', 'thick'],
deprecated_opts=old_vol_type_opts,
help=('Provisioning type that will be used when '
'creating volumes.')),
]
CONF = cfg.CONF
CONF.register_opts(nas_opts)
CONF.register_opts(volume_opts)
def locked_volume_id_operation(f, external=False):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named
with the id of the volume. This lock can be used by driver methods
to prevent conflicts with other operations modifying the same volume.
May be applied to methods that take a 'volume' or 'snapshot' argument.
"""
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
if call_args.get('volume'):
volume_id = call_args['volume']['id']
elif call_args.get('snapshot'):
volume_id = call_args['snapshot']['volume']['id']
else:
err_msg = _('The decorated method must accept either a volume or '
'a snapshot object')
raise exception.VolumeBackendAPIException(data=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, volume_id),
external=external)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
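# Usage sketch: the decorator wraps driver entry points that receive a volume
# or snapshot so that operations on the same volume id serialize on a shared
# named lock, as done for create_snapshot/delete_snapshot on RemoteFSSnapDriver
# below:
#
#   @locked_volume_id_operation
#   def delete_snapshot(self, snapshot):
#       return self._delete_snapshot(snapshot)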
class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
"""Common base for drivers that work like NFS."""
driver_volume_type = None
driver_prefix = 'remotefs'
volume_backend_name = None
SHARE_FORMAT_REGEX = r'.+:/.+'
def __init__(self, *args, **kwargs):
super(RemoteFSDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
if self.configuration:
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(volume_opts)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume['provider_location'],
'name': volume['name']}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(RemoteFSDriver, self).do_setup(context)
# Validate the settings for our secure file options.
self.configuration.nas_secure_file_permissions = \
self.configuration.nas_secure_file_permissions.lower()
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations.lower()
valid_secure_opts = ['auto', 'true', 'false']
secure_options = {'nas_secure_file_permissions':
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
"'auto', 'true', or 'false'") % err_parms
LOG.error(msg)
raise exception.InvalidConfigurationValue(msg)
def _get_provisioned_capacity(self):
"""Returns the provisioned capacity.
Get the sum of sizes of volumes, snapshots and any other
files on the mountpoint.
"""
provisioned_size = 0.0
for share in self.shares.keys():
mount_path = self._get_mount_point_for_share(share)
out, _ = self._execute('du', '--bytes', mount_path,
run_as_root=True)
provisioned_size += int(out.split()[0])
return round(provisioned_size / units.Gi, 2)
def _get_mount_point_base(self):
"""Returns the mount point base for the remote fs.
This method facilitates returning mount point base
for the specific remote fs. Override this method
in the respective driver to return the entry to be
used while attach/detach using brick in cinder.
If not overridden then it returns None without
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
:returns: provider_location update dict for database
"""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
# If touch file exist, set the bootable flag for the volume
if (os.path.isfile('/etc/cinder/recogimage')):
LOG.debug('DEBUG : setting bootable flag for the volume')
volume['bootable'] = 1
self._create_sparsed_file(volume_path, volume_size, volume)
# Do not try to change permissions of the file here, as we are operating on a sym-link that is not local
else:
self._create_sparsed_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and mount them locally."""
mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s', self._mounted_shares)
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
mounted_path = self.local_path(volume)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume.
Can optionally return a dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Delete snapshot.
Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _delete(self, path):
# Note(lpetrut): this method is needed in order to provide
# interoperability with Windows as it will be overridden.
self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
def _create_sparsed_file(self, path, size):
"""Creates a sparse file of a given size in GiB."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
"""Creates a regular file of given size in GiB."""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=self._execute_as_root)
def _fallocate(self, path, size):
"""Creates a raw file of given size in GiB using fallocate."""
self._execute('fallocate', '--length=%sG' % size,
path, run_as_root=True)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size in GiB."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
"""Sets access permissions for given NFS path.
Volume file permissions are set based upon the value of
secure_file_permissions: 'true' sets secure access permissions and
'false' sets more open (insecure) access permissions.
:param path: the volume file path.
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %(path)s is being set with permissions: '
'%(permissions)s',
{'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
LOG.warning(_LW('%(path)s is being set with open permissions: '
'%(perm)s'), {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
"""Sets read-write permissions to the owner for the path."""
self._execute('chmod', 'u+rw', path,
run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
remotefs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume['name'])
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
run_as_root = self._execute_as_root
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'],
run_as_root=run_as_root)
# NOTE (leseb): Set the virtual size of the image
# the raw conversion overwrote the destination file
# (which had the correct size)
# with the fetched glance image size,
# thus the initial 'size' parameter is not honored
# this sets the size to the one asked in the first place by the user
# and then verify the final virtual size
image_utils.resize_image(self.local_path(volume), volume['size'],
run_as_root=run_as_root)
data = image_utils.qemu_img_info(self.local_path(volume),
run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_ip,
self.configuration.nas_share_path)):
LOG.debug('Using nas_ip and nas_share_path configuration.')
nas_ip = self.configuration.nas_ip
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_ip, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
msg = (_("Share %s ignored due to invalid format. Must "
"be of form address:/export. Please check the "
"nas_ip and nas_share_path settings."),
share_address)
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip()
# Replace \040 with a space, to support paths with spaces
share_address = share_address.replace("\\040", " ")
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error(_LE("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
self._stats = data
def _get_capacity_info(self, share):
raise NotImplementedError()
def _find_share(self, volume_size_in_gib):
raise NotImplementedError()
def _ensure_share_mounted(self, share):
raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
This method must be overridden by child wishing to use secure
NAS file operations. This base method will set the NAS security
options to false.
"""
doc_html = "http://docs.openstack.org/admin-guide-cloud" \
"/blockstorage_nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
LOG.warning(_LW("The NAS file operations will be run as root: "
"allowing root level access at the storage backend. "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration."),
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered "
"an insecure NAS environment. Please see %s for "
"information on a secure NFS configuration."),
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info(_LI('Cinder secure environment '
'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.'), file_path)
except IOError as err:
LOG.error(_LE('Failed to created Cinder secure '
'environment indicator file: %s'),
err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
"""Base class for remotefs drivers implementing qcow2 snapshots.
Driver must implement:
_local_volume_dir(self, volume)
"""
def __init__(self, *args, **kwargs):
self._remotefsclient = None
self.base = None
self._nova = None
super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(RemoteFSSnapDriver, self).do_setup(context)
self._nova = compute.API()
def _local_volume_dir(self, volume):
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
def _qemu_img_info_base(self, path, volume_name, basedir):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
"""
info = image_utils.qemu_img_info(path)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
backing_file_template = \
"(%(basedir)s/[0-9a-f]+/)?%" \
"(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
'basedir': basedir,
'volname': volume_name
}
if not re.match(backing_file_template, info.backing_file):
msg = _("File %(path)s has invalid backing file "
"%(bfile)s, aborting.") % {'path': path,
'bfile': info.backing_file}
raise exception.RemoteFSException(msg)
info.backing_file = os.path.basename(info.backing_file)
return info
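    # Accepted backing file names (sketch), given basedir and volume_name: the
    # bare volume file ("volume-1234"), a snapshot delta ("volume-1234.<uuid>"),
    # or a temporary clone snapshot ("volume-1234.tmp-snap-<uuid>"), optionally
    # prefixed by "<basedir>/<share-hash>/". Any other backing file name raises
    # RemoteFSException.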
def _qemu_img_info(self, path, volume_name):
raise NotImplementedError()
def _img_commit(self, path):
self._execute('qemu-img', 'commit', path,
run_as_root=self._execute_as_root)
self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
'-F', volume_format, run_as_root=self._execute_as_root)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
:param: info_path: path to file
:param: empty_if_missing: True=return empty dict if no file
"""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
"""
output = []
info = self._qemu_img_info(path, volume['name'])
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path, volume['name'])
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
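    # Shape of the returned chain (sketch), reusing the volume-1234 naming from
    # the snapshot docstring further below: for an active image
    # volume-1234.bbbb backed by volume-1234.aaaa backed by volume-1234, the
    # method returns
    #   [{'filename': 'volume-1234.bbbb', 'backing-filename': 'volume-1234.aaaa'},
    #    {'filename': 'volume-1234.aaaa', 'backing-filename': 'volume-1234'},
    #    {'filename': 'volume-1234', 'backing-filename': None}]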
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str.
Returns string in a hex format.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
def _get_mount_point_base(self):
return self.base
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.
If not, raise an exception.
:param path: path to test
:raises: RemoteFSException
:returns: None
"""
prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
try:
tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
except OSError:
msg = _('Share at %(dir)s is not writable by the '
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.RemoteFSException(msg)
def _copy_volume_to_image(self, context, volume, image_service,
image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume['name'])
backing_file = info.backing_file
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if backing_file or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
volume['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: RemoteFSException(msg) if operation fails
:returns: None
"""
LOG.debug('Deleting snapshot %s:', snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
vol_path = self._local_volume_dir(snapshot['volume'])
self._ensure_share_writable(vol_path)
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.'), snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(
snapshot_path,
snapshot['volume']['name'])
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
LOG.warning(_LW('No backing file found for %s, allowing '
'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(vol_path, base_file)
base_file_img_info = self._qemu_img_info(base_path,
snapshot['volume']['name'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = os.path.join(vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.items():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
LOG.debug('No %(base_id)s found for %(file)s',
{'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted, |
# exist) | commited down) |
self._img_commit(snapshot_path)
# Active file has changed
snap_info['active'] = base_file
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# (guaranteed to | (being deleted, | (guaranteed to | (may exist)
# exist, not | commited down) | exist, needs |
# used here) | | ptr update) |
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.RemoteFSException(msg)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.RemoteFSException(msg)
self._img_commit(snapshot_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume['size'])
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
info = self._qemu_img_info(backing_path_full_path,
snapshot['volume']['name'])
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
self._set_rw_permissions(new_snap_path)
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot, providing a
qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
responsible for transitioning the VM between them and handling live
transfers of data between files as required.
If volume is detached, create locally with qemu-img. Cinder handles
manipulation of qcow2 files.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb', (* changed!)
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' } (* added!)
4. Snapshot deletion when volume is attached ('in-use' state):
* When first snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "aaaa" and
makes snapshot with id "bbbb" point to the base image.
Snapshot with id "bbbb" is the active image.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "bbbb" by
pulling volume-1234's data into volume-1234.bbbb. This
(logically) removes snapshot with id "bbbb" and the active
file remains the same.
volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb' }
TODO (deepakcs): Change this once Nova supports blockCommit for
in-use volumes.
5. Snapshot deletion when volume is detached ('available' state):
* When first snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new
parent.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The base image, volume-1234 becomes the active image for this
volume again.
volume-1234
info file: { 'active': 'volume-1234' } (* changed!)
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
new_snap_path = self._get_new_snap_path(snapshot)
if status == 'in-use':
self._create_snapshot_online(snapshot,
backing_filename,
new_snap_path)
else:
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
context = snapshot['context']
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s', result)
except Exception:
LOG.exception(_LE('Call to Nova to create snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s',
{'id': snapshot['id'],
'status': s['status']})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.RemoteFSException(msg)
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if info['active_file'] == info['snapshot_file']:
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
try:
self._nova.delete_volume_snapshot(
context,
snapshot['id'],
delete_info)
except Exception:
LOG.exception(_LE('Call to Nova delete snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
LOG.debug('status of snapshot %s is still "deleting"... '
'waiting', snapshot['id'])
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot['id'],
'status': s['status']}
raise exception.RemoteFSException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot['id']}
raise exception.RemoteFSException(msg)
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._execute('rm', '-f', path_to_delete, run_as_root=True)
@locked_volume_id_operation
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
@locked_volume_id_operation
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@locked_volume_id_operation
def create_volume_from_snapshot(self, volume, snapshot):
return self._create_volume_from_snapshot(volume, snapshot)
@locked_volume_id_operation
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
return self._create_cloned_volume(volume, src_vref)
@locked_volume_id_operation
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
return self._copy_volume_to_image(context,
volume,
image_service,
image_meta)
|
apache-2.0
| -5,967,509,913,570,102,000
| 38.459254
| 109
| 0.548926
| false
| 4.404301
| true
| false
| false
|
Connexions/openstax-cms
|
news/models.py
|
1
|
15456
|
from bs4 import BeautifulSoup
from django.db import models
from django import forms
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField, StreamField
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.embeds.blocks import EmbedBlock
from wagtail.search import index
from wagtail.core import blocks
from wagtail.core.blocks import TextBlock, StructBlock, StreamBlock, FieldBlock, CharBlock, RichTextBlock, RawHTMLBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.documents.blocks import DocumentChooserBlock
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.api import APIField
from wagtail.images.api.fields import ImageRenditionField
from wagtail.core.models import Site
from modelcluster.fields import ParentalKey
from modelcluster.contrib.taggit import ClusterTaggableManager
from taggit.models import TaggedItemBase
from openstax.functions import build_image_url
from snippets.models import NewsSource
class ImageChooserBlock(ImageChooserBlock):
def get_api_representation(self, value, context=None):
if value:
return {
'id': value.id,
'title': value.title,
'original': value.get_rendition('original').attrs_dict,
}
class PullQuoteBlock(StructBlock):
quote = TextBlock("quote title")
attribution = CharBlock()
class Meta:
icon = "openquote"
class ImageFormatChoiceBlock(FieldBlock):
field = forms.ChoiceField(choices=(
('left', 'Wrap left'), ('right', 'Wrap right'), ('mid', 'Mid width'), ('full', 'Full width'),
))
class HTMLAlignmentChoiceBlock(FieldBlock):
field = forms.ChoiceField(choices=(
('normal', 'Normal'), ('full', 'Full width'),
))
class ImageBlock(StructBlock):
image = ImageChooserBlock()
caption = RichTextBlock()
alignment = ImageFormatChoiceBlock()
alt_text = blocks.CharBlock(required=False)
class AlignedHTMLBlock(StructBlock):
html = RawHTMLBlock()
alignment = HTMLAlignmentChoiceBlock()
class Meta:
icon = "code"
class BlogStreamBlock(StreamBlock):
paragraph = RichTextBlock(icon="pilcrow")
aligned_image = ImageBlock(label="Aligned image", icon="image")
pullquote = PullQuoteBlock()
aligned_html = AlignedHTMLBlock(icon="code", label='Raw HTML')
document = DocumentChooserBlock(icon="doc-full-inverse")
embed = EmbedBlock(icon="media", label="Embed Media URL")
class NewsIndex(Page):
intro = RichTextField(blank=True)
press_kit = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def articles(self):
articles = NewsArticle.objects.live().child_of(self)
article_data = {}
for article in articles:
article_data['{}'.format(article.slug)] = {
'detail_url': '/apps/cms/api/v2/pages/{}/'.format(article.pk),
'date': article.date,
'heading': article.heading,
'subheading': article.subheading,
'body_blurb': article.first_paragraph,
'pin_to_top': article.pin_to_top,
'article_image': article.article_image,
'article_image_alt': article.featured_image_alt_text,
'author': article.author,
'tags': [tag.name for tag in article.tags.all()],
}
return article_data
content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
DocumentChooserPanel('press_kit'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('intro'),
APIField('press_kit'),
APIField('articles'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
subpage_types = ['news.NewsArticle']
parent_page_types = ['pages.HomePage']
max_count = 1
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/blog/'.format(Site.find_for_request(request).root_url),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
class NewsArticleTag(TaggedItemBase):
content_object = ParentalKey('news.NewsArticle', related_name='tagged_items')
class NewsArticle(Page):
date = models.DateField("Post date")
heading = models.CharField(max_length=250, help_text="Heading displayed on website")
subheading = models.CharField(max_length=250, blank=True, null=True)
author = models.CharField(max_length=250)
featured_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text="Image should be 1200 x 600"
)
featured_image_alt_text = models.CharField(max_length=250, blank=True, null=True)
def get_article_image(self):
return build_image_url(self.featured_image)
article_image = property(get_article_image)
tags = ClusterTaggableManager(through=NewsArticleTag, blank=True)
body = StreamField(BlogStreamBlock())
pin_to_top = models.BooleanField(default=False)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def first_paragraph(self):
paragraphs = []
for block in self.body:
if block.block_type == 'paragraph':
paragraphs.append(str(block.value))
first_paragraph_parsed = []
soup = BeautifulSoup(paragraphs[0], "html.parser")
for tag in soup.findAll('p'):
first_paragraph_parsed.append(tag)
return str(first_paragraph_parsed[0])
search_fields = Page.search_fields + [
index.SearchField('body'),
index.SearchField('tags'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('title'),
FieldPanel('heading'),
FieldPanel('subheading'),
FieldPanel('author'),
ImageChooserPanel('featured_image'),
FieldPanel('featured_image_alt_text'),
FieldPanel('tags'),
StreamFieldPanel('body'),
FieldPanel('pin_to_top'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('date'),
APIField('title'),
APIField('heading'),
APIField('subheading'),
APIField('author'),
APIField('article_image'),
APIField('featured_image_small', serializer=ImageRenditionField('width-420', source='featured_image')),
APIField('featured_image_alt_text'),
APIField('tags'),
APIField('body'),
APIField('pin_to_top'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
parent_page_types = ['news.NewsIndex']
def save(self, *args, **kwargs):
if self.pin_to_top:
current_pins = self.__class__.objects.filter(pin_to_top=True)
for pin in current_pins:
if pin != self:
pin.pin_to_top = False
pin.save()
return super(NewsArticle, self).save(*args, **kwargs)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/blog/{}/'.format(Site.find_for_request(request).root_url, self.slug),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
class Experts(models.Model):
name = models.CharField(max_length=255)
email = models.EmailField(blank=True, null=True)
title = models.CharField(max_length=255)
bio = models.TextField()
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
def get_expert_image(self):
return build_image_url(self.image)
expert_image = property(get_expert_image)
api_fields = [
APIField('name'),
APIField('email'),
APIField('title'),
APIField('bio'),
APIField('expert_image')
]
panels = [
FieldPanel('name'),
FieldPanel('email'),
FieldPanel('title'),
FieldPanel('bio'),
ImageChooserPanel('image'),
]
class ExpertsBios(Orderable, Experts):
experts_bios = ParentalKey('news.PressIndex', related_name='experts_bios')
class NewsMentionChooserBlock(SnippetChooserBlock):
def get_api_representation(self, value, context=None):
if value:
return {
'id': value.id,
'name': value.name,
'logo': value.news_logo,
}
class NewsMentionBlock(blocks.StructBlock):
source = NewsMentionChooserBlock(NewsSource)
url = blocks.URLBlock()
headline = blocks.CharBlock()
date = blocks.DateBlock()
class Meta:
icon = 'document'
class MissionStatement(models.Model):
statement = models.CharField(max_length=255)
api_fields = ('statement', )
class MissionStatements(Orderable, MissionStatement):
mission_statements = ParentalKey('news.PressIndex', related_name='mission_statements')
class PressIndex(Page):
press_kit = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_press_kit(self):
return build_image_url(self.press_kit)
press_kit_url = property(get_press_kit)
press_inquiry_name = models.CharField(max_length=255, blank=True, null=True)
press_inquiry_phone = models.CharField(max_length=255)
press_inquiry_email = models.EmailField()
experts_heading = models.CharField(max_length=255)
experts_blurb = models.TextField()
mentions = StreamField([
('mention', NewsMentionBlock(icon='document')),
], null=True)
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/press/'.format(Site.find_for_request(request).root_url),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
@property
def releases(self):
releases = PressRelease.objects.live().child_of(self)
releases_data = {}
for release in releases:
releases_data['press/{}'.format(release.slug)] = {
'detail_url': '/apps/cms/api/v2/pages/{}/'.format(release.pk),
'date': release.date,
'heading': release.heading,
'excerpt': release.excerpt,
'author': release.author,
}
return releases_data
content_panels = Page.content_panels + [
DocumentChooserPanel('press_kit'),
FieldPanel('press_inquiry_name'),
FieldPanel('press_inquiry_phone'),
FieldPanel('press_inquiry_email'),
FieldPanel('experts_heading'),
FieldPanel('experts_blurb'),
InlinePanel('experts_bios', label="Experts"),
StreamFieldPanel('mentions'),
InlinePanel('mission_statements', label="Mission Statement"),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('press_kit'),
APIField('press_kit_url'),
APIField('releases'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image'),
APIField('experts_heading'),
APIField('experts_blurb'),
APIField('experts_bios'),
APIField('mentions'),
APIField('mission_statements'),
APIField('press_inquiry_name'),
APIField('press_inquiry_phone'),
APIField('press_inquiry_email')
]
subpage_types = ['news.PressRelease']
parent_page_types = ['pages.HomePage']
max_count = 1
class PressRelease(Page):
date = models.DateField("PR date")
heading = models.CharField(max_length=250, help_text="Heading displayed on website")
subheading = models.CharField(max_length=250, blank=True, null=True)
author = models.CharField(max_length=250)
featured_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
featured_image_alt_text = models.CharField(max_length=250, blank=True, null=True)
def get_article_image(self):
return build_image_url(self.featured_image)
article_image = property(get_article_image)
excerpt = models.CharField(max_length=255)
body = StreamField(BlogStreamBlock())
promote_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def get_sitemap_urls(self, request=None):
return [
{
'location': '{}/press/{}'.format(Site.find_for_request(request).root_url, self.slug),
'lastmod': (self.last_published_at or self.latest_revision_created_at),
}
]
search_fields = Page.search_fields + [
index.SearchField('body'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('title'),
FieldPanel('heading'),
FieldPanel('subheading'),
FieldPanel('author'),
ImageChooserPanel('featured_image'),
FieldPanel('featured_image_alt_text'),
FieldPanel('excerpt'),
StreamFieldPanel('body'),
]
promote_panels = [
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('search_description'),
ImageChooserPanel('promote_image')
]
api_fields = [
APIField('date'),
APIField('title'),
APIField('heading'),
APIField('subheading'),
APIField('author'),
APIField('article_image'),
APIField('featured_image_alt_text'),
APIField('excerpt'),
APIField('body'),
APIField('slug'),
APIField('seo_title'),
APIField('search_description'),
APIField('promote_image')
]
|
agpl-3.0
| -7,732,224,415,095,894,000
| 29.788845
| 119
| 0.614777
| false
| 3.938838
| false
| false
| false
|
gonicus/gosa
|
common/src/gosa/common/components/mqtt_proxy.py
|
1
|
3896
|
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import uuid
from tornado import gen
from gosa.common import Environment
from gosa.common.components.json_exception import JSONRPCException
from gosa.common.components.mqtt_handler import MQTTHandler
from gosa.common.gjson import dumps, loads
from tornado.concurrent import Future
class MQTTException(Exception):
pass
class MQTTServiceProxy(object):
"""
The MQTTServiceProxy provides a simple way to use GOsa RPC
services from various clients. Using the proxy object, you
can directly call methods without the need to know where
it actually gets executed::
>>> from gosa.common.components.mqtt_proxy import MQTTServiceProxy
>>> proxy = MQTTServiceProxy('localhost')
>>> proxy.getMethods()
This will return a dictionary describing the available methods.
=============== ============
Parameter Description
=============== ============
mqttHandler MQTTHandler used to connect to the MQTT service broker
serviceAddress Address string describing the target queue to bind to, must be skipped if no special queue is needed
serviceName *internal*
methods *internal*
=============== ============
The MQTTService proxy creates a temporary MQTT *reply to* queue, which
is used for command results.
"""
worker = {}
def __init__(self, mqttHandler=None, serviceAddress=None, serviceName=None,
methods=None):
self.__handler = mqttHandler if mqttHandler is not None else MQTTHandler()
self.__serviceName = serviceName
self.__serviceAddress = serviceAddress
self.__methods = methods
self.env = Environment.getInstance()
# Retrieve methods
if self.__methods is None:
self.__serviceName = "getMethods"
self.__methods = self.__call__()
self.__serviceName = None
#pylint: disable=W0613
def login(self, user, password): # pragma: nocover
return True
def logout(self): # pragma: nocover
return True
def close(self): # pragma: nocover
pass
def getProxy(self):
return MQTTServiceProxy(self.__handler, self.__serviceAddress, None, methods=self.__methods)
def __getattr__(self, name):
if self.__serviceName is not None:
name = "%s/%s" % (self.__serviceName, name)
return MQTTServiceProxy(self.__handler, self.__serviceAddress, name, methods=self.__methods)
@gen.coroutine
def __call__(self, *args, **kwargs):
data = {}
if '__user__' in kwargs:
data['user'] = kwargs['__user__']
del kwargs['__user__']
if '__session_id__' in kwargs:
data['session_id'] = kwargs['__session_id__']
del kwargs['__session_id__']
# Default to 'core' queue
call_id = uuid.uuid4()
topic = "%s/%s" % (self.__serviceAddress, call_id)
if isinstance(self.__methods, Future):
self.__methods = yield self.__methods
if self.__methods and self.__serviceName not in self.__methods:
raise NameError("name '%s' not defined" % self.__serviceName)
# Send
data.update({
"method": self.__serviceName,
"id": "mqttrpc",
"sender": self.env.uuid
})
data["kwparams"] = kwargs
data["params"] = args
postdata = dumps(data)
response = yield self.__handler.send_sync_message(postdata, topic, qos=2)
resp = loads(response)
if 'error' in resp and resp['error'] is not None:
raise JSONRPCException(resp['error'])
return resp['result']
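# Illustrative sketch (not part of the original module, hypothetical values):
# a call such as ``proxy.getMethods()`` publishes a JSON payload shaped like
#     {"method": "getMethods", "id": "mqttrpc", "sender": "<env uuid>",
#      "params": [], "kwparams": {}}
# on the topic "<serviceAddress>/<random call uuid>" and awaits a JSON-RPC
# style reply carrying either a "result" or an "error" member.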
|
lgpl-2.1
| 8,577,437,640,126,136,000
| 31.198347
| 120
| 0.61037
| false
| 4.198276
| false
| false
| false
|
Sightline-Networks/email_bar
|
email_bar.py
|
1
|
1124
|
#!/bin/python3
from argparse import ArgumentParser
from configparser import ConfigParser
from os.path import expanduser
from mailbox import MaildirMessage, Maildir
config = ConfigParser()
config_mailboxes = {}
config.read(expanduser('~/.config/email_bar.cfg'))
parser = ArgumentParser()
parser.add_argument('--only', help='only check specified mailbox', action='store')
parser.add_argument('--no-title', help='do not display the title', action='store_true')
args = parser.parse_args()
if args.only:
config_mailboxes[args.only] = 0
# Else read through the config and check all of the mailboxes
else:
for mailbox in config.sections():
config_mailboxes[mailbox] = 0
# Iter through and see what has not been read
for mailbox in config_mailboxes:
maildir = Maildir(config.get(mailbox, 'path'))
for mail in maildir:
if 'S' not in mail.get_flags():
config_mailboxes[mailbox] += 1
for mailbox in config_mailboxes.keys():
if args.no_title:
print(config_mailboxes[mailbox])
else:
print("%s: %s " % (mailbox, config_mailboxes[mailbox]))
|
gpl-2.0
| -6,381,215,655,860,284,000
| 22.416667
| 87
| 0.689502
| false
| 3.721854
| true
| false
| false
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/kafka/protocol/admin.py
|
1
|
7824
|
from __future__ import absolute_import
from .api import Request, Response
from .types import Array, Boolean, Bytes, Int16, Int32, Schema, String
class ApiVersionResponse_v0(Response):
API_KEY = 18
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16)))
)
class ApiVersionResponse_v1(Response):
API_KEY = 18
API_VERSION = 1
SCHEMA = Schema(
('error_code', Int16),
('api_versions', Array(
('api_key', Int16),
('min_version', Int16),
('max_version', Int16))),
('throttle_time_ms', Int32)
)
class ApiVersionRequest_v0(Request):
API_KEY = 18
API_VERSION = 0
RESPONSE_TYPE = ApiVersionResponse_v0
SCHEMA = Schema()
class ApiVersionRequest_v1(Request):
API_KEY = 18
API_VERSION = 1
RESPONSE_TYPE = ApiVersionResponse_v1
SCHEMA = ApiVersionRequest_v0.SCHEMA
ApiVersionRequest = [ApiVersionRequest_v0, ApiVersionRequest_v1]
ApiVersionResponse = [ApiVersionResponse_v0, ApiVersionResponse_v1]
class CreateTopicsResponse_v0(Response):
API_KEY = 19
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class CreateTopicsResponse_v1(Response):
API_KEY = 19
API_VERSION = 1
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsResponse_v2(Response):
API_KEY = 19
API_VERSION = 2
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16),
('error_message', String('utf-8'))))
)
class CreateTopicsRequest_v0(Request):
API_KEY = 19
API_VERSION = 0
RESPONSE_TYPE = CreateTopicsResponse_v0
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32)
)
class CreateTopicsRequest_v1(Request):
API_KEY = 19
API_VERSION = 1
RESPONSE_TYPE = CreateTopicsResponse_v1
SCHEMA = Schema(
('create_topic_requests', Array(
('topic', String('utf-8')),
('num_partitions', Int32),
('replication_factor', Int16),
('replica_assignment', Array(
('partition_id', Int32),
('replicas', Array(Int32)))),
('configs', Array(
('config_key', String('utf-8')),
('config_value', String('utf-8')))))),
('timeout', Int32),
('validate_only', Boolean)
)
class CreateTopicsRequest_v2(Request):
API_KEY = 19
API_VERSION = 2
RESPONSE_TYPE = CreateTopicsResponse_v2
SCHEMA = CreateTopicsRequest_v1.SCHEMA
CreateTopicsRequest = [
CreateTopicsRequest_v0, CreateTopicsRequest_v1, CreateTopicsRequest_v2
]
CreateTopicsResponse = [
CreateTopicsResponse_v0, CreateTopicsResponse_v1, CreateTopicsResponse_v2
]
class DeleteTopicsResponse_v0(Response):
API_KEY = 20
API_VERSION = 0
SCHEMA = Schema(
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsResponse_v1(Response):
API_KEY = 20
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('topic_error_codes', Array(
('topic', String('utf-8')),
('error_code', Int16)))
)
class DeleteTopicsRequest_v0(Request):
API_KEY = 20
API_VERSION = 0
RESPONSE_TYPE = DeleteTopicsResponse_v0
SCHEMA = Schema(
('topics', Array(String('utf-8'))),
('timeout', Int32)
)
class DeleteTopicsRequest_v1(Request):
API_KEY = 20
API_VERSION = 1
RESPONSE_TYPE = DeleteTopicsResponse_v1
SCHEMA = DeleteTopicsRequest_v0.SCHEMA
DeleteTopicsRequest = [DeleteTopicsRequest_v0, DeleteTopicsRequest_v1]
DeleteTopicsResponse = [DeleteTopicsResponse_v0, DeleteTopicsResponse_v1]
class ListGroupsResponse_v0(Response):
API_KEY = 16
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsResponse_v1(Response):
API_KEY = 16
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest_v0(Request):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse_v0
SCHEMA = Schema()
class ListGroupsRequest_v1(Request):
API_KEY = 16
API_VERSION = 1
RESPONSE_TYPE = ListGroupsResponse_v1
SCHEMA = ListGroupsRequest_v0.SCHEMA
ListGroupsRequest = [ListGroupsRequest_v0, ListGroupsRequest_v1]
ListGroupsResponse = [ListGroupsResponse_v0, ListGroupsResponse_v1]
class DescribeGroupsResponse_v0(Response):
API_KEY = 15
API_VERSION = 0
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsResponse_v1(Response):
API_KEY = 15
API_VERSION = 1
SCHEMA = Schema(
('throttle_time_ms', Int32),
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest_v0(Request):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse_v0
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
class DescribeGroupsRequest_v1(Request):
API_KEY = 15
API_VERSION = 1
RESPONSE_TYPE = DescribeGroupsResponse_v1
SCHEMA = DescribeGroupsRequest_v0.SCHEMA
DescribeGroupsRequest = [DescribeGroupsRequest_v0, DescribeGroupsRequest_v1]
DescribeGroupsResponse = [DescribeGroupsResponse_v0, DescribeGroupsResponse_v1]
class SaslHandShakeResponse_v0(Response):
API_KEY = 17
API_VERSION = 0
SCHEMA = Schema(
('error_code', Int16),
('enabled_mechanisms', Array(String('utf-8')))
)
class SaslHandShakeRequest_v0(Request):
API_KEY = 17
API_VERSION = 0
RESPONSE_TYPE = SaslHandShakeResponse_v0
SCHEMA = Schema(
('mechanism', String('utf-8'))
)
SaslHandShakeRequest = [SaslHandShakeRequest_v0]
SaslHandShakeResponse = [SaslHandShakeResponse_v0]
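# Illustrative sketch (not part of the original module, hypothetical usage):
# the *Request/*Response lists above are indexed by API version, so a caller
# could build a v0 topic-deletion request roughly as
#     req = DeleteTopicsRequest[0](topics=['my-topic'], timeout=30000)
#     assert req.API_KEY == 20 and req.RESPONSE_TYPE is DeleteTopicsResponse_v0
# with the wire encoding/decoding handled by the Request/Schema base classes.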
|
mit
| -3,200,656,330,581,579,300
| 25.255034
| 79
| 0.576176
| false
| 3.564465
| true
| false
| false
|
kylef/bluepaste
|
bluepaste/models.py
|
1
|
2381
|
import datetime
import json
from hashlib import sha1
import requests
import peewee
from rivr_peewee import Database
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import JsonLexer
from bluepaste.lexer import BlueprintLexer
from bluepaste.config import BLUEPRINT_PARSER_URL
database = Database()
EXPIRE_CHOICES = (
(600, 'In 10 minutes'),
(3600, 'In one hour'),
(3600*24, 'In one day'),
(3600*24*7, 'In one week'),
(3600*24*14, 'In two weeks'),
(3600*24*30, 'In one month'),
)
EXPIRE_DEFAULT = 3600*24*14
class User(database.Model):
email = peewee.CharField(unique=True)
class Blueprint(database.Model):
slug = peewee.CharField(max_length=40, unique=True)
expires = peewee.DateTimeField()
author = peewee.ForeignKeyField(User, related_name='blueprints', null=True)
def create_revision(self, message, content):
ast = requests.post(BLUEPRINT_PARSER_URL, data=content).json()['ast']
ast_json = json.dumps(ast)
created_at = datetime.datetime.now()
slug_content = '{}\n{}'.format(created_at.isoformat(), content)
slug = sha1(slug_content).hexdigest()
return Revision.create(blueprint=self, slug=slug, content=content, message=message, ast_json=ast_json)
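        # Illustrative note (hypothetical response): BLUEPRINT_PARSER_URL is
        # assumed to answer with JSON shaped like {"ast": {...}}; the AST is
        # stored verbatim in ``ast_json`` and the revision slug is the SHA-1
        # of "<ISO timestamp>\n<blueprint source>".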
@property
def last_revision(self):
return self.revisions[0]
class Revision(database.Model):
blueprint = peewee.ForeignKeyField(Blueprint, related_name='revisions')
slug = peewee.CharField(max_length=40, unique=True)
content = peewee.TextField()
created_at = peewee.DateTimeField(default=datetime.datetime.now)
ast_json = peewee.TextField()
message = peewee.TextField(null=True)
class Meta:
order_by = ('-created_at',)
indexes = (
(('blueprint', 'slug'), True),
)
def __str__(self):
return self.content
@property
def highlighted_content(self):
return highlight(self.content, BlueprintLexer(), HtmlFormatter())
@property
def ast(self):
if not hasattr(self, '_ast'):
self._ast = json.loads(self.ast_json)
return self._ast
@property
def highlighted_ast(self):
ast = json.dumps(self.ast, sort_keys=True, indent=2, separators=(',', ': '))
return highlight(ast, JsonLexer(), HtmlFormatter())
|
mit
| 5,149,579,032,192,973,000
| 27.686747
| 110
| 0.660227
| false
| 3.663077
| false
| false
| false
|
jacquerie/inspire-dojson
|
inspire_dojson/hep/rules/bd0xx.py
|
1
|
12752
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""DoJSON rules for MARC fields in 0xx."""
from __future__ import absolute_import, division, print_function
import re
from collections import defaultdict
import pycountry
from dojson import utils
from idutils import is_arxiv_post_2007, is_doi, is_handle, normalize_doi
from inspire_schemas.api import load_schema
from inspire_schemas.utils import normalize_arxiv_category
from inspire_utils.helpers import force_list
from ..model import hep, hep2marc
from ...utils import force_single_element, normalize_isbn
RE_LANGUAGE = re.compile(r'/| or | and |,|=|\s+')
@hep.over('isbns', '^020..')
@utils.for_each_value
def isbns(self, key, value):
"""Populate the ``isbns`` key."""
def _get_medium(value):
def _normalize(medium):
schema = load_schema('hep')
valid_media = schema['properties']['isbns']['items']['properties']['medium']['enum']
medium = medium.lower().replace('-', '').replace(' ', '')
if medium in valid_media:
return medium
elif medium == 'ebook':
return 'online'
elif medium == 'paperback':
return 'softcover'
return ''
medium = force_single_element(value.get('b', ''))
normalized_medium = _normalize(medium)
return normalized_medium
def _get_isbn(value):
a_value = force_single_element(value.get('a', ''))
normalized_a_value = a_value.replace('.', '')
if normalized_a_value:
return normalize_isbn(normalized_a_value)
return {
'medium': _get_medium(value),
'value': _get_isbn(value),
}
@hep2marc.over('020', 'isbns')
@utils.for_each_value
def isbns2marc(self, key, value):
"""Populate the ``020`` MARC field."""
return {
'a': value.get('value'),
'b': value.get('medium'),
}
@hep.over('dois', '^0247.')
def dois(self, key, value):
"""Populate the ``dois`` key.
Also populates the ``persistent_identifiers`` key through side effects.
"""
def _get_first_non_curator_source(sources):
sources_without_curator = [el for el in sources if el.upper() != 'CURATOR']
return force_single_element(sources_without_curator)
def _get_material(value):
MATERIAL_MAP = {
'ebook': 'publication',
}
q_value = force_single_element(value.get('q', ''))
normalized_q_value = q_value.lower()
return MATERIAL_MAP.get(normalized_q_value, normalized_q_value)
def _is_doi(id_, type_):
return (not type_ or type_.upper() == 'DOI') and is_doi(id_)
def _is_handle(id_, type_):
return (not type_ or type_.upper() == 'HDL') and is_handle(id_)
dois = self.get('dois', [])
persistent_identifiers = self.get('persistent_identifiers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
material = _get_material(value)
schema = force_single_element(value.get('2', ''))
sources = force_list(value.get('9'))
source = _get_first_non_curator_source(sources)
if _is_doi(id_, schema):
dois.append({
'material': material,
'source': source,
'value': normalize_doi(id_),
})
else:
schema = 'HDL' if _is_handle(id_, schema) else schema
persistent_identifiers.append({
'material': material,
'schema': schema,
'source': source,
'value': id_,
})
self['persistent_identifiers'] = persistent_identifiers
return dois
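# Illustrative example (hypothetical record): a ``0247`` field such as
#     {'a': '10.1234/example.5678', '2': 'DOI', '9': 'arXiv', 'q': 'ebook'}
# ends up in ``dois`` as
#     {'material': 'publication', 'source': 'arXiv', 'value': '10.1234/example.5678'},
# while a handle (schema 'HDL') would be routed to ``persistent_identifiers``.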
@hep2marc.over('0247', '^dois$')
@utils.for_each_value
def dois2marc(self, key, value):
"""Populate the ``0247`` MARC field."""
return {
'2': 'DOI',
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
@hep2marc.over('0247', '^persistent_identifiers$')
@utils.for_each_value
def persistent_identifiers2marc(self, key, value):
"""Populate the ``0247`` MARC field."""
return {
'2': value.get('schema'),
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
@hep.over('texkeys', '^035..')
def texkeys(self, key, value):
"""Populate the ``texkeys`` key.
Also populates the ``external_system_identifiers`` and ``_desy_bookkeeping`` keys through side effects.
"""
def _is_oai(id_, schema):
return id_.startswith('oai:')
def _is_desy(id_, schema):
return id_ and schema in ('DESY',)
def _is_texkey(id_, schema):
return id_ and schema in ('INSPIRETeX', 'SPIRESTeX')
texkeys = self.get('texkeys', [])
external_system_identifiers = self.get('external_system_identifiers', [])
_desy_bookkeeping = self.get('_desy_bookkeeping', [])
values = force_list(value)
for value in values:
ids = force_list(value.get('a', ''))
other_ids = force_list(value.get('z', ''))
schema = force_single_element(value.get('9', ''))
for id_ in ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.insert(0, id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.insert(0, {
'schema': schema,
'value': id_,
})
for id_ in other_ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.append(id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.append({
'schema': schema,
'value': id_,
})
self['external_system_identifiers'] = external_system_identifiers
self['_desy_bookkeeping'] = _desy_bookkeeping
return texkeys
@hep2marc.over('035', '^texkeys$')
def texkeys2marc(self, key, value):
"""Populate the ``035`` MARC field."""
result = []
values = force_list(value)
if values:
value = values[0]
result.append({
'9': 'INSPIRETeX',
'a': value,
})
for value in values[1:]:
result.append({
'9': 'INSPIRETeX',
'z': value,
})
return result
@hep2marc.over('035', '^external_system_identifiers$')
def external_system_identifiers2marc(self, key, value):
"""Populate the ``035`` MARC field.
Also populates the ``970`` MARC field through side effects and an extra
``id_dict`` dictionary that holds potentially duplicate IDs that are
post-processed in a filter.
"""
def _is_scheme_cernkey(id_, schema):
return schema == 'CERNKEY'
def _is_scheme_spires(id_, schema):
return schema == 'SPIRES'
result_035 = self.get('035', [])
id_dict = self.get('id_dict', defaultdict(list))
result_970 = self.get('970', [])
values = force_list(value)
for value in values:
id_ = value.get('value')
schema = value.get('schema')
if _is_scheme_spires(id_, schema):
result_970.append({
'a': id_,
})
elif _is_scheme_cernkey(id_, schema):
result_035.append({
'9': 'CERNKEY',
'z': id_,
})
else:
id_dict[schema].append(id_)
self['970'] = result_970
self['id_dict'] = id_dict
return result_035
@hep.over('arxiv_eprints', '^037..')
def arxiv_eprints(self, key, value):
"""Populate the ``arxiv_eprints`` key.
Also populates the ``report_numbers`` key through side effects.
"""
def _get_clean_arxiv_eprint(id_):
return id_.split(':')[-1]
def _is_arxiv_eprint(id_, source):
return source == 'arXiv'
def _is_hidden_report_number(other_id, source):
return other_id
def _get_clean_source(source):
if source == 'arXiv:reportnumber':
return 'arXiv'
return source
arxiv_eprints = self.get('arxiv_eprints', [])
report_numbers = self.get('report_numbers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
other_id = force_single_element(value.get('z', ''))
categories = [normalize_arxiv_category(category) for category
in force_list(value.get('c'))]
source = force_single_element(value.get('9', ''))
if _is_arxiv_eprint(id_, source):
arxiv_eprints.append({
'categories': categories,
'value': _get_clean_arxiv_eprint(id_),
})
elif _is_hidden_report_number(other_id, source):
report_numbers.append({
'hidden': True,
'source': _get_clean_source(source),
'value': other_id,
})
else:
report_numbers.append({
'source': _get_clean_source(source),
'value': id_,
})
self['report_numbers'] = report_numbers
return arxiv_eprints
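# Illustrative example (hypothetical record): a ``037`` field such as
#     {'a': 'arXiv:1703.12345', 'c': 'hep-th', '9': 'arXiv'}
# is appended to ``arxiv_eprints`` as
#     {'categories': ['hep-th'], 'value': '1703.12345'},
# whereas entries with a different source are collected under ``report_numbers``.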
@hep2marc.over('037', '^arxiv_eprints$')
def arxiv_eprints2marc(self, key, values):
"""Populate the ``037`` MARC field.
Also populates the ``035`` and the ``65017`` MARC fields through side effects.
"""
result_037 = self.get('037', [])
result_035 = self.get('035', [])
result_65017 = self.get('65017', [])
for value in values:
arxiv_id = value.get('value')
arxiv_id = 'arXiv:' + arxiv_id if is_arxiv_post_2007(arxiv_id) else arxiv_id
result_037.append({
'9': 'arXiv',
'a': arxiv_id,
'c': force_single_element(value.get('categories')),
})
result_035.append({
'9': 'arXiv',
'a': 'oai:arXiv.org:' + value.get('value'),
})
categories = force_list(value.get('categories'))
for category in categories:
result_65017.append({
'2': 'arXiv',
'a': category,
})
self['65017'] = result_65017
self['035'] = result_035
return result_037
@hep2marc.over('037', '^report_numbers$')
@utils.for_each_value
def report_numbers2marc(self, key, value):
"""Populate the ``037`` MARC field."""
def _get_mangled_source(source):
if source == 'arXiv':
return 'arXiv:reportnumber'
return source
source = _get_mangled_source(value.get('source'))
if value.get('hidden'):
return {
'9': source,
'z': value.get('value'),
}
return {
'9': source,
'a': value.get('value'),
}
@hep.over('languages', '^041..')
def languages(self, key, value):
"""Populate the ``languages`` key."""
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages
@hep2marc.over('041', '^languages$')
@utils.for_each_value
def languages2marc(self, key, value):
"""Populate the ``041`` MARC field."""
return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
|
gpl-3.0
| -6,248,574,016,785,603,000
| 28.587007
| 107
| 0.559912
| false
| 3.674928
| false
| false
| false
|
scopenco/netblock-tools
|
netnull.py
|
1
|
3572
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Author: Andrey Skopenko <andrey@scopenco.net>
'''A tool to create ip route rules that blackhole networks by country code
(ex: RU, CN, etc.). For correct execution, the script needs the GeoIP
database and the country code list to be downloaded first.'''
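# Illustrative usage (hypothetical invocation):
#     wget http://www.maxmind.com/download/geoip/database/GeoIPCountryCSV.zip && unzip GeoIPCountryCSV.zip
#     ./netnull.py RU CN        # prints "ip route add blackhole <net>/<prefix>" lines
#     ./netnull.py -r RU CN     # prints the matching "ip route del blackhole" lines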
import csv
import sys
import optparse
import os.path
ROUTE_BIN = 'ip route'
MAXMIND_DB = \
'http://www.maxmind.com/download/geoip/database/GeoIPCountryCSV.zip'
COUTRY_DB = 'http://www.iso.org/iso/list-en1-semic-3.txt'
def main():
p = optparse.OptionParser(description=__doc__,
prog="netblock",
version="0.1",
                              usage="%prog [--cc] [--geoipdb FILE] "
                                    "[--countrydb FILE] [-r] country1 country2 ...")
p.add_option("--geoipdb",
help='Path to GeoIPCountryWhois.csv with GeoIP data',
default='GeoIPCountryWhois.csv')
p.add_option("--countrydb",
help='Path to country_names_and_code_elements_txt '
'with country codes',
default='country_names_and_code_elements_txt')
p.add_option("--cc",
action='store_true',
help='List of country codes')
p.add_option("--remove_nullroute", "-r",
help='Generate rules to remove subnets from ip route table',
action="store_true")
options, arguments = p.parse_args()
# show list of country codes
if options.cc:
if not os.path.isfile(options.countrydb):
print '%s not found! try command "wget %s"' % (
options.countrydb, COUTRY_DB)
sys.exit()
with open(options.countrydb) as f:
for line in f:
if line == "" or line.startswith("Country ") or \
";" not in line:
continue
c_name, c_code = line.strip().split(";")
c_name = ' '.join([part.capitalize() for part in
c_name.split(" ")])
print '%s\t%s' % (c_code, c_name)
return
# show help
if not arguments:
p.print_help()
sys.exit()
if not os.path.isfile(options.geoipdb):
print '%s not found! try ' \
'command "wget %s && unzip GeoIPCountryCSV.zip"' % (
options.geoipdb, MAXMIND_DB)
sys.exit()
# construct route rule tempate
base_rule = ROUTE_BIN
if options.remove_nullroute:
block_rule = base_rule + ' del blackhole %s'
else:
block_rule = base_rule + ' add blackhole %s'
# get country networks and show iptables rules
with open(options.geoipdb, 'rb') as f:
for i in csv.reader(f):
if i[4] in arguments:
network = int(i[2])
mask = int(i[3])
while (network <= mask):
x = 0
while True:
if network & (1 << x) == 0 and \
network + ((1 << (x + 1)) - 1) <= mask:
x += 1
continue
print block_rule % '%s/%s' % (get_net(network), 32 - x)
break
network += 1 << x
def get_net(network):
'''convert bin network to decimal'''
out = str(network & 255)
for x in range(3):
network = network >> 8
out = '%s.%s' % (str(network & 255), out)
return out
if __name__ == "__main__":
main()
|
bsd-3-clause
| 179,938,823,875,172,540
| 34.019608
| 79
| 0.493001
| false
| 3.951327
| false
| false
| false
|
ju1ius/clisnips
|
clisnips/tui/widgets/progress/process.py
|
1
|
2218
|
import multiprocessing
import os
import signal
from clisnips.tui.logging import logger
from .message_queue import MessageQueue
class Process(multiprocessing.Process):
def __init__(self, message_queue: MessageQueue, target, args=(), kwargs=None):
super().__init__(target=target, args=args, kwargs=kwargs or {})
self._stop_event = multiprocessing.Event()
self._message_queue = message_queue
def stop(self):
logger.debug('Stopping process %s', self.pid)
self._stop_event.set()
# allow garbage collection
if self._message_queue:
self._message_queue = None
self._target.message_queue = None
def kill(self):
self.stop()
if self.is_alive():
logger.debug('Killing process %s', self.pid)
try:
os.killpg(self.pid, signal.SIGKILL)
except OSError as err:
os.kill(self.pid, signal.SIGKILL)
def run(self):
logger.debug('Starting process %s', self.pid)
# pass the queue object to the function object
self._target.message_queue = self._message_queue
self._message_queue.start()
self._message_queue.progress(0.0)
try:
self._do_run_task()
except KeyboardInterrupt as e:
            logger.debug('Process %s caught KeyboardInterrupt', self.pid)
self._message_queue.cancel()
except Exception as err:
msg = ' '.join(err.args) if len(err.args) else str(err)
self._message_queue.error(msg)
finally:
self._message_queue.finish()
self._message_queue.close()
def _do_run_task(self):
for msg in self._target(*self._args, **self._kwargs):
if isinstance(msg, float):
self._message_queue.progress(msg)
elif isinstance(msg, str):
self._message_queue.message(msg)
if self._stop_event.is_set():
self._message_queue.cancel()
logger.debug('Cancelled process %s', self.pid)
break
class BlockingProcess(Process):
def _do_run_task(self):
self._target(*self._args, **self._kwargs)
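# Illustrative sketch (not part of the original module, hypothetical names):
# a target handed to Process must be a callable returning an iterator that
# yields floats (progress) and/or strings (messages), as consumed by
# Process._do_run_task() above, e.g.
#     def copy_files(paths):
#         for i, path in enumerate(paths, 1):
#             yield 'copying %s' % path      # forwarded via message()
#             yield i / len(paths)           # forwarded via progress()
#     Process(MessageQueue(), target=copy_files, args=(paths,)).start()
# (the MessageQueue constructor signature is assumed here)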
|
gpl-3.0
| 8,363,412,266,626,529,000
| 32.606061
| 82
| 0.584761
| false
| 4.099815
| false
| false
| false
|
sanyaade-mobiledev/renderer-service-upnp
|
test/rendererconsole.py
|
1
|
5827
|
# -*- coding: utf-8 -*-
# renderer-console
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Sébastien Bianti <sebastien.bianti@linux.intel.com>
#
import dbus
import json
import xml.etree.ElementTree as ET
ROOT_OBJECT_PATH = '/com/intel/RendererServiceUPnP'
RENDERER_BUS = 'com.intel.renderer-service-upnp'
PROPS_IF_NAME = 'org.freedesktop.DBus.Properties'
INTROSPECTABLE_IF_NAME = 'org.freedesktop.DBus.Introspectable'
DEVICE_IF_NAME = 'com.intel.UPnP.RendererDevice'
PUSH_HOST_IF_NAME = 'com.intel.RendererServiceUPnP.PushHost'
MANAGER_INTERFACE = 'com.intel.RendererServiceUPnP.Manager'
MEDIAPLAYER2_IF_NAME = 'org.mpris.MediaPlayer2'
PLAYER_IF_NAME = 'org.mpris.MediaPlayer2.Player'
global bus_type
bus_type = dbus.SessionBus()
def print_json(props):
print json.dumps(props, indent=4, sort_keys=True)
def get_interface(path, if_name):
return dbus.Interface(bus_type.get_object(RENDERER_BUS, path), if_name)
class Renderer(object):
"Represent a renderer service"
def __init__(self, object_path):
self.__path = object_path
self.__propsIF = get_interface(object_path, PROPS_IF_NAME)
self.__playerIF = get_interface(object_path, PLAYER_IF_NAME)
self.__pushhostIF = get_interface(object_path, PUSH_HOST_IF_NAME)
def get_interfaces(self):
try:
introspectable_IF = get_interface(self.__path,
INTROSPECTABLE_IF_NAME)
except:
print(u"Failed to retrieve introspectable interface")
introspection = introspectable_IF.Introspect()
tree = ET.fromstring(introspection)
return [i.attrib['name'] for i in tree if i.tag == "interface"]
def interfaces(self):
for i in self.get_interfaces():
print i
def get_prop(self, prop_name, inner_if_name = ""):
return self.__propsIF.Get(inner_if_name, prop_name)
def get_props(self, inner_if_name = ""):
return self.__propsIF.GetAll(inner_if_name)
def print_props(self, inner_if_name = ""):
print_json(self.get_props(inner_if_name))
def set_prop(self, prop_name, if_name, val):
"""
Sets only the following properties :
Rate and Volume
"""
return self.__propsIF.Set(if_name, prop_name, val)
# Control methods
def play(self):
self.__playerIF.Play()
def pause(self):
self.__playerIF.Pause()
def play_pause(self):
self.__playerIF.PlayPause()
def next(self):
self.__playerIF.Next()
def open_uri(self, uri):
self.__playerIF.OpenUri(uri)
def previous(self):
self.__playerIF.Previous()
def seek(self, offset):
self.__playerIF.Seek(offset)
def goto_track(self, trackID):
self.__playerIF.GotoTrack(trackID)
def set_position(self, trackID, position):
self.__playerIF.setPosition(trackID, position)
def stop(self):
self.__playerIF.Stop()
# Push Host methods
def host_file(self, path):
return self.__pushhostIF.HostFile(path)
def remove_file(self, path):
self.__pushhostIF.RemoveFile(path)
class Manager(object):
"""
High level class for detecting Renderers and doing common operations
on RendererServiceUPnP
"""
def __init__(self):
self.__manager = get_interface(ROOT_OBJECT_PATH, MANAGER_INTERFACE)
self.__renderers = []
def update_renderers(self):
self.__renderers = self.__manager.GetServers()
def get_renderers(self):
self.update_renderers()
return self.__renderers
def renderers(self):
self.update_renderers()
for path in self.__renderers:
try:
renderer = Renderer(path)
renderer_name = renderer.get_prop("Identity")
print(u"%s : %s" % (path, renderer_name))
except:
print(u"Failed to retrieve Identity for interface %s" % path)
def get_version(self):
return self.__manager.GetVersion()
def version(self):
print self.get_version()
def release(self):
self.__manager.Release()
if __name__ == "__main__":
print("\n\t\t\tExample for using rendererconsole:")
print("\t\t\t¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n")
manager = Manager()
print("Version = %s" % manager.get_version())
print("¯¯¯¯¯¯¯")
print "\nRenderer's list:"
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯")
manager.renderers()
renderer_list = manager.get_renderers()
for name in renderer_list:
renderer = Renderer(name)
interface_list = renderer.get_interfaces()
print("\nInterfaces of %s:" % name)
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + "¯" * len(name))
for i in interface_list:
print i
if_name = DEVICE_IF_NAME
if (if_name in interface_list) :
print("\nProperties of %s on %s:" % (if_name, name))
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + (len(name) + len(if_name)) * "¯")
renderer.print_props(if_name)
|
lgpl-2.1
| -1,308,506,136,399,219,500
| 28.420513
| 79
| 0.62402
| false
| 3.300921
| false
| false
| false
|