| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
import vtk
reader = vtk.vtkRectilinearGridReader()
reader.SetFileName("D:/Notebooks_Bogota2017/SS_2017/data/jet4_0.500.vtk")
reader.Update()
output = reader.GetOutput()
xmi, xma, ymi, yma, zmi, zma = output.GetBounds()
# Color Transfer Function and LookUpTable
# Create transfer mapping scalar value to color
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(0.15, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(0.3, 0.0, 1.0, 0.0)
tableSize = 30
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(tableSize)
lut.Build()
for i in range(0, tableSize):
    rgb = list(colorTransferFunction.GetColor(float(i) / tableSize)) + [0.2]
    lut.SetTableValue(i, rgb)
# A plane for the seeds
plane = vtk.vtkPlaneSource()
plane.SetOrigin(0, 0, 0)
plane.SetPoint1(xma, 0, 0)
plane.SetPoint2(0, 0, zma)
plane.SetXResolution(20)
plane.SetYResolution(20)
# Add the outline of the plane
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(plane.GetOutputPort())  # connect the pipeline; plane.GetOutput() is empty before Update()
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(1,1,1)
# Compute streamlines
streamline = vtk.vtkStreamTracer()
streamline.SetSourceConnection(plane.GetOutputPort())
streamline.SetInputConnection(reader.GetOutputPort())
streamline.SetIntegrationDirectionToForward()
#streamline.SetIntegrationDirectionToBackward()
#streamline.SetIntegrationDirectionToBoth()
streamline.SetMaximumPropagation(1)
streamline.SetComputeVorticity(True)
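# Note: MaximumPropagation is a length in the dataset's coordinate units, so a
# value of 1 keeps the traced streamlines short relative to the grid bounds;
# ComputeVorticity adds a per-point "Vorticity" array that mappers could use.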
# Visualize stream as ribbons (= Stream ribbons); i.e. we need to pass the streamlines through the ribbon filter
streamRibbons = vtk.vtkRibbonFilter()
streamRibbons.SetInputConnection(streamline.GetOutputPort())
streamRibbons.SetWidth(0.01)
streamRibbons.Update()
streamRibbonsMapper = vtk.vtkPolyDataMapper()
streamRibbonsMapper.SetScalarModeToUsePointFieldData()
streamRibbonsMapper.SetInputConnection(streamRibbons.GetOutputPort())
# ***TODO: apply a transfer function to the stream ribbons
streamRibbonsActor = vtk.vtkActor()
streamRibbonsActor.SetMapper(streamRibbonsMapper)
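# A possible way to satisfy the TODO above (a sketch only): reuse the lookup
# table built earlier and color the ribbons by the 'vectors' point array,
# mirroring the stream-tube mapper configuration below.
streamRibbonsMapper.SetLookupTable(lut)
streamRibbonsMapper.SetScalarVisibility(True)
streamRibbonsMapper.SelectColorArray('vectors')
streamRibbonsMapper.SetScalarRange(
    reader.GetOutput().GetPointData().GetVectors().GetRange(-1))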
# Visualize stream as tubes (= Stream tubes)
streamTubes = vtk.vtkTubeFilter()
streamTubes.SetInputConnection(streamline.GetOutputPort())
streamTubes.SetRadius(0.01)
streamTubes.Update()
streamTubeMapper = vtk.vtkPolyDataMapper()
streamTubeMapper.SetLookupTable(lut)
streamTubeMapper.SetInputConnection(streamTubes.GetOutputPort())
streamTubeMapper.SetScalarVisibility(True)
streamTubeMapper.SetScalarModeToUsePointFieldData()
streamTubeMapper.SelectColorArray('vectors')
streamTubeMapper.SetScalarRange((reader.GetOutput().GetPointData().GetVectors().GetRange(-1)))
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(streamTubeMapper)
# Visualize stream as lines (= Stream lines)
# Pass the streamlines to the mapper
streamlineMapper = vtk.vtkPolyDataMapper()
streamlineMapper.SetLookupTable(lut)
streamlineMapper.SetInputConnection(streamline.GetOutputPort())
streamlineMapper.SetScalarVisibility(True)
streamlineMapper.SetScalarModeToUsePointFieldData()
streamlineMapper.SelectColorArray('vectors')
streamlineMapper.SetScalarRange((reader.GetOutput().GetPointData().GetVectors().GetRange(-1)))
# Pass the mapper to the actor
streamlineActor = vtk.vtkActor()
streamlineActor.SetMapper(streamlineMapper)
streamlineActor.GetProperty().SetLineWidth(2.0)
# Add the outline of the data set
gOutline = vtk.vtkRectilinearGridOutlineFilter()
gOutline.SetInputData(output)
gOutlineMapper = vtk.vtkPolyDataMapper()
gOutlineMapper.SetInputConnection(gOutline.GetOutputPort())
gOutlineActor = vtk.vtkActor()
gOutlineActor.SetMapper(gOutlineMapper)
gOutlineActor.GetProperty().SetColor(0.5,0.5,0.5)
# Rendering / Window
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.0, 0.0, 0.0)
#renderer.AddActor(streamlineActor)
# renderer.AddActor(streamRibbonsActor)
renderer.AddActor(streamTubeActor)
renderer.AddActor(outlineActor)
renderer.AddActor(gOutlineActor)
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(500, 500)
renderWindow.Render()
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
interactor.SetRenderWindow(renderWindow)
interactor.Initialize()
interactor.Start()
| dianafprieto/SS_2017 | scripts/07_NB_StreamTubes.py | Python | mit | 4,407 | 0.00295 |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/review/review.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import types
import urllib
from common import crypto
from common import schema_transforms
from common import utils as common_utils
from controllers import sites
from models import data_sources
from models import models
from models import student_work
from models import transforms
from modules.review import domain
from modules.review import peer
from modules.review import review as review_module
from modules.upload import upload
from tests.functional import actions
from google.appengine.ext import db
class ManagerTest(actions.TestBase):
"""Tests for review.Manager."""
def setUp(self):
super(ManagerTest, self).setUp()
self.reviewee = models.Student(key_name='reviewee@example.com')
self.reviewee_key = self.reviewee.put()
self.reviewer = models.Student(key_name='reviewer@example.com')
self.reviewer_key = self.reviewer.put()
self.unit_id = '1'
self.submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=self.reviewee_key, unit_id=self.unit_id))
def test_add_reviewer_adds_new_step_and_summary(self):
step_key = review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step = db.get(step_key)
summary = db.get(step.review_summary_key)
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(self.reviewee_key, step.reviewee_key)
self.assertEqual(self.reviewer_key, step.reviewer_key)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(self.submission_key, step.submission_key)
self.assertEqual(self.unit_id, step.unit_id)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
def test_add_reviewer_existing_raises_assertion_when_summary_missing(self):
missing_key = db.Key.from_path(
peer.ReviewSummary.kind(), 'no_summary_found_for_key')
peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=missing_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
AssertionError, review_module.Manager.add_reviewer, self.unit_id,
self.submission_key, self.reviewee_key, self.reviewer_key)
def test_add_reviewer_existing_raises_transition_error_when_assigned(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.add_reviewer,
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
def test_add_reviewer_existing_raises_transition_error_when_completed(self):
summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.add_reviewer,
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
def test_add_reviewer_unremoved_existing_changes_expired_to_assigned(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
def test_add_reviewer_removed_unremoves_assigned_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
def test_add_reviewer_removed_unremoves_completed_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.completed_count)
def test_add_reviewer_removed_unremoves_and_assigns_expired_step(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
def test_delete_reviewer_marks_step_removed_and_decrements_summary(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
step, summary = db.get([step_key, summary_key])
self.assertFalse(step.removed)
self.assertEqual(1, summary.assigned_count)
deleted_key = review_module.Manager.delete_reviewer(step_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(step_key, deleted_key)
self.assertTrue(step.removed)
self.assertEqual(0, summary.assigned_count)
def test_delete_reviewer_raises_key_error_when_step_missing(self):
self.assertRaises(
KeyError, review_module.Manager.delete_reviewer,
db.Key.from_path(peer.ReviewStep.kind(), 'missing_step_key'))
def test_delete_reviewer_raises_key_error_when_summary_missing(self):
missing_key = db.Key.from_path(
peer.ReviewSummary.kind(), 'missing_review_summary_key')
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=missing_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
KeyError, review_module.Manager.delete_reviewer, step_key)
def test_delete_reviewer_raises_removed_error_if_already_removed(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.RemovedError, review_module.Manager.delete_reviewer,
step_key)
def test_expire_review_raises_key_error_when_step_missing(self):
self.assertRaises(
KeyError, review_module.Manager.expire_review,
db.Key.from_path(peer.ReviewStep.kind(), 'missing_step_key'))
def test_expire_review_raises_key_error_when_summary_missing(self):
missing_key = db.Key.from_path(
peer.ReviewSummary.kind(), 'missing_review_summary_key')
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=missing_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
KeyError, review_module.Manager.expire_review, step_key)
def test_expire_review_raises_transition_error_when_state_completed(self):
summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.expire_review,
step_key)
def test_expire_review_raises_transition_error_when_state_expired(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.expire_review,
step_key)
def test_expire_review_raises_removed_error_when_step_removed(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.RemovedError, review_module.Manager.expire_review, step_key)
def test_expire_review_transitions_state_and_updates_summary(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
step, summary = db.get([step_key, summary_key])
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
expired_key = review_module.Manager.expire_review(step_key)
step, summary = db.get([expired_key, summary_key])
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.expired_count)
self.assertEqual(domain.REVIEW_STATE_EXPIRED, step.state)
def test_expire_old_reviews_for_unit_expires_found_reviews(self):
summary_key = peer.ReviewSummary(
assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
first_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
second_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
first_step, second_step, summary = db.get(
[first_step_key, second_step_key, summary_key])
self.assertEqual(
[domain.REVIEW_STATE_EXPIRED, domain.REVIEW_STATE_EXPIRED],
[step.state for step in [first_step, second_step]])
self.assertEqual(0, summary.assigned_count)
self.assertEqual(2, summary.expired_count)
def test_expire_old_reviews_skips_errors_and_continues_processing(self):
# Create and bind a function that we can swap in to generate a query
# that will pick up bad results so we can tell that we skip them.
query_containing_unprocessable_entities = peer.ReviewStep.all(
keys_only=True)
query_fn = types.MethodType(
lambda x, y, z: query_containing_unprocessable_entities,
review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, 'get_expiry_query', query_fn)
summary_key = peer.ReviewSummary(
assigned_count=1, completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
processable_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
error_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
processed_step, error_step, summary = db.get(
[processable_step_key, error_step_key, summary_key])
self.assertEqual(domain.REVIEW_STATE_COMPLETED, error_step.state)
self.assertEqual(domain.REVIEW_STATE_EXPIRED, processed_step.state)
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.completed_count)
self.assertEqual(1, summary.expired_count)
def test_get_assignment_candidates_query_filters_and_orders_correctly(self):
unused_wrong_unit_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=str(int(self.unit_id) + 1)
).put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
older_assigned_and_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=1,
reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
third_reviewee_key = models.Student(
key_name='reviewee3@example.com').put()
third_submission_key = student_work.Submission(
reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
younger_assigned_and_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=1,
reviewee_key=third_reviewee_key,
submission_key=third_submission_key, unit_id=self.unit_id
).put()
fourth_reviewee_key = models.Student(
key_name='reviewee4@example.com').put()
fourth_submission_key = student_work.Submission(
reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
completed_but_not_assigned_key = peer.ReviewSummary(
assigned_count=0, completed_count=1,
reviewee_key=fourth_reviewee_key,
submission_key=fourth_submission_key, unit_id=self.unit_id
).put()
fifth_reviewee_key = models.Student(
key_name='reviewee5@example.com').put()
fifth_submission_key = student_work.Submission(
reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
assigned_but_not_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=0,
reviewee_key=fifth_reviewee_key,
submission_key=fifth_submission_key, unit_id=self.unit_id
).put()
results = review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(5)
self.assertEqual([
assigned_but_not_completed_key,
completed_but_not_assigned_key,
older_assigned_and_completed_key,
younger_assigned_and_completed_key
], [r.key() for r in results])
def test_get_expiry_query_filters_and_orders_correctly(self):
summary_key = peer.ReviewSummary(
assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
unused_completed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
unused_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
third_reviewee_key = models.Student(
key_name='reviewee3@example.com').put()
third_submission_key = student_work.Submission(
reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
unused_other_unit_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=third_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=third_submission_key,
state=domain.REVIEW_STATE_ASSIGNED,
unit_id=str(int(self.unit_id) + 1)
).put()
fourth_reviewee_key = models.Student(
key_name='reviewee4@example.com').put()
fourth_submission_key = student_work.Submission(
reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
first_assigned_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=fourth_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=fourth_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
fifth_reviewee_key = models.Student(
key_name='reviewee5@example.com').put()
fifth_submission_key = student_work.Submission(
reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
second_assigned_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=fifth_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=fifth_submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
zero_review_window_query = review_module.Manager.get_expiry_query(
0, self.unit_id)
future_review_window_query = review_module.Manager.get_expiry_query(
1, self.unit_id)
self.assertEqual(
[first_assigned_step_key, second_assigned_step_key],
zero_review_window_query.fetch(3))
# No items are > 1 minute old, so we expect an empty result set.
self.assertEqual(None, future_review_window_query.get())
def test_get_new_review_creates_step_and_updates_summary(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
summary = db.get(summary_key)
self.assertEqual(0, summary.assigned_count)
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
self.assertEqual(summary.key(), step.review_summary_key)
self.assertEqual(self.reviewee_key, step.reviewee_key)
self.assertEqual(self.reviewer_key, step.reviewer_key)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(self.submission_key, step.submission_key)
self.assertEqual(self.unit_id, step.unit_id)
self.assertEqual(1, summary.assigned_count)
def test_get_new_review_raises_key_error_when_summary_missing(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
# Create and bind a function that we can swap in to pick the review
# candidate but as a side effect deletes the review summary, causing the
# lookup by key to fail.
def pick_and_remove(unused_cls, candidates):
db.delete(summary_key)
return candidates[0]
fn = types.MethodType(
pick_and_remove, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
self.assertRaises(
KeyError, review_module.Manager.get_new_review, self.unit_id,
self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_assigned(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
unused_already_assigned_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_completed(self):
summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
already_completed_unremoved_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
db.delete(already_completed_unremoved_step_key)
unused_already_completed_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_review_is_for_self(self):
peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id,
self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_no_candidates(self):
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_retry_limit_hit(self):
higher_priority_summary = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
higher_priority_summary_key = higher_priority_summary.put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
lower_priority_summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
self.assertEqual( # Ensure we'll process higher priority first.
[higher_priority_summary_key, lower_priority_summary_key],
[c.key() for c in
review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(2)])
# Create and bind a function that we can swap in to pick the review
# candidate but as a side-effect updates the highest priority candidate
# so we'll skip it and retry.
def pick_and_update(unused_cls, candidates):
db.put(higher_priority_summary)
return candidates[0]
fn = types.MethodType(
pick_and_update, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key, max_retries=0)
def test_get_new_review_raises_not_assignable_when_summary_updated(self):
summary = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
summary.put()
# Create and bind a function that we can swap in to pick the review
# candidate but as a side-effect updates the summary so we'll reject it
# as a candidate.
def pick_and_update(unused_cls, candidates):
db.put(summary)
return candidates[0]
fn = types.MethodType(
pick_and_update, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_reassigns_removed_assigned_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
unused_already_assigned_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
self.assertFalse(step.removed)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(1, summary.assigned_count)
def test_get_new_review_reassigns_removed_expired_step(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
unused_already_expired_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step, summary = db.get([step_key, summary_key])
self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
self.assertFalse(step.removed)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.expired_count)
def test_get_new_review_retries_successfully(self):
higher_priority_summary = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
higher_priority_summary_key = higher_priority_summary.put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
lower_priority_summary_key = peer.ReviewSummary(
completed_count=1, reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
self.assertEqual( # Ensure we'll process higher priority first.
[higher_priority_summary_key, lower_priority_summary_key],
[c.key() for c in
review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(2)])
# Create and bind a function that we can swap in to pick the review
# candidate but as a side-effect updates the highest priority candidate
# so we'll skip it and retry.
def pick_and_update(unused_cls, candidates):
db.put(higher_priority_summary)
return candidates[0]
fn = types.MethodType(
pick_and_update, review_module.Manager(), review_module.Manager)
self.swap(
review_module.Manager, '_choose_assignment_candidate', fn)
step_key = review_module.Manager.get_new_review(
self.unit_id, self.reviewer_key)
step = db.get(step_key)
self.assertEqual(lower_priority_summary_key, step.review_summary_key)
def test_get_review_step_keys_by_returns_list_of_keys(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
matching_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
non_matching_reviewer = models.Student(key_name='reviewer2@example.com')
non_matching_reviewer_key = non_matching_reviewer.put()
unused_non_matching_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=non_matching_reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=self.unit_id
).put()
self.assertEqual(
[matching_step_key],
review_module.Manager.get_review_step_keys_by(
self.unit_id, self.reviewer_key))
def test_get_review_step_keys_by_returns_keys_in_sorted_order(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
first_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
second_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
second_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=second_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=second_submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertEqual(
[first_step_key, second_step_key],
review_module.Manager.get_review_step_keys_by(
self.unit_id, self.reviewer_key))
def test_get_review_step_keys_by_returns_empty_list_when_no_matches(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
non_matching_reviewer = models.Student(key_name='reviewer2@example.com')
non_matching_reviewer_key = non_matching_reviewer.put()
unused_non_matching_step_different_reviewer_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=non_matching_reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=self.unit_id,
).put()
unused_non_matching_step_different_unit_id_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=str(int(self.unit_id) + 1),
).put()
self.assertEqual(
[], review_module.Manager.get_review_step_keys_by(
self.unit_id, self.reviewer_key))
def test_get_review_steps_by_keys(self):
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
second_reviewer_key = models.Student(
key_name='reviewer2@example.com').put()
missing_step_key = db.Key.from_path(
peer.ReviewStep.kind(),
peer.ReviewStep.key_name(
self.submission_key, second_reviewer_key))
model_objects = db.get([step_key, missing_step_key])
domain_objects = review_module.Manager.get_review_steps_by_keys(
[step_key, missing_step_key])
model_step, model_miss = model_objects
domain_step, domain_miss = domain_objects
self.assertEqual(2, len(model_objects))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_step.assigner_kind, domain_step.assigner_kind)
self.assertEqual(model_step.change_date, domain_step.change_date)
self.assertEqual(model_step.create_date, domain_step.create_date)
self.assertEqual(model_step.key(), domain_step.key)
self.assertEqual(model_step.removed, domain_step.removed)
self.assertEqual(model_step.review_key, domain_step.review_key)
self.assertEqual(
model_step.review_summary_key, domain_step.review_summary_key)
self.assertEqual(model_step.reviewee_key, domain_step.reviewee_key)
self.assertEqual(model_step.reviewer_key, domain_step.reviewer_key)
self.assertEqual(model_step.state, domain_step.state)
self.assertEqual(model_step.submission_key, domain_step.submission_key)
self.assertEqual(model_step.unit_id, domain_step.unit_id)
def test_get_reviews_by_keys(self):
review_key = student_work.Review(
contents='contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id
).put()
missing_review_key = db.Key.from_path(
student_work.Review.kind(),
student_work.Review.key_name(
str(int(self.unit_id) + 1), self.reviewee_key,
self.reviewer_key))
model_objects = db.get([review_key, missing_review_key])
domain_objects = review_module.Manager.get_reviews_by_keys(
[review_key, missing_review_key])
model_review, model_miss = model_objects
domain_review, domain_miss = domain_objects
self.assertEqual(2, len(model_objects))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_review.contents, domain_review.contents)
self.assertEqual(model_review.key(), domain_review.key)
def test_get_submission_and_review_step_keys_no_steps(self):
student_work.Submission(
reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
self.assertEqual(
(self.submission_key, []),
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submission_and_review_step_keys_with_steps(self):
student_work.Submission(
reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
summary_key = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
matching_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
non_matching_reviewee_key = models.Student(
key_name='reviewee2@example.com').put()
non_matching_submission_key = student_work.Submission(
contents='contents2', reviewee_key=non_matching_reviewee_key,
unit_id=self.unit_id).put()
unused_non_matching_step_different_submission_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key,
reviewee_key=non_matching_reviewee_key,
reviewer_key=self.reviewer_key,
submission_key=non_matching_submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertEqual(
(self.submission_key, [matching_step_key]),
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submission_and_review_step_keys_returns_none_on_miss(self):
self.assertIsNone(
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submissions_by_keys(self):
submission_key = student_work.Submission(
contents='contents', reviewee_key=self.reviewee_key,
unit_id=self.unit_id).put()
missing_submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
str(int(self.unit_id) + 1), self.reviewee_key))
domain_models = db.get([submission_key, missing_submission_key])
domain_objects = review_module.Manager.get_submissions_by_keys(
[submission_key, missing_submission_key])
model_submission, model_miss = domain_models
domain_submission, domain_miss = domain_objects
self.assertEqual(2, len(domain_models))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_submission.contents, domain_submission.contents)
self.assertEqual(model_submission.key(), domain_submission.key)
def test_start_review_process_for_succeeds(self):
key = review_module.Manager.start_review_process_for(
self.unit_id, self.submission_key, self.reviewee_key)
summary = db.get(key)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
def test_start_review_process_for_throws_if_already_started(self):
collision = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
collision.put()
self.assertRaises(
domain.ReviewProcessAlreadyStartedError,
review_module.Manager.start_review_process_for,
self.unit_id, self.submission_key, self.reviewee_key)
def test_write_review_raises_constraint_error_if_key_but_no_review(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.ConstraintError, review_module.Manager.write_review,
step_key, 'payload')
def test_write_review_raises_constraint_error_if_no_summary(self):
missing_summary_key = db.Key.from_path(
peer.ReviewSummary.kind(),
peer.ReviewSummary.key_name(self.submission_key))
review_key = student_work.Review(
contents='contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key,
unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=missing_summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED,
unit_id=self.unit_id
).put()
self.assertRaises(
domain.ConstraintError, review_module.Manager.write_review,
step_key, 'payload')
def test_write_review_raises_key_error_if_no_step(self):
bad_step_key = db.Key.from_path(peer.ReviewStep.kind(), 'missing')
self.assertRaises(
KeyError, review_module.Manager.write_review, bad_step_key,
'payload')
def test_write_review_raises_removed_error_if_step_removed(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.RemovedError, review_module.Manager.write_review, step_key,
'payload')
def test_write_review_raises_transition_error_if_step_completed(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.write_review,
step_key, 'payload')
def test_write_review_with_mark_completed_false(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents', mark_completed=False)
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual('new_contents', updated_review.contents)
def test_write_review_with_no_review_mark_completed_false(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step_key).review_key)
updated_step_key = review_module.Manager.write_review(
step_key, 'contents', mark_completed=False)
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(step.review_key, updated_review.key())
self.assertEqual('contents', updated_review.contents)
def test_write_review_with_no_review_mark_completed_true(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step_key).review_key)
updated_step_key = review_module.Manager.write_review(
step_key, 'contents')
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertEqual(step.review_key, updated_review.key())
self.assertEqual('contents', updated_review.contents)
def test_write_review_with_state_assigned_and_mark_completed_true(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents')
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(0, summary.assigned_count)
self.assertEqual(1, summary.completed_count)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertEqual('new_contents', updated_review.contents)
def test_write_review_with_state_expired_and_mark_completed_true(self):
summary_key = peer.ReviewSummary(
expired_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
submission_key=self.submission_key,
state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents')
self.assertEqual(step_key, updated_step_key)
step, summary = db.get([updated_step_key, summary_key])
updated_review = db.get(step.review_key)
self.assertEqual(1, summary.completed_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
self.assertEqual('new_contents', updated_review.contents)
def test_write_review_with_two_students_creates_different_reviews(self):
reviewee1 = models.Student(key_name='reviewee1@example.com')
reviewee1_key = reviewee1.put()
reviewee2 = models.Student(key_name='reviewee2@example.com')
reviewee2_key = reviewee2.put()
submission1_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=reviewee1_key, unit_id=self.unit_id))
submission2_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=reviewee2_key, unit_id=self.unit_id))
summary1_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee1_key,
submission_key=submission1_key, unit_id=self.unit_id
).put()
step1_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary1_key, reviewee_key=reviewee1_key,
reviewer_key=self.reviewer_key, submission_key=submission1_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step1_key).review_key)
updated_step1_key = review_module.Manager.write_review(
step1_key, 'contents1', mark_completed=False)
self.assertEqual(step1_key, updated_step1_key)
summary2_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee2_key,
submission_key=submission2_key, unit_id=self.unit_id
).put()
step2_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_summary_key=summary2_key, reviewee_key=reviewee2_key,
reviewer_key=self.reviewer_key, submission_key=submission2_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
).put()
self.assertIsNone(db.get(step2_key).review_key)
updated_step2_key = review_module.Manager.write_review(
step2_key, 'contents2', mark_completed=False)
self.assertEqual(step2_key, updated_step2_key)
step1, summary1 = db.get([updated_step1_key, summary1_key])
updated_review = db.get(step1.review_key)
self.assertEqual(1, summary1.assigned_count)
self.assertEqual(0, summary1.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step1.state)
self.assertEqual(step1.review_key, updated_review.key())
self.assertEqual('contents1', updated_review.contents)
step2, summary2 = db.get([updated_step2_key, summary2_key])
updated_review = db.get(step2.review_key)
self.assertEqual(1, summary2.assigned_count)
self.assertEqual(0, summary2.completed_count)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step2.state)
self.assertEqual(step2.review_key, updated_review.key())
self.assertEqual('contents2', updated_review.contents)
class SubmissionDataSourceTest(actions.TestBase):
ADMIN_EMAIL = 'admin@foo.com'
COURSE_NAME = 'test_course'
NAMESPACE = 'ns_%s' % COURSE_NAME
STUDENT_EMAIL = 'student@foo.com'
def setUp(self):
super(SubmissionDataSourceTest, self).setUp()
self.app_context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Test Course')
self.base = '/' + self.COURSE_NAME
actions.login(self.STUDENT_EMAIL)
actions.register(self, 'John Smith')
with common_utils.Namespace(self.NAMESPACE):
student, _ = models.Student.get_first_by_email(self.STUDENT_EMAIL)
self.student_user_id = student.user_id
actions.login(self.ADMIN_EMAIL)
def tearDown(self):
sites.reset_courses()
super(SubmissionDataSourceTest, self).tearDown()
def _post_submission(self, unit_id, contents):
actions.login(self.STUDENT_EMAIL)
response = self.post(
upload._POST_ACTION_SUFFIX.lstrip('/'),
{'unit_id': unit_id,
'contents': contents,
'form_xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
upload._XSRF_TOKEN_NAME),
},
# webtest mangles UTF-8 parameter conversion w/o this present.
content_type='application/x-www-form-urlencoded;charset=utf8')
self.assertEquals(response.status_int, 200)
actions.login(self.ADMIN_EMAIL)
def _get_data(self, source_context=None):
params = {
'chunk_size':
review_module.SubmissionDataSource.get_default_chunk_size()
}
if source_context:
params['source_context'] = source_context
response = self.get('rest/data/submissions/items?%s' %
urllib.urlencode(params))
content = transforms.loads(response.body)
return content['data']
def test_no_content(self):
data = self._get_data()
self.assertEquals([], data)
def test_non_pii_request(self):
self._post_submission('123', 'the content')
data = self._get_data()
self.assertEquals(1, len(data))
datum = data[0]
self.assertEquals(datum['user_id'], 'None')
self.assertEquals(datum['contents'], 'the content')
self.assertEquals(datum['unit_id'], '123')
updated_on = datetime.datetime.strptime(
datum['updated_on'], schema_transforms.ISO_8601_DATETIME_FORMAT)
diff = (datetime.datetime.utcnow() - updated_on).total_seconds()
self.assertLess(diff, 5)
def test_pii_request(self):
self._post_submission(456, u'音読み')
# Extra hoop-jumping to get a request context parameter blessed which
# allows non-PII-suppressed results.
params = {'data_source_token': 'fake token'}
source_context = data_sources.DbTableContext.build_blank_default(
params, review_module.SubmissionDataSource.get_default_chunk_size())
source_context.send_uncensored_pii_data = True
handler_class = data_sources._generate_rest_handler(
review_module.SubmissionDataSource)
handler_instance = handler_class()
context_param = handler_instance._encode_context(source_context)
data = self._get_data(context_param)
self.assertEquals(1, len(data))
datum = data[0]
self.assertEquals(datum['user_id'], self.student_user_id)
self.assertEquals(datum['contents'], u'音読み')
self.assertEquals(datum['unit_id'], '456')
updated_on = datetime.datetime.strptime(
datum['updated_on'], schema_transforms.ISO_8601_DATETIME_FORMAT)
diff = (datetime.datetime.utcnow() - updated_on).total_seconds()
self.assertLess(diff, 5)
|
GirlsCodePy/girlscode-coursebuilder
|
modules/review/review_tests.py
|
Python
|
gpl-3.0
| 68,643
| 0.000131
|
import os
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy as np
from .. import DataArray
from ..core import indexing
from ..core.utils import is_scalar
from .common import BackendArray
from .file_manager import CachingFileManager
from .locks import SerializableLock
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
_ERROR_MSG = ('The kind of indexing operation you are trying to do is not '
              'valid on rasterio files. Try to load your data with ds.load() '
              'first.')
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
def __init__(self, manager, lock, vrt_params=None):
from rasterio.vrt import WarpedVRT
self.manager = manager
self.lock = lock
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError('All bands should have the same dtype')
self._dtype = np.dtype(dtypes[0])
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
def _get_indexer(self, key):
""" Get indexer for rasterio array.
        Parameters
        ----------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
assert len(key) == 3, 'rasterio datasets should always be 3D'
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(k, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = k.indices(n)
np_inds.append(slice(None, None, step))
elif is_scalar(k):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(- (2 - i))
start = k
stop = k + 1
else:
start, stop = np.min(k), np.max(k) + 1
np_inds.append(k - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
def _getitem(self, key):
from rasterio.vrt import WarpedVRT
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(
stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(s):
return np.fromstring(s.strip('{}'), dtype='float', sep=',')
def default(s):
return s.strip('{}')
parse = {'wavelength': parsevec,
'fwhm': parsevec}
parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
return parsed_meta
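# Editor's note: a minimal, hypothetical sketch of what _parse_envi produces;
# it is not part of the original module and the metadata values below are
# illustrative, not taken from a real ENVI header.
def _example_parse_envi():
    meta = {'wavelength': '{450.0, 550.0, 650.0}', 'description': '{demo header}'}
    parsed = _parse_envi(meta)
    # parsed['wavelength'] -> array([450., 550., 650.])
    # parsed['description'] -> 'demo header'
    return parsed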
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None,
lock=None):
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
You can generate 2D coordinates from the file's attributes with::
from affine import Affine
da = xr.open_rasterio('path_to_file.tif')
transform = Affine.from_gdal(*da.attrs['transform'])
nx, ny = da.sizes['x'], da.sizes['y']
x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform
Parameters
----------
filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates : bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used to avoid issues with concurrent access to the same file when using
dask's multithreaded backend.
Returns
-------
data : DataArray
The newly created DataArray.
"""
import rasterio
from rasterio.vrt import WarpedVRT
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(crs=vrt.crs.to_string(),
resampling=vrt.resampling,
src_nodata=vrt.src_nodata,
dst_nodata=vrt.dst_nodata,
tolerance=vrt.tolerance,
transform=vrt.transform,
width=vrt.width,
height=vrt.height,
warp_extras=vrt.warp_extras)
if lock is None:
lock = RASTERIO_LOCK
manager = CachingFileManager(rasterio.open, filename, lock=lock, mode='r')
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
coords = OrderedDict()
# Get bands
if riods.count < 1:
raise ValueError('Unknown dims')
coords['band'] = np.asarray(riods.indexes)
# Get coordinates
if LooseVersion(rasterio.__version__) < '1.0':
transform = riods.affine
else:
transform = riods.transform
if transform.is_rectilinear:
# 1d coordinates
parse = True if parse_coordinates is None else parse_coordinates
if parse:
nx, ny = riods.width, riods.height
# xarray coordinates are pixel centered
x, _ = (np.arange(nx) + 0.5, np.zeros(nx) + 0.5) * transform
_, y = (np.zeros(ny) + 0.5, np.arange(ny) + 0.5) * transform
coords['y'] = y
coords['x'] = x
else:
# 2d coordinates
parse = False if (parse_coordinates is None) else parse_coordinates
if parse:
warnings.warn(
"The file coordinates' transformation isn't "
"rectilinear: xarray won't parse the coordinates "
"in this case. Set `parse_coordinates=False` to "
"suppress this warning.",
RuntimeWarning, stacklevel=3)
# Attributes
attrs = dict()
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
attrs['transform'] = tuple(transform)[:6]
if hasattr(riods, 'crs') and riods.crs:
# CRS is a dict-like object specific to rasterio
# If CRS is not None, we convert it back to a PROJ4 string using
# rasterio itself
try:
attrs['crs'] = riods.crs.to_proj4()
except AttributeError:
attrs['crs'] = riods.crs.to_string()
if hasattr(riods, 'res'):
# (width, height) tuple of pixels in units of CRS
attrs['res'] = riods.res
if hasattr(riods, 'is_tiled'):
# Is the TIF tiled? (bool)
# We cast it to an int for netCDF compatibility
attrs['is_tiled'] = np.uint8(riods.is_tiled)
if hasattr(riods, 'nodatavals'):
# The nodata values for the raster bands
attrs['nodatavals'] = tuple(
np.nan if nodataval is None else nodataval
for nodataval in riods.nodatavals)
# Parse extra metadata from tags, if supported
parsers = {'ENVI': _parse_envi}
driver = riods.driver
if driver in parsers:
meta = parsers[driver](riods.tags(ns=driver))
for k, v in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if (isinstance(v, (list, np.ndarray))
and len(v) == riods.count):
coords[k] = ('band', np.asarray(v))
else:
attrs[k] = v
data = indexing.LazilyOuterIndexedArray(
RasterioArrayWrapper(manager, lock, vrt_params))
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=('band', 'y', 'x'),
coords=coords, attrs=attrs)
if chunks is not None:
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
token = tokenize(filename, mtime, chunks)
name_prefix = 'open_rasterio-%s' % token
result = result.chunk(chunks, name_prefix=name_prefix, token=token)
# Make the file closeable
result._file_obj = manager
return result
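# Editor's note: an illustrative usage sketch, not part of the original module.
# The file path and chunk sizes below are assumptions for demonstration only;
# reading with ``chunks`` requires dask to be installed.
def _example_open_rasterio():
    da = open_rasterio('example.tif', chunks={'x': 512, 'y': 512})
    # The result is a DataArray with dims ('band', 'y', 'x') and the six affine
    # coefficients stored in da.attrs['transform'].
    first_band_mean = da.isel(band=0).mean()
    return da, first_band_mean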
|
chunweiyuan/xarray
|
xarray/backends/rasterio_.py
|
Python
|
apache-2.0
| 12,619
| 0
|
#!/usr/bin/python
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import json
import sys
from vmoc.VMOCConfig import VMOCSliceConfiguration, VMOCVLANConfiguration
# Usage: register_controller.py slice [vlan controller ...] [unregister]
if len(sys.argv) < 3:
print "Usage: register_controller.py slice [vlan controller ...] [unregister]"
sys.exit()
print sys.argv[1]
print sys.argv[2]
slice_id = sys.argv[1]
vlan_controllers = json.loads(sys.argv[2])
vlan_configs = []
for i in range(len(vlan_controllers)):
if i == 2*(i/2):
vlan_tag = vlan_controllers[i]
controller_url = vlan_controllers[i+1]
vlan_config = \
VMOCVLANConfiguration(vlan_tag=vlan_tag, \
controller_url=controller_url)
vlan_configs.append(vlan_config)
slice_config = \
VMOCSliceConfiguration(slice_id=slice_id, vlan_configs=vlan_configs)
unregister = False
if len(sys.argv)>3:
unregister = bool(sys.argv[3])
print str(slice_config)
command = 'register'
if unregister: command = 'unregister'
command = command + " " + json.dumps(slice_config.__attr__())
print command
|
GENI-NSF/gram
|
src/vmoc/register_controller.py
|
Python
|
mit
| 2,320
| 0.003879
|
import sys
sys.path = ['..'] + sys.path
import zope.interface
from twisted.internet import reactor
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol
from lib.genpy.snowflake import Snowflake
from lib.genpy.snowflake.ttypes import *
import idworker
class SnowflakeServer(object):
zope.interface.implements(Snowflake.Iface)
def __init__(self, worker_id, datacenter_id):
self.worker = idworker.IdWorker(worker_id, datacenter_id)
def get_worker_id(self):
return self.worker.get_worker_id()
def get_datacenter_id(self):
return self.worker.get_datacenter_id()
def get_timestamp(self):
return self.worker.get_timestamp()
def get_id(self):
return self.worker.get_id()
def print_usage():
print 'python snowflakeserver.py <port> <worker_id> <datacenter_id>'
print 'e.g. python snowflakeserver.py 1111 1 1'
def main():
if len(sys.argv) != 4:
return print_usage()
port = int(sys.argv[1])
worker_id = int(sys.argv[2])
datacenter_id = int(sys.argv[3])
reactor.listenTCP(port, TTwisted.ThriftServerFactory(
processor=Snowflake.Processor(SnowflakeServer(worker_id, datacenter_id)),
iprot_factory=TBinaryProtocol.TBinaryProtocolFactory()
))
reactor.run()
if __name__ == '__main__':
sys.exit(main())
|
michaelmontano/snowflakepy
|
src/snowflakeserver.py
|
Python
|
bsd-3-clause
| 1,494
| 0.014726
|
'''
Convert an image file to a data uri.
Copyright 2012 GoodCrypto
Last modified: 2013-11-13
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
import os.path
from django import template
import reinhardt.data_image
register = template.Library()
@register.filter
def data_img(filename, browser=None):
''' Encode an image file in base 64 as a data uri.
The filename is relative to settings.STATIC_URL/settings.STATIC_ROOT.
If the datauri is too large or anything goes wrong,
returns the url to the filename.
Example:
<img alt="embedded image" src="{{ 'images/myimage.png'|data_img:browser }}">
'''
return reinhardt.data_image.data_image(filename, browser=browser)
|
goodcrypto/goodcrypto-libs
|
reinhardt/templatetags/data_img.py
|
Python
|
gpl-3.0
| 828
| 0.014493
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ResourceFinder is a helper class for finding resources given their name."""
import codecs
import os
from py_vulcanize import module
from py_vulcanize import style_sheet as style_sheet_module
from py_vulcanize import resource as resource_module
from py_vulcanize import html_module
from py_vulcanize import strip_js_comments
class ResourceLoader(object):
"""Manges loading modules and their dependencies from files.
Modules handle parsing and the construction of their individual dependency
pointers. The loader deals with bookkeeping of what has been loaded, and
mapping names to file resources.
"""
def __init__(self, project):
self.project = project
self.stripped_js_by_filename = {}
self.loaded_modules = {}
self.loaded_raw_scripts = {}
self.loaded_style_sheets = {}
self.loaded_images = {}
@property
def source_paths(self):
"""A list of base directories to search for modules under."""
return self.project.source_paths
def FindResource(self, some_path, binary=False):
"""Finds a Resource for the given path.
Args:
some_path: A relative or absolute path to a file.
Returns:
A Resource or None.
"""
if os.path.isabs(some_path):
return self.FindResourceGivenAbsolutePath(some_path, binary)
else:
return self.FindResourceGivenRelativePath(some_path, binary)
def FindResourceGivenAbsolutePath(self, absolute_path, binary=False):
"""Returns a Resource for the given absolute path."""
candidate_paths = []
for source_path in self.source_paths:
if absolute_path.startswith(source_path):
candidate_paths.append(source_path)
if len(candidate_paths) == 0:
return None
# Sort by length. Longest match wins.
candidate_paths.sort(lambda x, y: len(x) - len(y))
longest_candidate = candidate_paths[-1]
return resource_module.Resource(longest_candidate, absolute_path, binary)
def FindResourceGivenRelativePath(self, relative_path, binary=False):
"""Returns a Resource for the given relative path."""
absolute_path = None
for script_path in self.source_paths:
absolute_path = os.path.join(script_path, relative_path)
if os.path.exists(absolute_path):
return resource_module.Resource(script_path, absolute_path, binary)
return None
def _FindResourceGivenNameAndSuffix(
self, requested_name, extension, return_resource=False):
"""Searches for a file and reads its contents.
Args:
requested_name: The name of the resource that was requested.
extension: The extension for this requested resource.
Returns:
      A (path, contents) pair, or the Resource itself (possibly None) when
      return_resource is True.
"""
pathy_name = requested_name.replace('.', os.sep)
filename = pathy_name + extension
resource = self.FindResourceGivenRelativePath(filename)
if return_resource:
return resource
if not resource:
return None, None
return _read_file(resource.absolute_path)
def FindModuleResource(self, requested_module_name):
"""Finds a module javascript file and returns a Resource, or none."""
js_resource = self._FindResourceGivenNameAndSuffix(
requested_module_name, '.js', return_resource=True)
html_resource = self._FindResourceGivenNameAndSuffix(
requested_module_name, '.html', return_resource=True)
if js_resource and html_resource:
if html_module.IsHTMLResourceTheModuleGivenConflictingResourceNames(
js_resource, html_resource):
return html_resource
return js_resource
elif js_resource:
return js_resource
return html_resource
def LoadModule(self, module_name=None, module_filename=None,
excluded_scripts=None):
assert bool(module_name) ^ bool(module_filename), (
'Must provide either module_name or module_filename.')
if module_filename:
resource = self.FindResource(module_filename)
if not resource:
raise Exception('Could not find %s in %s' % (
module_filename, repr(self.source_paths)))
module_name = resource.name
else:
resource = None # Will be set if we end up needing to load.
if module_name in self.loaded_modules:
assert self.loaded_modules[module_name].contents
return self.loaded_modules[module_name]
if not resource: # happens when module_name was given
resource = self.FindModuleResource(module_name)
if not resource:
raise module.DepsException('No resource for module "%s"' % module_name)
m = html_module.HTMLModule(self, module_name, resource)
self.loaded_modules[module_name] = m
# Fake it, this is probably either polymer.min.js or platform.js which are
# actually .js files....
if resource.absolute_path.endswith('.js'):
return m
m.Parse(excluded_scripts)
m.Load(excluded_scripts)
return m
def LoadRawScript(self, relative_raw_script_path):
resource = None
for source_path in self.source_paths:
possible_absolute_path = os.path.join(
source_path, os.path.normpath(relative_raw_script_path))
if os.path.exists(possible_absolute_path):
resource = resource_module.Resource(
source_path, possible_absolute_path)
break
if not resource:
raise module.DepsException(
'Could not find a file for raw script %s in %s' %
(relative_raw_script_path, self.source_paths))
assert relative_raw_script_path == resource.unix_style_relative_path, (
'Expected %s == %s' % (relative_raw_script_path,
resource.unix_style_relative_path))
if resource.absolute_path in self.loaded_raw_scripts:
return self.loaded_raw_scripts[resource.absolute_path]
raw_script = module.RawScript(resource)
self.loaded_raw_scripts[resource.absolute_path] = raw_script
return raw_script
def LoadStyleSheet(self, name):
if name in self.loaded_style_sheets:
return self.loaded_style_sheets[name]
resource = self._FindResourceGivenNameAndSuffix(
name, '.css', return_resource=True)
if not resource:
raise module.DepsException(
'Could not find a file for stylesheet %s' % name)
style_sheet = style_sheet_module.StyleSheet(self, name, resource)
style_sheet.load()
self.loaded_style_sheets[name] = style_sheet
return style_sheet
def LoadImage(self, abs_path):
if abs_path in self.loaded_images:
return self.loaded_images[abs_path]
if not os.path.exists(abs_path):
raise module.DepsException("url('%s') did not exist" % abs_path)
res = self.FindResourceGivenAbsolutePath(abs_path, binary=True)
if res is None:
raise module.DepsException("url('%s') was not in search path" % abs_path)
image = style_sheet_module.Image(res)
self.loaded_images[abs_path] = image
return image
def GetStrippedJSForFilename(self, filename, early_out_if_no_py_vulcanize):
if filename in self.stripped_js_by_filename:
return self.stripped_js_by_filename[filename]
with open(filename, 'r') as f:
contents = f.read(4096)
if early_out_if_no_py_vulcanize and ('py_vulcanize' not in contents):
return None
s = strip_js_comments.StripJSComments(contents)
self.stripped_js_by_filename[filename] = s
return s
def _read_file(absolute_path):
"""Reads a file and returns a (path, contents) pair.
Args:
absolute_path: Absolute path to a file.
Raises:
Exception: The given file doesn't exist.
IOError: There was a problem opening or reading the file.
"""
if not os.path.exists(absolute_path):
raise Exception('%s not found.' % absolute_path)
f = codecs.open(absolute_path, mode='r', encoding='utf-8')
contents = f.read()
f.close()
return absolute_path, contents
|
endlessm/chromium-browser
|
third_party/catapult/common/py_vulcanize/py_vulcanize/resource_loader.py
|
Python
|
bsd-3-clause
| 7,961
| 0.006657
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for voxels."""
import gin
import gin.tf
import tensorflow as tf
from tf3d.layers import sparse_voxel_net_utils
from tf3d.utils import shape_utils
compute_pooled_voxel_indices = sparse_voxel_net_utils.compute_pooled_voxel_indices
pool_features_given_indices = sparse_voxel_net_utils.pool_features_given_indices
def crop_and_pad_voxels(voxels, start_coordinates, end_coordinates):
"""Crops a voxel region and pads past the boundaries with zeros.
This accepts start and end coordinates past the limits of the voxel grid,
and uses it to calculate how much top/left/right/bottom padding to add.
Args:
voxels: A tf.float32 tensor of shape [x, y, z, f] to crop
start_coordinates: A list of len 4 with the [x, y, z, f] starting location
of our crop. This can be negative, which indicates left/top padding.
end_coordinates: A list of len 4 with the [x, y, z, f] ending location of
our crop. This can be beyond the size of the voxel tensor, which indicates
padding.
Returns:
cropped_and_padded_voxels: A voxel grid with shape
[end_coordinates[0] - start_coordinates[0],
end_coordinates[1] - start_coordinates[1],
end_coordinates[2] - start_coordinates[2],
end_coordinates[3] - start_coordinates[3]]
Raises:
ValueError: If requested crop and pad is outside the bounds of what the
function supports.
"""
if len(start_coordinates) != 4:
raise ValueError('start_coordinates should be of length 4')
if len(end_coordinates) != 4:
raise ValueError('end_coordinates should be of length 4')
if any([coord <= 0 for coord in end_coordinates]):
raise ValueError('Requested end coordinates should be > 0')
start_coordinates = tf.convert_to_tensor(start_coordinates, tf.int32)
end_coordinates = tf.convert_to_tensor(end_coordinates, tf.int32)
# Clip the coordinates to within the voxel grid
clipped_start_coordinates = tf.maximum(0, start_coordinates)
clipped_end_coordinates = tf.minimum(voxels.shape, end_coordinates)
cropped_voxels = tf.slice(voxels,
begin=clipped_start_coordinates,
size=(clipped_end_coordinates -
clipped_start_coordinates))
top_and_left_padding = tf.maximum(0, -start_coordinates)
bottom_and_right_padding = tf.maximum(0, end_coordinates - voxels.shape)
padding = tf.stack([top_and_left_padding, bottom_and_right_padding], axis=1)
return tf.pad(cropped_voxels, padding)
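# Editor's note: an illustrative sketch, not part of the original file, showing
# how negative start coordinates turn into zero padding; shapes are arbitrary.
def _example_crop_and_pad_voxels():
  voxels = tf.ones([4, 4, 4, 1], dtype=tf.float32)
  # Start one cell before the grid along x; that missing slice is zero padded.
  padded = crop_and_pad_voxels(
      voxels, start_coordinates=[-1, 0, 0, 0], end_coordinates=[3, 4, 4, 1])
  return padded  # shape [4, 4, 4, 1]; padded[0] is all zeros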
def pointcloud_to_voxel_grid(points,
features,
grid_cell_size,
start_location,
end_location,
segment_func=tf.math.unsorted_segment_mean):
"""Converts a pointcloud into a voxel grid.
Args:
points: A tf.float32 tensor of size [N, 3].
features: A tf.float32 tensor of size [N, F].
grid_cell_size: A tf.float32 tensor of size [3].
start_location: A tf.float32 tensor of size [3].
end_location: A tf.float32 tensor of size [3].
segment_func: A tensorflow function that operates on segments. Expect one
of tf.math.unsorted_segment_{min/max/mean/prod/sum}. Defaults to
tf.math.unsorted_segment_mean
Returns:
voxel_features: A tf.float32 tensor of
size [grid_x_len, grid_y_len, grid_z_len, F].
segment_ids: A tf.int32 tensor of IDs for each point indicating
which (flattened) voxel cell its data was mapped to.
point_indices: A tf.int32 tensor of size [num_points, 3] containing the
location of each point in the 3d voxel grid.
"""
grid_cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32)
start_location = tf.convert_to_tensor(start_location, dtype=tf.float32)
end_location = tf.convert_to_tensor(end_location, dtype=tf.float32)
point_indices = tf.cast(
(points - tf.expand_dims(start_location, axis=0)) /
tf.expand_dims(grid_cell_size, axis=0),
dtype=tf.int32)
grid_size = tf.cast(
tf.math.ceil((end_location - start_location) / grid_cell_size),
dtype=tf.int32)
# Note: all points outside the grid are added to the edges
# Cap index at grid_size - 1 (so a 10x10x10 grid's max cell is (9,9,9))
point_indices = tf.minimum(point_indices, tf.expand_dims(grid_size - 1,
axis=0))
# Don't allow any points below index (0, 0, 0)
point_indices = tf.maximum(point_indices, 0)
segment_ids = tf.reduce_sum(
point_indices * tf.stack(
[grid_size[1] * grid_size[2], grid_size[2], 1], axis=0),
axis=1)
voxel_features = segment_func(
data=features,
segment_ids=segment_ids,
num_segments=(grid_size[0] * grid_size[1] * grid_size[2]))
return (tf.reshape(voxel_features,
[grid_size[0],
grid_size[1],
grid_size[2],
features.get_shape().as_list()[1]]),
segment_ids,
point_indices)
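# Editor's note: a small illustrative sketch, not part of the original file;
# the point coordinates and grid bounds below are arbitrary assumptions.
def _example_pointcloud_to_voxel_grid():
  points = tf.constant([[0.1, 0.1, 0.1], [0.9, 0.9, 0.9]], dtype=tf.float32)
  features = tf.constant([[1.0], [3.0]], dtype=tf.float32)
  voxel_features, segment_ids, point_indices = pointcloud_to_voxel_grid(
      points=points,
      features=features,
      grid_cell_size=[0.5, 0.5, 0.5],
      start_location=[0.0, 0.0, 0.0],
      end_location=[1.0, 1.0, 1.0])
  # voxel_features has shape [2, 2, 2, 1]; cells (0, 0, 0) and (1, 1, 1) hold
  # the two point features, every other cell is zero.
  return voxel_features, segment_ids, point_indices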
def voxels_to_points(voxels, segment_ids):
"""Convert voxels back to points given their segment id.
Args:
voxels: A tf.float32 tensor representing a voxel grid. Expect shape
[x, y, z, f].
segment_ids: A tf.int32 tensor representing the segment id of each point
in the original pointcloud we want to project voxel features back to.
Returns:
point_features: A tf.float32 tensor of shape [N, f] where each point
now has the features in the associated voxel cell.
"""
flattened_voxels = tf.reshape(voxels, shape=(-1, voxels.shape[-1]))
return tf.gather(flattened_voxels, segment_ids)
def _points_offset_in_voxels_unbatched(points, grid_cell_size):
"""Converts points into offsets in voxel grid for a single batch.
The values range from -0.5 to 0.5
Args:
points: A tf.float32 tensor of size [N, 3].
grid_cell_size: The size of the grid cells in x, y, z dimensions in the
voxel grid. It should be either a tf.float32 tensor, a numpy array or a
list of size [3].
Returns:
voxel_xyz_offsets: A tf.float32 tensor of size [N, 3].
"""
min_points = tf.reduce_min(points, axis=0)
points_index = tf.math.floordiv(points - min_points, grid_cell_size)
points_offset = points - min_points - (points_index * grid_cell_size)
return (points_offset / grid_cell_size) - 0.5
def points_offset_in_voxels(points, grid_cell_size):
"""Converts points into offsets in voxel grid.
Args:
points: A tf.float32 tensor of size [batch_size, N, 3].
grid_cell_size: The size of the grid cells in x, y, z dimensions in the
voxel grid. It should be either a tf.float32 tensor, a numpy array or a
list of size [3].
Returns:
voxel_xyz_offsets: A tf.float32 tensor of size [batch_size, N, 3].
"""
batch_size = points.get_shape().as_list()[0]
def fn(i):
return _points_offset_in_voxels_unbatched(
points=points[i, :, :], grid_cell_size=grid_cell_size)
return tf.map_fn(fn=fn, elems=tf.range(batch_size), dtype=tf.float32)
def _points_to_voxel_indices(points, grid_cell_size):
"""Converts points into corresponding voxel indices.
Maps each point into a voxel grid with cell size given by grid_cell_size.
For each voxel, it computes a x, y, z index. Also converts the x, y, z index
to a single number index where there is a one-on-one mapping between
each x, y, z index value and its corresponding single number index value.
Args:
points: A tf.float32 tensor of size [N, 3].
grid_cell_size: The size of the grid cells in x, y, z dimensions in the
voxel grid. It should be either a tf.float32 tensor, a numpy array or a
list of size [3].
Returns:
voxel_xyz_indices: A tf.int32 tensor of size [N, 3] containing the x, y, z
index of the voxel corresponding to each given point.
voxel_single_number_indices: A tf.int32 tensor of size [N] containing the
single number index of the voxel corresponding to each given point.
voxel_start_location: A tf.float32 tensor of size [3] containing the start
location of the voxels.
"""
voxel_start_location = tf.reduce_min(points, axis=0)
voxel_xyz_indices = tf.cast(
tf.math.floordiv(points - voxel_start_location, grid_cell_size),
dtype=tf.int32)
voxel_xyz_indices, voxel_single_number_indices = compute_pooled_voxel_indices(
voxel_xyz_indices=voxel_xyz_indices, pooling_size=(1, 1, 1))
return voxel_xyz_indices, voxel_single_number_indices, voxel_start_location
def pointcloud_to_sparse_voxel_grid_unbatched(points, features, grid_cell_size,
segment_func):
"""Converts a pointcloud into a voxel grid.
This function does not handle batch size and only works for a single batch
of points. The function `pointcloud_to_sparse_voxel_grid` below calls this
function in a while loop to map a batch of points to a batch of voxels.
A sparse voxel grid is represented by only keeping the voxels that
have points in them in memory. Assuming that N' voxels have points in them,
we represent a sparse voxel grid by
(a) voxel_features, a [N', F] or [N', G, F] tensor containing the feature
vector for each voxel.
(b) voxel_indices, a [N', 3] tensor containing the x, y, z index of each
voxel.
Args:
points: A tf.float32 tensor of size [N, 3].
features: A tf.float32 tensor of size [N, F].
grid_cell_size: The size of the grid cells in x, y, z dimensions in the
voxel grid. It should be either a tf.float32 tensor, a numpy array or a
list of size [3].
segment_func: A tensorflow function that operates on segments. Examples are
one of tf.math.unsorted_segment_{min/max/mean/prod/sum}.
Returns:
voxel_features: A tf.float32 tensor of size [N', F] or [N', G, F] where G is
the number of points sampled per voxel.
voxel_indices: A tf.int32 tensor of size [N', 3].
segment_ids: A size [N] tf.int32 tensor of IDs for each point indicating
which (flattened) voxel cell its data was mapped to.
voxel_start_location: A tf.float32 tensor of size [3] containing the start
location of the voxels.
Raises:
ValueError: If pooling method is unknown.
"""
grid_cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32)
voxel_xyz_indices, voxel_single_number_indices, voxel_start_location = (
_points_to_voxel_indices(points=points, grid_cell_size=grid_cell_size))
voxel_features, segment_ids, num_segments = pool_features_given_indices(
features=features,
indices=voxel_single_number_indices,
segment_func=segment_func)
voxel_xyz_indices = tf.math.unsorted_segment_max(
data=voxel_xyz_indices,
segment_ids=segment_ids,
num_segments=num_segments)
return voxel_features, voxel_xyz_indices, segment_ids, voxel_start_location
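# Editor's note: an illustrative sketch, not part of the original file; the
# point cloud below is a made-up example.
def _example_sparse_voxel_grid_unbatched():
  points = tf.constant(
      [[0.1, 0.1, 0.1], [0.2, 0.1, 0.1], [1.4, 1.4, 1.4]], dtype=tf.float32)
  features = tf.constant([[1.0], [3.0], [5.0]], dtype=tf.float32)
  voxel_features, voxel_xyz_indices, segment_ids, start_location = (
      pointcloud_to_sparse_voxel_grid_unbatched(
          points=points,
          features=features,
          grid_cell_size=[1.0, 1.0, 1.0],
          segment_func=tf.math.unsorted_segment_mean))
  # Only two voxels are occupied: the first two points share a cell, so their
  # features are averaged to 2.0; the third point gets its own cell.
  return voxel_features, voxel_xyz_indices, segment_ids, start_location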
def _pad_or_clip_voxels(voxel_features, voxel_indices, num_valid_voxels,
segment_ids, voxels_pad_or_clip_size):
"""Pads or clips voxels."""
if voxels_pad_or_clip_size:
num_valid_voxels = tf.minimum(num_valid_voxels, voxels_pad_or_clip_size)
num_channels = voxel_features.get_shape().as_list()[-1]
if len(voxel_features.shape.as_list()) == 2:
output_shape = [voxels_pad_or_clip_size, num_channels]
elif len(voxel_features.shape.as_list()) == 3:
num_samples_per_voxel = voxel_features.get_shape().as_list()[1]
if num_samples_per_voxel is None:
num_samples_per_voxel = tf.shape(voxel_features)[1]
output_shape = [
voxels_pad_or_clip_size, num_samples_per_voxel, num_channels
]
else:
raise ValueError('voxel_features should be either rank 2 or 3.')
voxel_features = shape_utils.pad_or_clip_nd(
tensor=voxel_features, output_shape=output_shape)
voxel_indices = shape_utils.pad_or_clip_nd(
tensor=voxel_indices, output_shape=[voxels_pad_or_clip_size, 3])
valid_segment_ids_mask = tf.cast(
tf.less(segment_ids, num_valid_voxels), dtype=tf.int32)
segment_ids *= valid_segment_ids_mask
return voxel_features, voxel_indices, num_valid_voxels, segment_ids
def pointcloud_to_sparse_voxel_grid(points, features, num_valid_points,
grid_cell_size, voxels_pad_or_clip_size,
segment_func):
"""Converts a pointcloud into a voxel grid.
This function calls the `pointcloud_to_sparse_voxel_grid_unbatched`
function above in a while loop to map a batch of points to a batch of voxels.
Args:
points: A tf.float32 tensor of size [batch_size, N, 3].
features: A tf.float32 tensor of size [batch_size, N, F].
num_valid_points: A tf.int32 tensor of size [num_batches] containing the
number of valid points in each batch example.
grid_cell_size: A tf.float32 tensor of size [3].
voxels_pad_or_clip_size: Number of target voxels to pad or clip to. If None,
it will not perform the padding.
segment_func: A tensorflow function that operates on segments. Examples are
one of tf.math.unsorted_segment_{min/max/mean/prod/sum}.
Returns:
voxel_features: A tf.float32 tensor of size [batch_size, N', F]
or [batch_size, N', G, F] where G is the number of points sampled per
voxel.
voxel_indices: A tf.int32 tensor of size [batch_size, N', 3].
num_valid_voxels: A tf.int32 tensor of size [batch_size].
segment_ids: A size [batch_size, N] tf.int32 tensor of IDs for each point
indicating which (flattened) voxel cell its data was mapped to.
voxel_start_location: A size [batch_size, 3] tf.float32 tensor of voxel
start locations.
Raises:
ValueError: If pooling method is unknown.
"""
batch_size = points.get_shape().as_list()[0]
if batch_size is None:
batch_size = tf.shape(points)[0]
num_points = tf.shape(points)[1]
def fn(i):
"""Map function."""
num_valid_points_i = num_valid_points[i]
points_i = points[i, :num_valid_points_i, :]
features_i = features[i, :num_valid_points_i, :]
voxel_features_i, voxel_indices_i, segment_ids_i, voxel_start_location_i = (
pointcloud_to_sparse_voxel_grid_unbatched(
points=points_i,
features=features_i,
grid_cell_size=grid_cell_size,
segment_func=segment_func))
num_valid_voxels_i = tf.shape(voxel_features_i)[0]
(voxel_features_i, voxel_indices_i, num_valid_voxels_i,
segment_ids_i) = _pad_or_clip_voxels(
voxel_features=voxel_features_i,
voxel_indices=voxel_indices_i,
num_valid_voxels=num_valid_voxels_i,
segment_ids=segment_ids_i,
voxels_pad_or_clip_size=voxels_pad_or_clip_size)
segment_ids_i = tf.pad(
segment_ids_i, paddings=[[0, num_points - num_valid_points_i]])
return (voxel_features_i, voxel_indices_i, num_valid_voxels_i,
segment_ids_i, voxel_start_location_i)
return tf.map_fn(
fn=fn,
elems=tf.range(batch_size),
dtype=(tf.float32, tf.int32, tf.int32, tf.int32, tf.float32))
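# Editor's note: an illustrative batched sketch, not part of the original file;
# the shapes, cell size and pad size below are arbitrary assumptions.
def _example_pointcloud_to_sparse_voxel_grid():
  points = tf.random.uniform([2, 100, 3], minval=0.0, maxval=5.0)
  features = tf.random.uniform([2, 100, 8])
  num_valid_points = tf.constant([100, 80], dtype=tf.int32)
  (voxel_features, voxel_indices, num_valid_voxels, segment_ids,
   voxel_start_locations) = pointcloud_to_sparse_voxel_grid(
       points=points,
       features=features,
       num_valid_points=num_valid_points,
       grid_cell_size=[0.5, 0.5, 0.5],
       voxels_pad_or_clip_size=200,
       segment_func=tf.math.unsorted_segment_mean)
  # voxel_features is padded/clipped to shape [2, 200, 8]; num_valid_voxels
  # reports how many of those 200 rows are real voxels per batch example.
  return voxel_features, voxel_indices, num_valid_voxels, segment_ids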
def sparse_voxel_grid_to_pointcloud(voxel_features, segment_ids,
num_valid_voxels, num_valid_points):
"""Convert voxel features back to points given their segment ids.
Args:
voxel_features: A tf.float32 tensor of size [batch_size, N', F].
segment_ids: A size [batch_size, N] tf.int32 tensor of IDs for each point
indicating which (flattened) voxel cell its data was mapped to.
num_valid_voxels: A tf.int32 tensor of size [batch_size] containing the
number of valid voxels in each batch example.
num_valid_points: A tf.int32 tensor of size [batch_size] containing the
number of valid points in each batch example.
Returns:
point_features: A tf.float32 tensor of size [batch_size, N, F].
Raises:
ValueError: If batch_size is unknown at graph construction time.
"""
batch_size = voxel_features.shape[0]
if batch_size is None:
raise ValueError('batch_size is unknown at graph construction time.')
num_points = tf.shape(segment_ids)[1]
def fn(i):
num_valid_voxels_i = num_valid_voxels[i]
num_valid_points_i = num_valid_points[i]
voxel_features_i = voxel_features[i, :num_valid_voxels_i, :]
segment_ids_i = segment_ids[i, :num_valid_points_i]
point_features = tf.gather(voxel_features_i, segment_ids_i)
point_features_rank = len(point_features.get_shape().as_list())
point_features_paddings = [[0, num_points - num_valid_points_i]]
for _ in range(point_features_rank - 1):
point_features_paddings.append([0, 0])
point_features = tf.pad(point_features, paddings=point_features_paddings)
return point_features
return tf.map_fn(fn=fn, elems=tf.range(batch_size), dtype=tf.float32)
@gin.configurable
def per_voxel_point_sample_segment_func(data, segment_ids, num_segments,
num_samples_per_voxel):
"""Samples features from the points within each voxel.
Args:
data: A tf.float32 tensor of size [N, F].
segment_ids: A tf.int32 tensor of size [N].
num_segments: Number of segments.
num_samples_per_voxel: Number of features to sample per voxel. If the voxel
has less number of points in it, the point features will be padded by 0.
Returns:
A tf.float32 tensor of size [num_segments, num_samples_per_voxel, F].
A tf.int32 indices of size [N, num_samples_per_voxel].
"""
num_channels = data.get_shape().as_list()[1]
if num_channels is None:
raise ValueError('num_channels is None.')
n = tf.shape(segment_ids)[0]
def _body_fn(i, indices_range, indices):
"""Computes the indices of the i-th point feature in each segment."""
indices_i = tf.math.unsorted_segment_max(
data=indices_range, segment_ids=segment_ids, num_segments=num_segments)
indices_i_positive_mask = tf.greater(indices_i, 0)
indices_i_positive = tf.boolean_mask(indices_i, indices_i_positive_mask)
boolean_mask = tf.scatter_nd(
indices=tf.cast(
tf.expand_dims(indices_i_positive - 1, axis=1), dtype=tf.int64),
updates=tf.ones_like(indices_i_positive, dtype=tf.int32),
shape=(n,))
indices_range *= (1 - boolean_mask)
indices_i *= tf.cast(indices_i_positive_mask, dtype=tf.int32)
indices_i = tf.pad(
tf.expand_dims(indices_i, axis=1),
paddings=[[0, 0], [i, num_samples_per_voxel - i - 1]])
indices += indices_i
i = i + 1
return i, indices_range, indices
cond = lambda i, indices_range, indices: i < num_samples_per_voxel
(_, _, indices) = tf.while_loop(
cond=cond,
body=_body_fn,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.range(n) + 1,
tf.zeros([num_segments, num_samples_per_voxel],
dtype=tf.int32)))
data = tf.pad(data, paddings=[[1, 0], [0, 0]])
voxel_features = tf.gather(data, tf.reshape(indices, [-1]))
return tf.reshape(voxel_features,
[num_segments, num_samples_per_voxel, num_channels])
def compute_pointcloud_weights_based_on_voxel_density(points, grid_cell_size):
"""Computes pointcloud weights based on voxel density.
Args:
points: A tf.float32 tensor of size [num_points, 3].
grid_cell_size: The size of the grid cells in x, y, z dimensions in the
voxel grid. It should be either a tf.float32 tensor, a numpy array or a
list of size [3].
Returns:
    A tf.float32 tensor of size [num_points, 1] containing weights that are
    inversely proportional to the density of the points in voxels.
"""
num_points = tf.shape(points)[0]
features = tf.ones([num_points, 1], dtype=tf.float32)
voxel_features, _, segment_ids, _ = (
pointcloud_to_sparse_voxel_grid_unbatched(
points=points,
features=features,
grid_cell_size=grid_cell_size,
segment_func=tf.math.unsorted_segment_sum))
num_voxels = tf.shape(voxel_features)[0]
point_features = sparse_voxel_grid_to_pointcloud(
voxel_features=tf.expand_dims(voxel_features, axis=0),
segment_ids=tf.expand_dims(segment_ids, axis=0),
num_valid_voxels=tf.expand_dims(num_voxels, axis=0),
num_valid_points=tf.expand_dims(num_points, axis=0))
inverse_point_densities = 1.0 / tf.squeeze(point_features, axis=0)
total_inverse_density = tf.reduce_sum(inverse_point_densities)
return (inverse_point_densities * tf.cast(num_points, dtype=tf.float32) /
total_inverse_density)
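# Editor's note: an illustrative sketch, not part of the original file, showing
# that points in denser voxels get smaller weights; the values are arbitrary.
def _example_density_weights():
  points = tf.constant(
      [[0.0, 0.0, 0.0], [0.1, 0.0, 0.0], [2.0, 2.0, 2.0]], dtype=tf.float32)
  weights = compute_pointcloud_weights_based_on_voxel_density(
      points=points, grid_cell_size=[1.0, 1.0, 1.0])
  # The first two points share a voxel, so each gets half the weight of the
  # isolated third point (weights are normalized to sum to num_points).
  return weights  # shape [3, 1]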
|
google-research/google-research
|
tf3d/utils/voxel_utils.py
|
Python
|
apache-2.0
| 21,191
| 0.004672
|
#!/usr/bin/env python
import argparse
import os
import logging
import cdec.configobj
import cdec.sa
from cdec.sa._sa import monitor_cpu
import sys
MAX_PHRASE_LENGTH = 4
def precompute(f_sa, max_len, max_nt, max_size, min_gap, rank1, rank2, tight_phrases):
lcp = cdec.sa.LCP(f_sa)
stats = sorted(lcp.compute_stats(MAX_PHRASE_LENGTH), reverse=True)
precomp = cdec.sa.Precomputation(from_stats=stats,
fsarray=f_sa,
precompute_rank=rank1,
precompute_secondary_rank=rank2,
max_length=max_len,
max_nonterminals=max_nt,
train_max_initial_size=max_size,
train_min_gap_size=min_gap)
return precomp
def main():
preprocess_start_time = monitor_cpu()
sys.setrecursionlimit(sys.getrecursionlimit() * 100)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('cdec.sa.compile')
parser = argparse.ArgumentParser(description='Compile a corpus into a suffix array.')
parser.add_argument('--maxnt', '-n', type=int, default=2,
help='Maximum number of non-terminal symbols')
parser.add_argument('--maxlen', '-l', type=int, default=5,
help='Maximum number of terminals')
parser.add_argument('--maxsize', '-s', type=int, default=15,
help='Maximum rule span')
parser.add_argument('--mingap', '-g', type=int, default=1,
help='Minimum gap size')
parser.add_argument('--rank1', '-r1', type=int, default=100,
help='Number of pre-computed frequent patterns')
parser.add_argument('--rank2', '-r2', type=int, default=10,
                        help='Number of pre-computed super-frequent patterns')
parser.add_argument('--loose', action='store_true',
help='Enable loose phrase extraction (default: tight)')
parser.add_argument('-c', '--config', default='/dev/stdout',
help='Output configuration')
parser.add_argument('-f', '--source',
help='Source language corpus')
parser.add_argument('-e', '--target',
help='Target language corpus')
parser.add_argument('-b', '--bitext',
help='Parallel text (source ||| target)')
parser.add_argument('-a', '--alignment', required=True,
help='Bitext word alignment')
parser.add_argument('-o', '--output', required=True,
help='Output path')
args = parser.parse_args()
if not ((args.source and args.target) or args.bitext):
parser.error('a parallel corpus is required\n'
'\tuse -f (source) with -e (target) or -b (bitext)')
param_names = ('max_len', 'max_nt', 'max_size', 'min_gap',
'rank1', 'rank2', 'tight_phrases')
params = (args.maxlen, args.maxnt, args.maxsize, args.mingap,
args.rank1, args.rank2, not args.loose)
if not os.path.exists(args.output):
os.mkdir(args.output)
f_sa_bin = os.path.join(args.output, 'f.sa.bin')
e_bin = os.path.join(args.output, 'e.bin')
precomp_file = 'precomp.{0}.{1}.{2}.{3}.{4}.{5}.bin'.format(*params)
precomp_bin = os.path.join(args.output, precomp_file)
a_bin = os.path.join(args.output, 'a.bin')
lex_bin = os.path.join(args.output, 'lex.bin')
start_time = monitor_cpu()
logger.info('Compiling source suffix array')
if args.bitext:
f_sa = cdec.sa.SuffixArray(from_text=args.bitext, side='source')
else:
f_sa = cdec.sa.SuffixArray(from_text=args.source)
f_sa.write_binary(f_sa_bin)
stop_time = monitor_cpu()
logger.info('Compiling source suffix array took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Compiling target data array')
if args.bitext:
e = cdec.sa.DataArray(from_text=args.bitext, side='target')
else:
e = cdec.sa.DataArray(from_text=args.target)
e.write_binary(e_bin)
stop_time = monitor_cpu()
logger.info('Compiling target data array took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Precomputing frequent phrases')
precompute(f_sa, *params).write_binary(precomp_bin)
stop_time = monitor_cpu()
logger.info('Compiling precomputations took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Compiling alignment')
a = cdec.sa.Alignment(from_text=args.alignment)
a.write_binary(a_bin)
stop_time = monitor_cpu()
    logger.info('Compiling alignment took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Compiling bilexical dictionary')
lex = cdec.sa.BiLex(from_data=True, alignment=a, earray=e, fsarray=f_sa)
lex.write_binary(lex_bin)
stop_time = monitor_cpu()
logger.info('Compiling bilexical dictionary took %f seconds', stop_time - start_time)
# Write configuration
config = cdec.configobj.ConfigObj(args.config, unrepr=True)
config['f_sa_file'] = os.path.abspath(f_sa_bin)
config['e_file'] = os.path.abspath(e_bin)
config['a_file'] = os.path.abspath(a_bin)
config['lex_file'] = os.path.abspath(lex_bin)
config['precompute_file'] = os.path.abspath(precomp_bin)
for name, value in zip(param_names, params):
config[name] = value
config.write()
preprocess_stop_time = monitor_cpu()
logger.info('Overall preprocessing step took %f seconds', preprocess_stop_time - preprocess_start_time)
if __name__ == '__main__':
main()
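# Editor's note: an illustrative invocation, not part of the original file; the
# corpus, alignment and output paths below are hypothetical:
#   python -m cdec.sa.compile -b corpus.de-en -a corpus.align \
#       -o sa_output -c extract.ini
# This compiles f.sa.bin, e.bin, a.bin, lex.bin and a precomp.*.bin file into
# sa_output/ and records their absolute paths in extract.ini.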
|
kho/mr-cdec
|
python/pkg/cdec/sa/compile.py
|
Python
|
apache-2.0
| 5,575
| 0.003587
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Define base Blueprint."""
from flask import Blueprint
blueprint = Blueprint('base', __name__, template_folder='templates',
static_folder='static')
|
zenodo/invenio
|
invenio/base/views.py
|
Python
|
gpl-2.0
| 947
| 0
|
input = """
x | -x.
y | -y.
"""
output = """
x | -x.
y | -y.
"""
|
veltri/DLV2
|
tests/parser/bkunstrat3.bk.test.py
|
Python
|
apache-2.0
| 69
| 0
|
"""
Copyright 2014 Jason Heeris, jason.heeris@gmail.com
This file is part of the dungeon excavator web interface ("webcavate").
Webcavate is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Webcavate is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
webcavate. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import uuid
from flask import Flask, render_template, request, make_response, redirect, url_for, flash
from dungeon.excavate import render_room
HELP_TEXT = """\
Web interface to the dungeon excavator."""
app = Flask('dungeon.web')
app.secret_key = str(uuid.uuid4())
@app.route("/")
def root():
""" Web interface landing page. """
return render_template('index.html')
@app.route("/error")
def error():
""" Display errors. """
return render_template('error.html')
def make_map(request, format):
tile_size = int(request.form['size'])
wall_file = request.files['walls']
floor_file = request.files['floor']
floorplan_file = request.files['floorplan']
try:
room_data, content_type = render_room(
floor_file.read(),
wall_file.read(),
floorplan_file.read(),
tile_size,
format
)
except ValueError as ve:
flash(str(ve))
return redirect(url_for('error'))
# Create response
response = make_response(room_data)
response.headers['Content-Type'] = content_type
return response
@app.route("/map.svg", methods=['POST'])
def map_svg():
return make_map(request, format='svg')
@app.route("/map.png", methods=['POST'])
def map_png():
return make_map(request, format='png')
@app.route("/map.jpg", methods=['POST'])
def map_jpg():
return make_map(request, format='jpg')
@app.route("/map", methods=['POST'])
def process():
""" Process submitted form data. """
format = request.form['format']
try:
node = {
'png': 'map_png',
'svg': 'map_svg',
'jpg': 'map_jpg',
}[format]
except KeyError:
flash("The output format you selected is not supported.")
return redirect(url_for('error'))
else:
return redirect(url_for(node, _method='POST'), code=307)
def main():
""" Parse arguments and get things going for the web interface """
parser = argparse.ArgumentParser(description=HELP_TEXT)
parser.add_argument(
'-p', '--port',
help="Port to serve the interface on.",
type=int,
default=5050
)
parser.add_argument(
'-a', '--host',
help="Host to server the interface on.",
)
args = parser.parse_args()
app.run(port=args.port, host=args.host, debug=False)
|
detly/webcavate
|
webcavate/app.py
|
Python
|
gpl-3.0
| 3,132
| 0.000958
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('create_library')
@click.argument("name", type=str)
@click.option(
"--description",
help="Optional data library description",
type=str
)
@click.option(
"--synopsis",
help="Optional data library synopsis",
type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, name, description="", synopsis=""):
"""Create a data library with the properties defined in the arguments.
Output:
Details of the created library.
For example::
{'id': 'f740ab636b360a70',
'name': 'Library from bioblend',
'url': '/api/libraries/f740ab636b360a70'}
"""
return ctx.gi.libraries.create_library(name, description=description, synopsis=synopsis)
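# Editor's note: an illustrative sketch, not part of the original file, of the
# equivalent direct bioblend call this command delegates to; the Galaxy URL and
# API key are placeholder assumptions.
def _example_direct_bioblend_call():
    from bioblend.galaxy import GalaxyInstance
    gi = GalaxyInstance(url='https://galaxy.example.org', key='<api-key>')
    return gi.libraries.create_library(
        'Library from bioblend', description='demo', synopsis='demo synopsis')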
|
galaxy-iuc/parsec
|
parsec/commands/libraries/create_library.py
|
Python
|
apache-2.0
| 859
| 0.001164
|
import galaxy.model
from logging import getLogger
log = getLogger( __name__ )
ROLES_UNSET = object()
INVALID_STATES = [ galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]
class DatasetMatcher( object ):
""" Utility class to aid DataToolParameter and similar classes in reasoning
about what HDAs could match or are selected for a parameter and value.
Goal here is to both encapsulate and reuse logic related to filtering,
datatype matching, hiding errored dataset, finding implicit conversions,
and permission handling.
"""
def __init__( self, trans, param, value, other_values ):
self.trans = trans
self.param = param
self.tool = param.tool
self.value = value
self.current_user_roles = ROLES_UNSET
filter_value = None
if param.options:
try:
filter_value = param.options.get_options( trans, other_values )[0][0]
except IndexError:
pass # no valid options
self.filter_value = filter_value
def hda_accessible( self, hda, check_security=True ):
""" Does HDA correspond to dataset that is an a valid state and is
accessible to user.
"""
dataset = hda.dataset
        state_valid = dataset.state not in INVALID_STATES
return state_valid and ( not check_security or self.__can_access_dataset( dataset ) )
def valid_hda_match( self, hda, check_implicit_conversions=True, check_security=False ):
""" Return False of this parameter can not be matched to a the supplied
HDA, otherwise return a description of the match (either a
HdaDirectMatch describing a direct match or a HdaImplicitMatch
describing an implicit conversion.)
"""
if self.filter( hda ):
return False
formats = self.param.formats
if hda.datatype.matches_any( formats ):
return HdaDirectMatch( hda )
if not check_implicit_conversions:
return False
target_ext, converted_dataset = hda.find_conversion_destination( formats )
if target_ext:
if converted_dataset:
hda = converted_dataset
if check_security and not self.__can_access_dataset( hda.dataset ):
return False
return HdaImplicitMatch( hda, target_ext )
return False
def hda_match( self, hda, check_implicit_conversions=True, ensure_visible=True ):
""" If HDA is accessible, return information about whether it could
match this parameter and if so how. See valid_hda_match for more
information.
"""
accessible = self.hda_accessible( hda )
if accessible and ( not ensure_visible or hda.visible or ( self.selected( hda ) and not hda.implicitly_converted_parent_datasets ) ):
# If we are sending data to an external application, then we need to make sure there are no roles
# associated with the dataset that restrict its access from "public".
require_public = self.tool and self.tool.tool_type == 'data_destination'
if require_public and not self.trans.app.security_agent.dataset_is_public( hda.dataset ):
return False
if self.filter( hda ):
return False
return self.valid_hda_match( hda, check_implicit_conversions=check_implicit_conversions )
def selected( self, hda ):
""" Given value for DataToolParameter, is this HDA "selected".
"""
value = self.value
if value and str( value[ 0 ] ).isdigit():
return hda.id in map(int, value)
else:
return value and hda in value
def filter( self, hda ):
""" Filter out this value based on other values for job (if
applicable).
"""
param = self.param
return param.options and param._options_filter_attribute( hda ) != self.filter_value
def __can_access_dataset( self, dataset ):
# Lazily cache current_user_roles.
if self.current_user_roles is ROLES_UNSET:
self.current_user_roles = self.trans.get_current_user_roles()
return self.trans.app.security_agent.can_access_dataset( self.current_user_roles, dataset )
class HdaDirectMatch( object ):
""" Supplied HDA was a valid option directly (did not need to find implicit
conversion).
"""
def __init__( self, hda ):
self.hda = hda
@property
def implicit_conversion( self ):
return False
class HdaImplicitMatch( object ):
""" Supplied HDA was a valid option directly (did not need to find implicit
conversion).
"""
def __init__( self, hda, target_ext ):
self.hda = hda
self.target_ext = target_ext
@property
def implicit_conversion( self ):
return True
class DatasetCollectionMatcher( object ):
def __init__( self, dataset_matcher ):
self.dataset_matcher = dataset_matcher
def __valid_element( self, element ):
        # Simplify things for now and assume these are HDAs and not implicit
        # conversions. One could imagine handling both of those cases down
        # the road.
if element.ldda:
return False
child_collection = element.child_collection
if child_collection:
return self.dataset_collection_match( child_collection )
hda = element.hda
if not hda:
return False
hda_match = self.dataset_matcher.hda_match( hda, ensure_visible=False )
return hda_match and not hda_match.implicit_conversion
def hdca_match( self, history_dataset_collection_association, reduction=False ):
dataset_collection = history_dataset_collection_association.collection
if reduction and dataset_collection.collection_type.find( ":" ) > 0:
return False
else:
return self.dataset_collection_match( dataset_collection )
def dataset_collection_match( self, dataset_collection ):
valid = True
for element in dataset_collection.elements:
if not self.__valid_element( element ):
valid = False
break
return valid
__all__ = [ 'DatasetMatcher', 'DatasetCollectionMatcher' ]
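# Minimal usage sketch (hypothetical names; assumes a Galaxy `trans`, a
# DataToolParameter `param`, and an HDA `hda` are available in the caller):
#
#   matcher = DatasetMatcher( trans, param, value=None, other_values={} )
#   match = matcher.hda_match( hda )
#   if match and not match.implicit_conversion:
#       pass  # hda matches the parameter's datatypes directly
#   elif match:
#       pass  # hda matches via implicit conversion to match.target_ext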
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/tools/parameters/dataset_matcher.py
|
Python
|
gpl-3.0
| 6,314
| 0.01758
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import api_select
prefixed_jobs = """
serial flow: [
job: 'top_quick1'
serial flow: [
job: 'top_x_quick2-1'
]
serial flow: [
job: 'top_x_quick2-2'
]
serial flow: [
job: 'top_x_quick2-3'
]
job: 'top_quick3'
parallel flow: (
serial flow: [
job: 'top_y_z_quick4a'
]
serial flow: [
job: 'quick4b'
]
job: 'top_y_quick5'
)
]
"""
def test_prefix(api_type, capsys):
with api_select.api(__file__, api_type) as api:
def job(name):
api.job(name, exec_time=0.5, max_fails=0, expect_invocations=0, expect_order=None, params=None)
api.flow_job()
job('quick1')
index = 0
for index in 1, 2, 3:
job('x_quick2-' + str(index))
job('quick3')
job('y_z_quick4')
job('y_quick5')
with serial(api, timeout=70, report_interval=3, job_name_prefix='top_', just_dump=True) as ctrl1:
ctrl1.invoke('quick1')
for index in 1, 2, 3:
with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2:
ctrl2.invoke('quick2-' + str(index))
ctrl1.invoke('quick3')
with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2:
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3a:
ctrl3a.invoke('quick4a')
# Reset prefix
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix=None) as ctrl3b:
ctrl3b.invoke('quick4b')
ctrl2.invoke('quick5')
sout, _ = capsys.readouterr()
assert prefixed_jobs.strip() in sout
|
lechat/jenkinsflow
|
test/prefix_test.py
|
Python
|
bsd-3-clause
| 1,965
| 0.005598
|
"""
Fixture to create a course and course components (XBlocks).
"""
import datetime
import json
import mimetypes
from collections import namedtuple
from textwrap import dedent
from opaque_keys.edx.keys import CourseKey
from path import Path
from common.test.acceptance.fixtures import STUDIO_BASE_URL
from common.test.acceptance.fixtures.base import FixtureError, XBlockContainerFixture
class XBlockFixtureDesc(object):
"""
Description of an XBlock, used to configure a course fixture.
"""
def __init__(self, category, display_name, data=None,
metadata=None, grader_type=None, publish='make_public', **kwargs):
"""
Configure the XBlock to be created by the fixture.
These arguments have the same meaning as in the Studio REST API:
* `category`
* `display_name`
* `data`
* `metadata`
* `grader_type`
* `publish`
"""
self.category = category
self.display_name = display_name
self.data = data
self.metadata = metadata
self.grader_type = grader_type
self.publish = publish
self.children = []
self.locator = None
self.fields = kwargs
def add_children(self, *args):
"""
Add child XBlocks to this XBlock.
Each item in `args` is an `XBlockFixtureDesc` object.
        Returns this `XBlockFixtureDesc` instance to allow chaining.
"""
self.children.extend(args)
return self
def serialize(self):
"""
Return a JSON representation of the XBlock, suitable
for sending as POST data to /xblock
XBlocks are always set to public visibility.
"""
returned_data = {
'display_name': self.display_name,
'data': self.data,
'metadata': self.metadata,
'graderType': self.grader_type,
'publish': self.publish,
'fields': self.fields,
}
return json.dumps(returned_data)
def __str__(self):
"""
Return a string representation of the description.
Useful for error messages.
"""
return dedent(u"""
<XBlockFixtureDescriptor:
category={0},
data={1},
metadata={2},
grader_type={3},
publish={4},
children={5},
locator={6},
>
""").strip().format(
self.category, self.data, self.metadata,
self.grader_type, self.publish, self.children, self.locator
)
# Description of course updates to add to the course
# `date` is a str (e.g. "January 29, 2014")
# `content` is also a str (e.g. "Test course")
CourseUpdateDesc = namedtuple("CourseUpdateDesc", ['date', 'content'])
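# For example (hypothetical values, matching the comment above):
#   update = CourseUpdateDesc(date="January 29, 2014", content="Test course update")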
class CourseFixture(XBlockContainerFixture):
"""
Fixture for ensuring that a course exists.
WARNING: This fixture is NOT idempotent. To avoid conflicts
between tests, you should use unique course identifiers for each fixture.
"""
def __init__(self, org, number, run, display_name, start_date=None, end_date=None, settings=None):
"""
Configure the course fixture to create a course with
`org`, `number`, `run`, and `display_name` (all unicode).
`start_date` and `end_date` are datetime objects indicating the course start and end date.
The default is for the course to have started in the distant past, which is generally what
we want for testing so students can enroll.
        `settings` can contain any additional course settings that need to be enabled; for example,
        to enable the entrance exam setting, pass a dict like {"entrance_exam_enabled": "true"}.
        These have the same meaning as in the Studio REST API /course endpoint.
"""
super(CourseFixture, self).__init__()
self._course_dict = {
'org': org,
'number': number,
'run': run,
'display_name': display_name
}
# Set a default start date to the past, but use Studio's
# default for the end date (meaning we don't set it here)
if start_date is None:
start_date = datetime.datetime(1970, 1, 1)
self._course_details = {
'start_date': start_date.isoformat(),
}
if end_date is not None:
self._course_details['end_date'] = end_date.isoformat()
if settings is not None:
self._course_details.update(settings)
self._updates = []
self._handouts = []
self._assets = []
self._textbooks = []
self._advanced_settings = {}
self._course_key = None
def __str__(self):
"""
String representation of the course fixture, useful for debugging.
"""
return u"<CourseFixture: org='{org}', number='{number}', run='{run}'>".format(**self._course_dict)
def add_course_details(self, course_details):
"""
Add course details to dict of course details to be updated when configure_course or install is called.
Arguments:
Dictionary containing key value pairs for course updates,
e.g. {'start_date': datetime.now() }
"""
if 'start_date' in course_details:
course_details['start_date'] = course_details['start_date'].isoformat()
if 'end_date' in course_details:
course_details['end_date'] = course_details['end_date'].isoformat()
self._course_details.update(course_details)
def add_update(self, update):
"""
Add an update to the course. `update` should be a `CourseUpdateDesc`.
"""
self._updates.append(update)
def add_handout(self, asset_name):
"""
Add the handout named `asset_name` to the course info page.
Note that this does not actually *create* the static asset; it only links to it.
"""
self._handouts.append(asset_name)
def add_asset(self, asset_name):
"""
        Add the assets named in `asset_name` (a list of file names) to the assets to be uploaded when the install method is called.
"""
self._assets.extend(asset_name)
def add_textbook(self, book_title, chapters):
"""
Add textbook to the list of textbooks to be added when the install method is called.
"""
self._textbooks.append({"chapters": chapters, "tab_title": book_title})
def add_advanced_settings(self, settings):
"""
Adds advanced settings to be set on the course when the install method is called.
"""
self._advanced_settings.update(settings)
def install(self):
"""
Create the course and XBlocks within the course.
This is NOT an idempotent method; if the course already exists, this will
raise a `FixtureError`. You should use unique course identifiers to avoid
conflicts between tests.
"""
self._create_course()
self._install_course_updates()
self._install_course_handouts()
self._install_course_textbooks()
self._configure_course()
self._upload_assets()
self._add_advanced_settings()
self._create_xblock_children(self._course_location, self.children)
return self
def configure_course(self):
"""
Configure Course Settings, take new course settings from self._course_details dict object
"""
self._configure_course()
@property
def studio_course_outline_as_json(self):
"""
Retrieves Studio course outline in JSON format.
"""
url = STUDIO_BASE_URL + '/course/' + self._course_key + "?format=json"
response = self.session.get(url, headers=self.headers)
if not response.ok:
raise FixtureError(
u"Could not retrieve course outline json. Status was {0}".format(
response.status_code))
try:
course_outline_json = response.json()
except ValueError:
raise FixtureError(
u"Could not decode course outline as JSON: '{0}'".format(response)
)
return course_outline_json
@property
def _course_location(self):
"""
Return the locator string for the course.
"""
course_key = CourseKey.from_string(self._course_key)
if getattr(course_key, 'deprecated', False):
block_id = self._course_dict['run']
else:
block_id = 'course'
return unicode(course_key.make_usage_key('course', block_id))
@property
def _assets_url(self):
"""
Return the url string for the assets
"""
return "/assets/" + self._course_key + "/"
@property
def _handouts_loc(self):
"""
Return the locator string for the course handouts
"""
course_key = CourseKey.from_string(self._course_key)
return unicode(course_key.make_usage_key('course_info', 'handouts'))
def _create_course(self):
"""
Create the course described in the fixture.
"""
        # If the course already exists, Studio responds with a 200 and an
        # error message, which we check for and raise as a FixtureError below.
response = self.session.post(
STUDIO_BASE_URL + '/course/',
data=self._encode_post_dict(self._course_dict),
headers=self.headers
)
try:
err = response.json().get('ErrMsg')
except ValueError:
raise FixtureError(
u"Could not parse response from course request as JSON: '{0}'".format(
response.content))
# This will occur if the course identifier is not unique
if err is not None:
raise FixtureError(u"Could not create course {0}. Error message: '{1}'".format(self, err))
if response.ok:
self._course_key = response.json()['course_key']
else:
raise FixtureError(
u"Could not create course {0}. Status was {1}\nResponse content was: {2}".format(
self._course_dict, response.status_code, response.content))
def _configure_course(self):
"""
Configure course settings (e.g. start and end date)
"""
url = STUDIO_BASE_URL + '/settings/details/' + self._course_key
# First, get the current values
response = self.session.get(url, headers=self.headers)
if not response.ok:
raise FixtureError(
u"Could not retrieve course details. Status was {0}".format(
response.status_code))
try:
details = response.json()
except ValueError:
raise FixtureError(
u"Could not decode course details as JSON: '{0}'".format(details)
)
# Update the old details with our overrides
details.update(self._course_details)
# POST the updated details to Studio
response = self.session.post(
url, data=self._encode_post_dict(details),
headers=self.headers,
)
if not response.ok:
raise FixtureError(
u"Could not update course details to '{0}' with {1}: Status was {2}.".format(
self._course_details, url, response.status_code))
def _install_course_handouts(self):
"""
Add handouts to the course info page.
"""
url = STUDIO_BASE_URL + '/xblock/' + self._handouts_loc
# Construct HTML with each of the handout links
handouts_li = [
u'<li><a href="/static/{handout}">Example Handout</a></li>'.format(handout=handout)
for handout in self._handouts
]
handouts_html = u'<ol class="treeview-handoutsnav">{}</ol>'.format("".join(handouts_li))
# Update the course's handouts HTML
payload = json.dumps({
'children': None,
'data': handouts_html,
'id': self._handouts_loc,
'metadata': dict(),
})
response = self.session.post(url, data=payload, headers=self.headers)
if not response.ok:
raise FixtureError(
u"Could not update course handouts with {0}. Status was {1}".format(url, response.status_code))
def _install_course_updates(self):
"""
Add updates to the course, if any are configured.
"""
url = STUDIO_BASE_URL + '/course_info_update/' + self._course_key + '/'
for update in self._updates:
# Add the update to the course
date, content = update
payload = json.dumps({'date': date, 'content': content})
response = self.session.post(url, headers=self.headers, data=payload)
if not response.ok:
raise FixtureError(
u"Could not add update to course: {0} with {1}. Status was {2}".format(
update, url, response.status_code))
def _upload_assets(self):
"""
Upload assets
:raise FixtureError:
"""
url = STUDIO_BASE_URL + self._assets_url
test_dir = Path(__file__).abspath().dirname().dirname().dirname()
for asset_name in self._assets:
asset_file_path = test_dir + '/data/uploads/' + asset_name
asset_file = open(asset_file_path)
files = {'file': (asset_name, asset_file, mimetypes.guess_type(asset_file_path)[0])}
headers = {
'Accept': 'application/json',
'X-CSRFToken': self.session_cookies.get('csrftoken', '')
}
upload_response = self.session.post(url, files=files, headers=headers)
if not upload_response.ok:
raise FixtureError(u'Could not upload {asset_name} with {url}. Status code: {code}'.format(
asset_name=asset_name, url=url, code=upload_response.status_code))
def _install_course_textbooks(self):
"""
Add textbooks to the course, if any are configured.
"""
url = STUDIO_BASE_URL + '/textbooks/' + self._course_key
for book in self._textbooks:
payload = json.dumps(book)
response = self.session.post(url, headers=self.headers, data=payload)
if not response.ok:
raise FixtureError(
u"Could not add book to course: {0} with {1}. Status was {2}".format(
book, url, response.status_code))
def _add_advanced_settings(self):
"""
Add advanced settings.
"""
url = STUDIO_BASE_URL + "/settings/advanced/" + self._course_key
# POST advanced settings to Studio
response = self.session.post(
url, data=self._encode_post_dict(self._advanced_settings),
headers=self.headers,
)
if not response.ok:
raise FixtureError(
u"Could not update advanced details to '{0}' with {1}: Status was {2}.".format(
self._advanced_settings, url, response.status_code))
def _create_xblock_children(self, parent_loc, xblock_descriptions):
"""
Recursively create XBlock children.
"""
super(CourseFixture, self)._create_xblock_children(parent_loc, xblock_descriptions)
self._publish_xblock(parent_loc)
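# Usage sketch (hypothetical org/number/run values; assumes a running Studio
# instance reachable at STUDIO_BASE_URL and an authenticated session):
#
#   course = CourseFixture('TestOrg', 'CS101', '2024_T1', 'Test Course')
#   course.add_update(CourseUpdateDesc(date="January 29, 2014", content="Welcome!"))
#   course.add_children(
#       XBlockFixtureDesc('chapter', 'Week 1').add_children(
#           XBlockFixtureDesc('sequential', 'Lesson 1')
#       )
#   )
#   course.install()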
|
jolyonb/edx-platform
|
common/test/acceptance/fixtures/course.py
|
Python
|
agpl-3.0
| 15,531
| 0.002447
|
# This is the version of this source code.
manual_verstr = "1.5"
auto_build_num = "212"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
|
prudnikov/python-oauth2
|
oauth2/_version.py
|
Python
|
mit
| 438
| 0.004566
|
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time
mc = minecraft.Minecraft.create()
#mc.postToChat("Heat Vision!")
pos = mc.player.getTilePos()
#mc.postToChat(pos)
#rot = mc.player.getRotation()
#pitch = mc.player.getPitch()
#direct = mc.player.getDirection()
#mc.postToChat(rot)
#mc.postToChat(pitch)
#mc.postToChat(direct)
# those don't work on the Pi
# activate any tnt around
mc.postToChat("Oliver's boom!")
while True:
x,y,z = mc.player.getPos()
for xi in range(-4, 4):
for zi in range (-4, 4):
for yi in range (-1, 3):
thisblock = mc.getBlock(x + xi, y + yi, z + zi)
#print thisblock
if thisblock == 46:
mc.setBlock(x + xi, y + yi, z+zi, 46, 1)
print "setting on"
#mc.setBlock(x + xi, y + 1, z+zi, 46, 1)
time.sleep(1)
|
joadavis/rpi-coding
|
minecraft/oliverboom.py
|
Python
|
mit
| 907
| 0.016538
|
'''
Created on Oct 19, 2016
@author: jaime
'''
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from products import views
urlpatterns = [
url(r'^categories/$', csrf_exempt(views.ProductCategoryView.as_view())),
url(r'^categories/(?P<uid>\w+)/$', csrf_exempt(views.ProductCategoryView.as_view())),
url(r'^$', csrf_exempt(views.ProductView.as_view())),
url(r'^(?P<uid>\w+)/$', csrf_exempt(views.ProductView.as_view())),
]
|
jroeland/teapot
|
project/web/app/products/urls.py
|
Python
|
mit
| 489
| 0.010225
|
# -*- encoding: utf-8 -*-
import argparse
import os
import sys
import yaml
from . import VERSION
import actions
import core
import exception
from utils import log
def error(errtype, msg, code=42):
sys.stderr.write("{t.red}[ERROR] {t.yellow}{er}: {msg}"
"{t.normal}\n".format(er=errtype, msg=msg, t=log.term))
sys.exit(code)
def get_parser():
parser = argparse.ArgumentParser(prog='rdoupdate')
subparsers = parser.add_subparsers(help='available actions')
parser.add_argument('--version', action='version', version=VERSION)
# check
check_parser = subparsers.add_parser(
'check', help="validate update file(s)",
description="validate one or more update files; use -g to select "
"an update file added by last commit to a git repo or "
"use -f to select update files directly (default: -g .)")
check_parser.add_argument(
'-g', '--git', type=str, metavar='DIR',
help="check latest update file added to git repo in DIR directory")
check_parser.add_argument(
'-f', '--files', type=str, metavar='FILE', nargs='+',
help="check all specified FILEs; use - for stdin")
check_parser.add_argument(
'-a', '--available', action='store_true',
help="also check if builds are available for download")
check_parser.set_defaults(action=do_check)
# download
dl_parser = subparsers.add_parser(
'download', help="download builds from update file(s)",
description=("download builds from one or more update files into a "
"directory tree; use -g to select an update file added "
"by last commit to a git repo or use -f to select update "
"files directly; default: -g ."))
dl_parser.add_argument(
'-g', '--git', type=str, metavar='DIR',
help="download builds from latest update file added to git repo in "
"DIR directory")
dl_parser.add_argument(
'-f', '--files', type=str, metavar='FILE', nargs='+',
help="check all specified FILEs; use - for stdin")
dl_parser.add_argument(
'-o', '--outdir', type=str, metavar='DIR',
help="directory to download builds into (default: .)")
dl_parser.add_argument(
'-u', '--per-update', action='store_true',
help="create extra directory for each update")
dl_parser.add_argument(
'-b', '--build-filter', metavar='ATTR:REGEX', action='append',
help="Only download builds with ATTRibute matching python REGEX; can "
"be specified multiple times")
dl_parser.set_defaults(action=do_download)
# move
move_parser = subparsers.add_parser(
'move', help="move an update file (create a commit)",
description="create a commit that moves selected files to a directory")
move_parser.add_argument(
'files', metavar='FILE', type=str, nargs='+',
help='update file(s) to move')
move_parser.add_argument(
'-d', '--dir', type=str, metavar='DIR',
help="move update file(s) to this directory instead of using "
"update.group")
move_parser.set_defaults(action=do_move)
list_parser = subparsers.add_parser(
'list-bsources', help="show available build sources",
description="show available build sources")
list_parser.set_defaults(action=do_list_bsources)
return parser
def _get_update_files(args):
if args.files and args.git:
error("invalid invocation", "-g and -f are exclusive.", 19)
if args.files:
files = args.files
else:
if not args.git:
args.git = '.'
f = actions.get_last_commit_update(args.git)
files = [os.path.join(args.git, f)]
return files
def do_check(args):
files = _get_update_files(args)
good, fails = actions.check_files(*files, available=args.available,
verbose=True)
actions.print_summary(good, fails, 'PASSED', 'FAILED')
if fails:
return 127
def _parse_build_filter(fargs):
bf = []
if not fargs:
return bf
for f in fargs:
try:
attr, rex = f.split(':', 1)
except Exception as ex:
raise exception.InvalidFilter(what=f)
bf.append((attr, rex))
return bf
def do_download(args):
files = _get_update_files(args)
build_filter = _parse_build_filter(args.build_filter)
good, fails = actions.download_updates_builds(
*files, out_dir=args.outdir, per_update=args.per_update,
build_filter=build_filter)
actions.print_summary(good, fails, 'DOWNLOADED', 'FAILED to download')
if fails:
return 128
def do_move(args):
actions.move_files(args.files, args.dir)
def do_list_bsources(args):
actions.list_build_sources()
def run(*cargs):
parser = get_parser()
args = parser.parse_args(cargs)
action = args.action
return action(args)
def main():
cargs = sys.argv[1:]
try:
return run(*cargs)
except IOError as e:
error("file error", "%s: %s" % (e.strerror, e.filename), 2)
except exception.ChdirError as e:
error("file error", e, 3)
except exception.CommandFailed as e:
error("command failed", e.kwargs['cmd'], 5)
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
error("invalid YAML", e, 7)
except exception.InvalidUpdateStructure as e:
error("invalid structure", e, 11)
except exception.InvalidUpdateCommit as e:
error("invalid commit", e, 13)
except exception.ParsingError as e:
error("parsing error", e, 17)
except Exception as e:
err = type(e).__name__
ex = str(e)
if ex:
err += ": %s" % ex
error("unexpected error", err, 42)
if __name__ == '__main__':
main()
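# Example invocations (hypothetical paths; `rdoupdate` is assumed to be the console
# entry point wired to main() above):
#
#   rdoupdate check -g .                              # validate the update file from the last commit
#   rdoupdate download -f updates/foo.yml -o builds/
#   rdoupdate move updates/foo.yml -d ready/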
|
yac/rdoupdate
|
rdoupdate/shell.py
|
Python
|
apache-2.0
| 5,901
| 0
|
from fontTools.misc.py23 import byteord
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
# from itertools import *
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
Silf_hdr_format = '''
>
version: 16.16F
'''
Silf_hdr_format_3 = '''
>
version: 16.16F
compilerVersion: L
numSilf: H
x
x
'''
Silf_part1_format_v3 = '''
>
ruleVersion: 16.16F
passOffset: H
pseudosOffset: H
'''
Silf_part1_format = '''
>
maxGlyphID: H
extraAscent: h
extraDescent: h
numPasses: B
iSubst: B
iPos: B
iJust: B
iBidi: B
flags: B
maxPreContext: B
maxPostContext: B
attrPseudo: B
attrBreakWeight: B
attrDirectionality: B
attrMirroring: B
attrSkipPasses: B
numJLevels: B
'''
Silf_justify_format = '''
>
attrStretch: B
attrShrink: B
attrStep: B
attrWeight: B
runto: B
x
x
x
'''
Silf_part2_format = '''
>
numLigComp: H
numUserDefn: B
maxCompPerLig: B
direction: B
attCollisions: B
x
x
x
numCritFeatures: B
'''
Silf_pseudomap_format = '''
>
unicode: L
nPseudo: H
'''
Silf_pseudomap_format_h = '''
>
unicode: H
nPseudo: H
'''
Silf_classmap_format = '''
>
numClass: H
numLinear: H
'''
Silf_lookupclass_format = '''
>
numIDs: H
searchRange: H
entrySelector: H
rangeShift: H
'''
Silf_lookuppair_format = '''
>
glyphId: H
index: H
'''
Silf_pass_format = '''
>
flags: B
maxRuleLoop: B
maxRuleContext: B
maxBackup: B
numRules: H
fsmOffset: H
pcCode: L
rcCode: L
aCode: L
oDebug: L
numRows: H
numTransitional: H
numSuccess: H
numColumns: H
'''
aCode_info = (
("NOP", 0),
("PUSH_BYTE", "b"),
("PUSH_BYTE_U", "B"),
("PUSH_SHORT", ">h"),
("PUSH_SHORT_U", ">H"),
("PUSH_LONG", ">L"),
("ADD", 0),
("SUB", 0),
("MUL", 0),
("DIV", 0),
("MIN", 0),
("MAX", 0),
("NEG", 0),
("TRUNC8", 0),
("TRUNC16", 0),
("COND", 0),
("AND", 0), # x10
("OR", 0),
("NOT", 0),
("EQUAL", 0),
("NOT_EQ", 0),
("LESS", 0),
("GTR", 0),
("LESS_EQ", 0),
("GTR_EQ", 0),
("NEXT", 0),
("NEXT_N", "b"),
("COPY_NEXT", 0),
("PUT_GLYPH_8BIT_OBS", "B"),
("PUT_SUBS_8BIT_OBS", "bBB"),
("PUT_COPY", "b"),
("INSERT", 0),
("DELETE", 0), # x20
("ASSOC", -1),
("CNTXT_ITEM", "bB"),
("ATTR_SET", "B"),
("ATTR_ADD", "B"),
("ATTR_SUB", "B"),
("ATTR_SET_SLOT", "B"),
("IATTR_SET_SLOT", "BB"),
("PUSH_SLOT_ATTR", "Bb"),
("PUSH_GLYPH_ATTR_OBS", "Bb"),
("PUSH_GLYPH_METRIC", "Bbb"),
("PUSH_FEAT", "Bb"),
("PUSH_ATT_TO_GATTR_OBS", "Bb"),
("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
("PUSH_ISLOT_ATTR", "Bbb"),
("PUSH_IGLYPH_ATTR", "Bbb"),
("POP_RET", 0), # x30
("RET_ZERO", 0),
("RET_TRUE", 0),
("IATTR_SET", "BB"),
("IATTR_ADD", "BB"),
("IATTR_SUB", "BB"),
("PUSH_PROC_STATE", "B"),
("PUSH_VERSION", 0),
("PUT_SUBS", ">bHH"),
("PUT_SUBS2", 0),
("PUT_SUBS3", 0),
("PUT_GLYPH", ">H"),
("PUSH_GLYPH_ATTR", ">Hb"),
("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
("BITOR", 0),
("BITAND", 0),
("BITNOT", 0), # x40
("BITSET", ">HH"),
("SET_FEAT", "Bb")
)
aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)])
def disassemble(aCode):
codelen = len(aCode)
pc = 0
res = []
while pc < codelen:
opcode = byteord(aCode[pc:pc+1])
        if opcode >= len(aCode_info):
instr = aCode_info[0]
else:
instr = aCode_info[opcode]
pc += 1
if instr[1] != 0 and pc >= codelen : return res
if instr[1] == -1:
count = byteord(aCode[pc])
fmt = "%dB" % count
pc += 1
elif instr[1] == 0:
fmt = ""
else :
fmt = instr[1]
if fmt == "":
res.append(instr[0])
continue
parms = struct.unpack_from(fmt, aCode[pc:])
res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")")
pc += struct.calcsize(fmt)
return res
instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
def assemble(instrs):
res = b""
for inst in instrs:
m = instre.match(inst)
        if not m or m.group(1) not in aCode_map:
continue
opcode, parmfmt = aCode_map[m.group(1)]
res += struct.pack("B", opcode)
if m.group(2):
if parmfmt == 0:
continue
parms = [int(x) for x in re.split(r",\s*", m.group(2))]
if parmfmt == -1:
l = len(parms)
res += struct.pack(("%dB" % (l+1)), l, *parms)
else:
res += struct.pack(parmfmt, *parms)
return res
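# Round-trip sketch (hypothetical instruction listing; mnemonics taken from the
# aCode_info table above):
#
#   code = assemble(["PUSH_BYTE(3)", "PUSH_BYTE(4)", "ADD", "POP_RET"])
#   assert disassemble(code) == ["PUSH_BYTE(3)", "PUSH_BYTE(4)", "ADD", "POP_RET"]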
def writecode(tag, writer, instrs):
writer.begintag(tag)
writer.newline()
for l in disassemble(instrs):
writer.write(l)
writer.newline()
writer.endtag(tag)
writer.newline()
def readcode(content):
res = []
for e in content_string(content).split('\n'):
e = e.strip()
if not len(e): continue
res.append(e)
return assemble(res)
attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID',
'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID')
attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi')
attrs_contexts = ('maxPreContext', 'maxPostContext')
attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality',
'attrMirroring', 'attrSkipPasses', 'attCollisions')
pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup',
'minRulePreContext', 'maxRulePreContext', 'collisionThreshold')
pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns')
def writesimple(tag, self, writer, *attrkeys):
attrs = dict([(k, getattr(self, k)) for k in attrkeys])
writer.simpletag(tag, **attrs)
writer.newline()
def getSimple(self, attrs, *attr_list):
for k in attr_list:
if k in attrs:
setattr(self, k, int(safeEval(attrs[k])))
def content_string(contents):
res = ""
for element in contents:
if isinstance(element, tuple): continue
res += element
return res.strip()
def wrapline(writer, dat, length=80):
currline = ""
for d in dat:
if len(currline) > length:
writer.write(currline[:-1])
writer.newline()
currline = ""
currline += d + " "
if len(currline):
writer.write(currline[:-1])
writer.newline()
class _Object() :
pass
class table_S__i_l_f(DefaultTable.DefaultTable):
'''Silf table support'''
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.silfs = []
def decompile(self, data, ttFont):
sstruct.unpack2(Silf_hdr_format, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
if self.version >= 5.0:
(data, self.scheme) = grUtils.decompress(data)
sstruct.unpack2(Silf_hdr_format_3, data, self)
base = sstruct.calcsize(Silf_hdr_format_3)
elif self.version < 3.0:
self.numSilf = struct.unpack('>H', data[4:6])
self.scheme = 0
self.compilerVersion = 0
base = 8
else:
self.scheme = 0
sstruct.unpack2(Silf_hdr_format_3, data, self)
base = sstruct.calcsize(Silf_hdr_format_3)
silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:])
for offset in silfoffsets:
s = Silf()
self.silfs.append(s)
s.decompile(data[offset:], ttFont, self.version)
def compile(self, ttFont):
self.numSilf = len(self.silfs)
if self.version < 3.0:
hdr = sstruct.pack(Silf_hdr_format, self)
hdr += struct.pack(">HH", self.numSilf, 0)
else:
hdr = sstruct.pack(Silf_hdr_format_3, self)
offset = len(hdr) + 4 * self.numSilf
data = b""
for s in self.silfs:
hdr += struct.pack(">L", offset)
subdata = s.compile(ttFont, self.version)
offset += len(subdata)
data += subdata
if self.version >= 5.0:
return grUtils.compress(self.scheme, hdr+data)
return hdr+data
def toXML(self, writer, ttFont):
writer.comment('Attributes starting with _ are informative only')
writer.newline()
writer.simpletag('version', version=self.version,
compilerVersion=self.compilerVersion, compressionScheme=self.scheme)
writer.newline()
for s in self.silfs:
writer.begintag('silf')
writer.newline()
s.toXML(writer, ttFont, self.version)
writer.endtag('silf')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == 'version':
self.scheme=int(safeEval(attrs['compressionScheme']))
self.version = float(safeEval(attrs['version']))
self.compilerVersion = int(safeEval(attrs['compilerVersion']))
return
if name == 'silf':
s = Silf()
self.silfs.append(s)
for element in content:
if not isinstance(element, tuple): continue
tag, attrs, subcontent = element
s.fromXML(tag, attrs, subcontent, ttFont, self.version)
class Silf(object):
'''A particular Silf subtable'''
def __init__(self):
self.passes = []
self.scriptTags = []
self.critFeatures = []
self.jLevels = []
self.pMap = {}
def decompile(self, data, ttFont, version=2.0):
if version >= 3.0 :
_, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
self.ruleVersion = float(floatToFixedToStr(self.ruleVersion, precisionBits=16))
_, data = sstruct.unpack2(Silf_part1_format, data, self)
for jlevel in range(self.numJLevels):
j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
self.jLevels.append(j)
_, data = sstruct.unpack2(Silf_part2_format, data, self)
if self.numCritFeatures:
self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data)
data = data[self.numCritFeatures * 2 + 1:]
(numScriptTag,) = struct.unpack_from('B', data)
if numScriptTag:
self.scriptTags = [struct.unpack("4s", data[x:x+4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4)]
data = data[1 + 4 * numScriptTag:]
(self.lbGID,) = struct.unpack('>H', data[:2])
if self.numPasses:
self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses])
data = data[6 + 4 * self.numPasses:]
(numPseudo,) = struct.unpack(">H", data[:2])
for i in range(numPseudo):
if version >= 3.0:
pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
else:
pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
data = data[8 + 6 * numPseudo:]
currpos = (sstruct.calcsize(Silf_part1_format)
+ sstruct.calcsize(Silf_justify_format) * self.numJLevels
+ sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
+ 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo)
if version >= 3.0:
currpos += sstruct.calcsize(Silf_part1_format_v3)
self.classes = Classes()
self.classes.decompile(data, ttFont, version)
for i in range(self.numPasses):
p = Pass()
self.passes.append(p)
p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
ttFont, version)
def compile(self, ttFont, version=2.0):
self.numPasses = len(self.passes)
self.numJLevels = len(self.jLevels)
self.numCritFeatures = len(self.critFeatures)
numPseudo = len(self.pMap)
data = b""
if version >= 3.0:
hdroffset = sstruct.calcsize(Silf_part1_format_v3)
else:
hdroffset = 0
data += sstruct.pack(Silf_part1_format, self)
for j in self.jLevels:
data += sstruct.pack(Silf_justify_format, j)
data += sstruct.pack(Silf_part2_format, self)
if self.numCritFeatures:
data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures)
data += struct.pack("BB", 0, len(self.scriptTags))
if len(self.scriptTags):
tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
data += b"".join(tdata)
data += struct.pack(">H", self.lbGID)
self.passOffset = len(data)
data1 = grUtils.bininfo(numPseudo, 6)
currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
self.pseudosOffset = currpos + len(data1)
for u, p in sorted(self.pMap.items()):
data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
u, ttFont.getGlyphID(p))
data1 += self.classes.compile(ttFont, version)
currpos += len(data1)
data2 = b""
datao = b""
for i, p in enumerate(self.passes):
base = currpos + len(data2)
datao += struct.pack(">L", base)
data2 += p.compile(ttFont, base, version)
datao += struct.pack(">L", currpos + len(data2))
if version >= 3.0:
data3 = sstruct.pack(Silf_part1_format_v3, self)
else:
data3 = b""
return data3 + data + datao + data1 + data2
def toXML(self, writer, ttFont, version=2.0):
if version >= 3.0:
writer.simpletag('version', ruleVersion=self.ruleVersion)
writer.newline()
writesimple('info', self, writer, *attrs_info)
writesimple('passindexes', self, writer, *attrs_passindexes)
writesimple('contexts', self, writer, *attrs_contexts)
writesimple('attributes', self, writer, *attrs_attributes)
if len(self.jLevels):
writer.begintag('justifications')
writer.newline()
jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
for i, j in enumerate(self.jLevels):
attrs = dict([(k, getattr(j, k)) for k in jnames])
writer.simpletag('justify', **attrs)
writer.newline()
writer.endtag('justifications')
writer.newline()
if len(self.critFeatures):
writer.begintag('critFeatures')
writer.newline()
writer.write(" ".join(map(str, self.critFeatures)))
writer.newline()
writer.endtag('critFeatures')
writer.newline()
if len(self.scriptTags):
writer.begintag('scriptTags')
writer.newline()
writer.write(" ".join(self.scriptTags))
writer.newline()
writer.endtag('scriptTags')
writer.newline()
if self.pMap:
writer.begintag('pseudoMap')
writer.newline()
for k, v in sorted(self.pMap.items()):
writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
writer.newline()
writer.endtag('pseudoMap')
writer.newline()
self.classes.toXML(writer, ttFont, version)
if len(self.passes):
writer.begintag('passes')
writer.newline()
for i, p in enumerate(self.passes):
writer.begintag('pass', _index=i)
writer.newline()
p.toXML(writer, ttFont, version)
writer.endtag('pass')
writer.newline()
writer.endtag('passes')
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
if name == 'version':
self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
if name == 'info':
getSimple(self, attrs, *attrs_info)
elif name == 'passindexes':
getSimple(self, attrs, *attrs_passindexes)
elif name == 'contexts':
getSimple(self, attrs, *attrs_contexts)
elif name == 'attributes':
getSimple(self, attrs, *attrs_attributes)
elif name == 'justifications':
for element in content:
if not isinstance(element, tuple): continue
(tag, attrs, subcontent) = element
if tag == 'justify':
j = _Object()
for k, v in attrs.items():
setattr(j, k, int(v))
self.jLevels.append(j)
elif name == 'critFeatures':
self.critFeatures = []
element = content_string(content)
self.critFeatures.extend(map(int, element.split()))
elif name == 'scriptTags':
self.scriptTags = []
element = content_string(content)
for n in element.split():
self.scriptTags.append(n)
elif name == 'pseudoMap':
self.pMap = {}
for element in content:
if not isinstance(element, tuple): continue
(tag, attrs, subcontent) = element
if tag == 'pseudo':
k = int(attrs['unicode'], 16)
v = attrs['pseudo']
self.pMap[k] = v
elif name == 'classes':
self.classes = Classes()
for element in content:
if not isinstance(element, tuple): continue
tag, attrs, subcontent = element
self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
elif name == 'passes':
for element in content:
if not isinstance(element, tuple): continue
tag, attrs, subcontent = element
if tag == 'pass':
p = Pass()
for e in subcontent:
if not isinstance(e, tuple): continue
p.fromXML(e[0], e[1], e[2], ttFont, version)
self.passes.append(p)
class Classes(object):
def __init__(self):
self.linear = []
self.nonLinear = []
def decompile(self, data, ttFont, version=2.0):
sstruct.unpack2(Silf_classmap_format, data, self)
if version >= 4.0 :
oClasses = struct.unpack((">%dL" % (self.numClass+1)),
data[4:8+4*self.numClass])
else:
oClasses = struct.unpack((">%dH" % (self.numClass+1)),
data[4:6+2*self.numClass])
for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
            self.linear.append([ttFont.getGlyphName(x) for x in
                struct.unpack((">%dH" % ((e-s)//2)), data[s:e])])
for s,e in zip(oClasses[self.numLinear:self.numClass],
oClasses[self.numLinear+1:self.numClass+1]):
nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)]
nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
self.nonLinear.append(nonLin)
def compile(self, ttFont, version=2.0):
data = b""
oClasses = []
if version >= 4.0:
offset = 8 + 4 * (len(self.linear) + len(self.nonLinear))
else:
offset = 6 + 2 * (len(self.linear) + len(self.nonLinear))
for l in self.linear:
oClasses.append(len(data) + offset)
gs = [ttFont.getGlyphID(x) for x in l]
data += struct.pack((">%dH" % len(l)), *gs)
for l in self.nonLinear:
oClasses.append(len(data) + offset)
gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()]
data += grUtils.bininfo(len(gs))
data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)])
oClasses.append(len(data) + offset)
self.numClass = len(oClasses) - 1
self.numLinear = len(self.linear)
return sstruct.pack(Silf_classmap_format, self) + \
struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)),
*oClasses) + data
def toXML(self, writer, ttFont, version=2.0):
writer.begintag('classes')
writer.newline()
writer.begintag('linearClasses')
writer.newline()
for i,l in enumerate(self.linear):
writer.begintag('linear', _index=i)
writer.newline()
wrapline(writer, l)
writer.endtag('linear')
writer.newline()
writer.endtag('linearClasses')
writer.newline()
writer.begintag('nonLinearClasses')
writer.newline()
for i, l in enumerate(self.nonLinear):
writer.begintag('nonLinear', _index=i + self.numLinear)
writer.newline()
for inp, ind in l.items():
writer.simpletag('map', glyph=inp, index=ind)
writer.newline()
writer.endtag('nonLinear')
writer.newline()
writer.endtag('nonLinearClasses')
writer.newline()
writer.endtag('classes')
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
if name == 'linearClasses':
for element in content:
if not isinstance(element, tuple): continue
tag, attrs, subcontent = element
if tag == 'linear':
l = content_string(subcontent).split()
self.linear.append(l)
elif name == 'nonLinearClasses':
for element in content:
if not isinstance(element, tuple): continue
tag, attrs, subcontent = element
if tag =='nonLinear':
l = {}
for e in subcontent:
if not isinstance(e, tuple): continue
tag, attrs, subsubcontent = e
if tag == 'map':
l[attrs['glyph']] = int(safeEval(attrs['index']))
self.nonLinear.append(l)
class Pass(object):
def __init__(self):
self.colMap = {}
self.rules = []
self.rulePreContexts = []
self.ruleSortKeys = []
self.ruleConstraints = []
self.passConstraints = b""
self.actions = []
self.stateTrans = []
self.startStates = []
def decompile(self, data, ttFont, version=2.0):
_, data = sstruct.unpack2(Silf_pass_format, data, self)
(numRange, _, _, _) = struct.unpack(">4H", data[:8])
data = data[8:]
for i in range(numRange):
(first, last, col) = struct.unpack(">3H", data[6*i:6*i+6])
for g in range(first, last+1):
self.colMap[ttFont.getGlyphName(g)] = col
data = data[6*numRange:]
oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
data = data[2+2*self.numSuccess:]
rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])]
data = data[2*oRuleMap[-1]:]
(self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2])
numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
self.startStates = struct.unpack((">%dH" % numStartStates),
data[2:2 + numStartStates * 2])
data = data[2+numStartStates*2:]
self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules])
data = data[2*self.numRules:]
self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules])
data = data[self.numRules:]
(self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)),
data[3:5 + self.numRules * 2]))
data = data[5 + self.numRules * 2:]
oActions = list(struct.unpack((">%dH" % (self.numRules + 1)),
data[:2 + self.numRules * 2]))
data = data[2 * self.numRules + 2:]
for i in range(self.numTransitional):
a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2])
if sys.byteorder != "big": a.byteswap()
self.stateTrans.append(a)
data = data[self.numTransitional * self.numColumns * 2 + 1:]
self.passConstraints = data[:pConstraint]
data = data[pConstraint:]
for i in range(len(oConstraints)-2,-1,-1):
if oConstraints[i] == 0 :
oConstraints[i] = oConstraints[i+1]
self.ruleConstraints = [(data[s:e] if (e-s > 1) else b"") for (s,e) in zip(oConstraints, oConstraints[1:])]
data = data[oConstraints[-1]:]
        self.actions = [(data[s:e] if (e-s > 1) else b"") for (s,e) in zip(oActions, oActions[1:])]
data = data[oActions[-1]:]
# not using debug
def compile(self, ttFont, base, version=2.0):
# build it all up backwards
oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [b""], (0, []))[1]
oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [b""], (1, []))[1]
constraintCode = b"\000" + b"".join(self.ruleConstraints)
transes = []
for t in self.stateTrans:
if sys.byteorder != "big": t.byteswap()
transes.append(t.tobytes())
if sys.byteorder != "big": t.byteswap()
if not len(transes):
self.startStates = [0]
oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1]
passRanges = []
gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
for e in grUtils.entries(gidcolmap, sameval = True):
if e[1]:
passRanges.append((e[0], e[0]+e[1]-1, e[2][0]))
self.numRules = len(self.actions)
self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6
+ len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2
+ 2 * len(self.startStates) + 3 * self.numRules + 3
+ 4 * self.numRules + 4)
self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base
self.rcCode = self.pcCode + len(self.passConstraints)
self.aCode = self.rcCode + len(constraintCode)
self.oDebug = 0
# now generate output
data = sstruct.pack(Silf_pass_format, self)
data += grUtils.bininfo(len(passRanges), 6)
data += b"".join(struct.pack(">3H", *p) for p in passRanges)
data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
flatrules = reduce(lambda a,x: a+x, self.rules, [])
data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints)
data += struct.pack((">%dH" % (self.numRules+1)), *oActions)
return data + b"".join(transes) + struct.pack("B", 0) + \
self.passConstraints + constraintCode + b"".join(self.actions)
def toXML(self, writer, ttFont, version=2.0):
writesimple('info', self, writer, *pass_attrs_info)
writesimple('fsminfo', self, writer, *pass_attrs_fsm)
writer.begintag('colmap')
writer.newline()
wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(),
key=lambda x:ttFont.getGlyphID(x[0]))])
writer.endtag('colmap')
writer.newline()
writer.begintag('staterulemap')
writer.newline()
for i, r in enumerate(self.rules):
writer.simpletag('state', number = self.numRows - self.numSuccess + i,
rules = " ".join(map(str, r)))
writer.newline()
writer.endtag('staterulemap')
writer.newline()
writer.begintag('rules')
writer.newline()
for i in range(len(self.actions)):
writer.begintag('rule', index=i, precontext=self.rulePreContexts[i],
sortkey=self.ruleSortKeys[i])
writer.newline()
if len(self.ruleConstraints[i]):
writecode('constraint', writer, self.ruleConstraints[i])
writecode('action', writer, self.actions[i])
writer.endtag('rule')
writer.newline()
writer.endtag('rules')
writer.newline()
if len(self.passConstraints):
writecode('passConstraint', writer, self.passConstraints)
if len(self.stateTrans):
writer.begintag('fsm')
writer.newline()
writer.begintag('starts')
writer.write(" ".join(map(str, self.startStates)))
writer.endtag('starts')
writer.newline()
for i, s in enumerate(self.stateTrans):
writer.begintag('row', _i=i)
# no newlines here
writer.write(" ".join(map(str, s)))
writer.endtag('row')
writer.newline()
writer.endtag('fsm')
writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
if name == 'info':
getSimple(self, attrs, *pass_attrs_info)
elif name == 'fsminfo':
getSimple(self, attrs, *pass_attrs_fsm)
elif name == 'colmap':
e = content_string(content)
for w in e.split():
x = w.split('=')
if len(x) != 2 or x[0] == '' or x[1] == '': continue
self.colMap[x[0]] = int(x[1])
elif name == 'staterulemap':
for e in content:
if not isinstance(e, tuple): continue
tag, a, c = e
if tag == 'state':
self.rules.append([int(x) for x in a['rules'].split(" ")])
elif name == 'rules':
for element in content:
if not isinstance(element, tuple): continue
tag, a, c = element
if tag != 'rule': continue
self.rulePreContexts.append(int(a['precontext']))
self.ruleSortKeys.append(int(a['sortkey']))
con = b""
act = b""
for e in c:
if not isinstance(e, tuple): continue
tag, a, subc = e
if tag == 'constraint':
con = readcode(subc)
elif tag == 'action':
act = readcode(subc)
self.actions.append(act)
self.ruleConstraints.append(con)
elif name == 'passConstraint':
self.passConstraints = readcode(content)
elif name == 'fsm':
for element in content:
if not isinstance(element, tuple): continue
tag, a, c = element
if tag == 'row':
s = array('H')
e = content_string(c)
s.extend(map(int, e.split()))
self.stateTrans.append(s)
elif tag == 'starts':
s = []
e = content_string(c)
s.extend(map(int, e.split()))
self.startStates = s
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py
|
Python
|
apache-2.0
| 33,326
| 0.003691
|
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
import libvirt
import re
from libvirt import libvirtError
from libvirttestapi.utils import utils
required_params = {'guestname', 'checkpoint_name'}
optional_params = {'flags': None}
def checkpoint_get_xml(params):
logger = params['logger']
guestname = params['guestname']
checkpoint_name = params.get('checkpoint_name', None)
flag = utils.parse_flags(params)
if not utils.version_compare('libvirt-python', 5, 6, 0, logger):
logger.info("Current libvirt-python don't support getXMLDesc().")
return 0
logger.info("Checkpoint name: %s" % checkpoint_name)
logger.info("flag: %s" % flag)
if flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SIZE:
logger.info("Bug 1207659: Don't support this flag.")
return 0
try:
conn = libvirt.open()
dom = conn.lookupByName(guestname)
cp = dom.checkpointLookupByName(checkpoint_name)
cp_xml = cp.getXMLDesc(flag)
except libvirtError as err:
logger.error("API error message: %s" % err.get_error_message())
return 1
checkpoint_xml_path = "/var/lib/libvirt/qemu/checkpoint/%s/%s.xml" % (guestname, checkpoint_name)
cp_fd = open(checkpoint_xml_path, 'r')
checkpoint_xml = cp_fd.read()
checkpoint_xml = re.sub(r'<!--\n.*\n-->\n\n', '', checkpoint_xml, flags=re.S)
if flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_NO_DOMAIN:
cp_xml = cp_xml.replace('</domaincheckpoint>\n', '')
if cp_xml in checkpoint_xml:
logger.info("PASS: check checkpoint xml successful.")
else:
logger.error("FAIL: check checkpoint xml failed.")
return 1
elif flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SIZE:
logger.info("Don't support this flag.")
elif flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SECURE or flag == 0:
if cp_xml == checkpoint_xml:
logger.info("PASS: check checkpoint xml successful.")
else:
logger.error("FAIL: check checkpoint xml failed.")
return 1
return 0
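# Example params dict (hypothetical guest and checkpoint names; `logger` is a
# standard logging.Logger instance):
#
#   checkpoint_get_xml({'logger': logger, 'guestname': 'vm1',
#                       'checkpoint_name': 'cp1', 'flags': None})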
|
libvirt/libvirt-test-API
|
libvirttestapi/repos/checkpoint/checkpoint_get_xml.py
|
Python
|
gpl-2.0
| 2,134
| 0.000937
|
def factorial(count):
    # Base case: stop the recursion at 1; without it, this never terminates.
    if count <= 1:
        return 1
    return count * factorial(count - 1)
answer = factorial(6)
|
zamonia500/PythonTeacherMythenmetz
|
과외숙제/factorial.py
|
Python
|
gpl-3.0
| 86
| 0
|
import pytest
from iotile.core.exceptions import ArgumentError
from iotile.sg.model import DeviceModel
def test_default_values():
"""Make sure we can get properties with default values."""
model = DeviceModel()
assert model.get('max_nodes') == 32
assert model.get(u'max_nodes') == 32
model.set('max_nodes', 16)
assert model.get('max_nodes') == 16
assert model.get(u'max_nodes') == 16
model.set(u'max_nodes', 17)
assert model.get('max_nodes') == 17
assert model.get(u'max_nodes') == 17
with pytest.raises(ArgumentError):
model.get('unknown_parameter')
with pytest.raises(ArgumentError):
model.set('unknown_parameter', 15)
|
iotile/coretools
|
iotilesensorgraph/test/test_devicemodel.py
|
Python
|
gpl-3.0
| 693
| 0
|
''' custom script for platformio '''
from os.path import join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
#print "post_extra_script running..."
#print env.Dump()
# compiler and linker flags don't work very well in build_flags of platformio.ini - need to set them here
env.Append(
LINKFLAGS = [
"--data-loc", 0x30
],
STCGALCMD="/stcgal.py"
)
|
zerog2k/stc_diyclock
|
post_extra_script.py
|
Python
|
mit
| 392
| 0.015306
|
from attributes import *
from constants import *
# ------------------------------------------------------------------------------
#
class UnitManager (Attributes) :
"""
UnitManager class -- manages a pool
"""
# --------------------------------------------------------------------------
#
def __init__ (self, url=None, scheduler='default', session=None) :
Attributes.__init__ (self)
# --------------------------------------------------------------------------
#
def add_pilot (self, pid) :
"""
add (Compute or Data)-Pilot(s) to the pool
"""
raise Exception ("%s.add_pilot() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def list_pilots (self, ptype=ANY) :
"""
List IDs of data and/or compute pilots
"""
raise Exception ("%s.list_pilots() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def remove_pilot (self, pid, drain=False) :
"""
        Remove pilot(s) (does not cancel the pilot(s), but removes all units
        from the pilot(s)).
        `drain` determines what happens to the units which are managed by the
        removed pilot(s). If `True`, the pilot removal is delayed until all
        units reach a final state. If `False` (the default), then `RUNNING`
        units will be canceled, and `PENDING` units will be re-assigned to the
        unit managers for re-scheduling to other pilots.
"""
raise Exception ("%s.remove_pilot() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def submit_unit (self, description) :
"""
Instantiate and return (Compute or Data)-Unit object(s)
"""
raise Exception ("%s.submit_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def list_units (self, utype=ANY) :
"""
List IDs of data and/or compute units
"""
raise Exception ("%s.list_units() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def get_unit (self, uids) :
"""
Reconnect to and return (Compute or Data)-Unit object(s)
"""
raise Exception ("%s.get_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def wait_unit (self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0) :
"""
Wait for given unit(s) to enter given state
"""
raise Exception ("%s.wait_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def cancel_units (self, uids) :
"""
Cancel given unit(s)
"""
raise Exception ("%s.cancel_unit() is not implemented" % self.__class__.__name__)
# ------------------------------------------------------------------------------
#
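# Intended usage sketch (hypothetical concrete subclass and IDs; every method
# above raises until implemented, and `unit.uid` is an assumed attribute):
#
#   um = UnitManager (scheduler='round_robin')
#   um.add_pilot ('pilot.0001')
#   unit = um.submit_unit (compute_unit_description)
#   um.wait_unit (unit.uid)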
|
JensTimmerman/radical.pilot
|
docs/architecture/api_draft/unit_manager.py
|
Python
|
mit
| 3,311
| 0.017215
|
import pytz
priorities = ('US/Pacific', 'US/Mountain', 'US/Central', 'US/Eastern',
'Brazil/East', 'UTC')
all_tz = pytz.all_timezones_set.copy()
for priority in priorities:
all_tz.remove(priority)
all_tz = sorted(list(all_tz))
all_tz[:0] = priorities # prepends list to list
# tuples for selection widget
all_tz = tuple((tz, tz) for tz in all_tz)
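# e.g. all_tz[0] == ('US/Pacific', 'US/Pacific'), ready to be passed as the
# choices of a selection widget (the exact widget API is not assumed here).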
|
mixmastamyk/flask-skeleton
|
src/timezones.py
|
Python
|
unlicense
| 371
| 0.002703
|
# pylint: disable=missing-docstring
from datetime import datetime, timedelta
import factory
import pytz
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from oauth2_provider.models import AccessToken, Application, RefreshToken
from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
from common.djangoapps.student.tests.factories import UserFactory
class ApplicationFactory(DjangoModelFactory):
class Meta:
model = Application
user = factory.SubFactory(UserFactory)
client_id = factory.Sequence('client_{}'.format)
client_secret = 'some_secret'
client_type = 'confidential'
authorization_grant_type = Application.CLIENT_CONFIDENTIAL
name = FuzzyText(prefix='name', length=8)
class ApplicationAccessFactory(DjangoModelFactory):
class Meta:
model = ApplicationAccess
application = factory.SubFactory(ApplicationFactory)
scopes = ['grades:read']
class AccessTokenFactory(DjangoModelFactory):
class Meta:
model = AccessToken
django_get_or_create = ('user', 'application')
token = FuzzyText(length=32)
expires = datetime.now(pytz.UTC) + timedelta(days=1)
class RefreshTokenFactory(DjangoModelFactory):
class Meta:
model = RefreshToken
django_get_or_create = ('user', 'application')
token = FuzzyText(length=32)
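# A minimal usage sketch (illustration only; it assumes a configured Django test
# database, as in the usual edx-platform test settings):
#
#   user = UserFactory()
#   application = ApplicationFactory(user=user)
#   access_token = AccessTokenFactory(user=user, application=application)
#   # access_token.token is a random 32-character string, set to expire one day ahead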
|
eduNEXT/edx-platform
|
openedx/core/djangoapps/oauth_dispatch/tests/factories.py
|
Python
|
agpl-3.0
| 1,383
| 0
|
{
"name": "Delivery Sequence",
"vesion": "12.0.1.0.0",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "LGPL-3",
"category": "Custom",
"website": "https://yelizariev.github.io",
"depends": ["delivery"],
"data": ["views.xml"],
"installable": False,
}
|
yelizariev/addons-yelizariev
|
delivery_sequence/__manifest__.py
|
Python
|
lgpl-3.0
| 295
| 0
|
'''
Nibblegen: A script to convert LaTeX text to HTML usable in Nibbleblog. Forked from the latex2wp project (the licensing for which is below).
Copyright (C) 2014 Theodore Jones
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
"""
Copyright 2009 Luca Trevisan
Additional contributors: Radu Grigore
LaTeX2WP version 0.6.2
This file is part of LaTeX2WP, a program that converts
a LaTeX document into a format that is ready to be
copied and pasted into WordPress.
You are free to redistribute and/or modify LaTeX2WP under the
terms of the GNU General Public License (GPL), version 3
or (at your option) any later version.
I hope you will find LaTeX2WP useful, but be advised that
it comes WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GPL for more details.
You should have received a copy of the GNU General Public
License along with LaTeX2WP. If you can't find it,
see <http://www.gnu.org/licenses/>.
"""
import re
from sys import argv
from latex2wpstyle import *
# prepare variables computed from the info in latex2wpstyle
count = dict()
for thm in ThmEnvs:
count[T[thm]] = 0
count["section"] = count["subsection"] = count["equation"] = 0
ref={}
endlatex = "&fg="+textcolor
if HTML : endproof = ""
inthm = ""
"""
At the beginning, the commands \$, \% and \& are temporarily
replaced by placeholders (the second entry in each 4-tuple).
At the end, the placeholders in text mode are replaced by
the third entry, and the placeholders in math mode are
replaced by the fourth entry.
"""
esc = [["\\$","_dollar_","$","\\$"],
["\\%","_percent_","%","\\%"],
["\\&","_amp_","&","\\&"],
[">","_greater_",">",">"],
["<","_lesser_","<","<"]]
M = M + [ ["\\more","<!--more-->"],
["\\newblock","\\\\"],
["\\sloppy",""],
["\\S","§"]]
Mnomath =[["\\\\","<br/>\n"],
["\\ "," "],
["\\`a","à"],
["\\'a","á"],
["\\\"a","ä"],
["\\aa ","å"],
["{\\aa}","å"],
["\\`e","è"],
["\\'e","é"],
["\\\"e","ë"],
["\\`i","ì"],
["\\'i","í"],
["\\\"i","ï"],
["\\`o","ò"],
["\\'o","ó"],
["\\\"o","ö"],
["\\`o","ò"],
["\\'o","ó"],
["\\\"o","ö"],
["\\H o","ö"],
["\\`u","ù"],
["\\'u","ú"],
["\\\"u","ü"],
["\\`u","ù"],
["\\'u","ú"],
["\\\"u","ü"],
["\\v{C}","Č"]]
cb = re.compile("\\{|}")
def extractbody(m) :
begin = re.compile("\\\\begin\s*")
m= begin.sub("\\\\begin",m)
end = re.compile("\\\\end\s*")
m = end.sub("\\\\end",m)
beginenddoc = re.compile("\\\\begin\\{document}"
"|\\\\end\\{document}")
parse = beginenddoc.split(m)
if len(parse)== 1 :
m = parse[0]
else :
m = parse[1]
"""
removes comments, replaces double returns with <p> and
other returns and multiple spaces by a single space.
"""
for e in esc :
m = m.replace(e[0],e[1])
comments = re.compile("%.*?\n")
m=comments.sub(" ",m)
multiplereturns = re.compile("\n\n+")
m= multiplereturns.sub ("<p>",m)
spaces=re.compile("(\n|[ ])+")
m=spaces.sub(" ",m)
"""
removes text between \iffalse ... \fi and
between \iftex ... \fi keeps text between
\ifblog ... \fi
"""
ifcommands = re.compile("\\\\iffalse|\\\\ifblog|\\\\iftex|\\\\fi")
L=ifcommands.split(m)
I=ifcommands.findall(m)
m= L[0]
for i in range(1,(len(L)+1)/2) :
if (I[2*i-2]=="\\ifblog") :
m=m+L[2*i-1]
m=m+L[2*i]
"""
changes $$ ... $$ into \[ ... \] and reformats
eqnarray* environments as regular array environments
"""
doubledollar = re.compile("\\$\\$")
L=doubledollar.split(m)
m=L[0]
for i in range(1,(len(L)+1)/2) :
m = m+ "\\[" + L[2*i-1] + "\\]" + L[2*i]
m=m.replace("\\begin{eqnarray*}","\\[ \\begin{array}{rcl} ")
m=m.replace("\\end{eqnarray*}","\\end{array} \\]")
return m
def convertsqb(m) :
r = re.compile("\\\\item\\s*\\[.*?\\]")
Litems = r.findall(m)
Lrest = r.split(m)
m = Lrest[0]
for i in range(0,len(Litems)) :
s= Litems[i]
s=s.replace("\\item","\\nitem")
s=s.replace("[","{")
s=s.replace("]","}")
m=m+s+Lrest[i+1]
r = re.compile("\\\\begin\\s*\\{\\w+}\\s*\\[.*?\\]")
Lthms = r.findall(m)
Lrest = r.split(m)
m = Lrest[0]
for i in range(0,len(Lthms)) :
s= Lthms[i]
s=s.replace("\\begin","\\nbegin")
s=s.replace("[","{")
s=s.replace("]","}")
m=m+s+Lrest[i+1]
return m
def converttables(m) :
retable = re.compile("\\\\begin\s*\\{tabular}.*?\\\\end\s*\\{tabular}"
"|\\\\begin\s*\\{btabular}.*?\\\\end\s*\\{btabular}")
tables = retable.findall(m)
rest = retable.split(m)
m = rest[0]
for i in range(len(tables)) :
if tables[i].find("{btabular}") != -1 :
m = m + convertonetable(tables[i],True)
else :
m = m + convertonetable(tables[i],False)
m = m + rest[i+1]
return m
def convertmacros(m) :
comm = re.compile("\\\\[a-zA-Z]*")
commands = comm.findall(m)
rest = comm.split(m)
r= rest[0]
for i in range( len (commands) ) :
for s1,s2 in M :
if s1==commands[i] :
commands[i] = s2
r=r+commands[i]+rest[i+1]
return(r)
def convertonetable(m,border) :
tokens = re.compile("\\\\begin\\{tabular}\s*\\{.*?}"
"|\\\\end\\{tabular}"
"|\\\\begin\\{btabular}\s*\\{.*?}"
"|\\\\end\\{btabular}"
"|&|\\\\\\\\")
align = { "c" : "center", "l" : "left" , "r" : "right" }
T = tokens.findall(m)
C = tokens.split(m)
L = cb.split(T[0])
format = L[3]
columns = len(format)
if border :
m = "<table border=\"1\" align=center>"
else :
m="<table align = center><tr>"
p=1
i=0
while T[p-1] != "\\end{tabular}" and T[p-1] != "\\end{btabular}":
m = m + "<td align="+align[format[i]]+">" + C[p] + "</td>"
p=p+1
i=i+1
if T[p-1]=="\\\\" :
for i in range (p,columns) :
m=m+"<td></td>"
m=m+"</tr><tr>"
i=0
m = m+ "</tr></table>"
return (m)
def separatemath(m) :
mathre = re.compile("\\$.*?\\$"
"|\\\\begin\\{equation}.*?\\\\end\\{equation}"
"|\\\\\\[.*?\\\\\\]")
math = mathre.findall(m)
text = mathre.split(m)
return(math,text)
def processmath( M ) :
R = []
counteq=0
global ref
mathdelim = re.compile("\\$"
"|\\\\begin\\{equation}"
"|\\\\end\\{equation}"
"|\\\\\\[|\\\\\\]")
label = re.compile("\\\\label\\{.*?}")
for m in M :
md = mathdelim.findall(m)
mb = mathdelim.split(m)
"""
In what follows, md[0] contains the initial delimiter,
which is either \begin{equation}, or $, or \[, and
mb[1] contains the actual mathematical equation
"""
if md[0] == "$" :
if HTML :
m=m.replace("$","")
m="$$"+m+""+endlatex+"$$"
else :
m="$$ {"+mb[1]+"}"+endlatex+"$$"
else :
if md[0].find("\\begin") != -1 :
count["equation"] += 1
mb[1] = mb[1] + "\\ \\ \\ \\ \\ ("+str(count["equation"])+")"
if HTML :
m = "<p align=center>$$" + mb[1] +endlatex+"$$" + "</p>\n"
else :
m = "<p align=center>$$ " + mb[1] +endlatex+"$$</p>\n"
if m.find("\\label") != -1 :
mnolab = label.split(m)
mlab = label.findall(m)
"""
Now the mathematical equation, which has already
been formatted for WordPress, is the union of
the strings mnolab[0] and mnolab[1]. The content
of the \label{...} command is in mlab[0]
"""
lab = mlab[0]
lab=cb.split(lab)[1]
lab=lab.replace(":","")
ref[lab]=count["equation"]
m="<a name=\""+lab+"\">"+mnolab[0]+mnolab[1]+"</a>"
R= R + [m]
return R
def convertcolors(m,c) :
if m.find("begin") != -1 :
return("<span style=\"color:#"+colors[c]+";\">")
else :
return("</span>")
def convertitm(m) :
if m.find("begin") != -1 :
return ("\n\n<ul>")
else :
return ("\n</ul>\n\n")
def convertenum(m) :
if m.find("begin") != -1 :
return ("\n\n<ol>")
else :
return ("\n</ol>\n\n")
def convertbeginnamedthm(thname,thm) :
global inthm
count[T[thm]] +=1
inthm = thm
t = beginnamedthm.replace("_ThmType_",thm.capitalize())
t = t.replace("_ThmNumb_",str(count[T[thm]]))
t = t.replace("_ThmName_",thname)
return(t)
def convertbeginthm(thm) :
global inthm
count[T[thm]] +=1
inthm = thm
t = beginthm.replace("_ThmType_",thm.capitalize())
t = t.replace("_ThmNumb_",str(count[T[thm]]))
return(t)
def convertendthm(thm) :
global inthm
inthm = ""
return(endthm)
def convertlab(m) :
global inthm
global ref
m=cb.split(m)[1]
m=m.replace(":","")
if inthm != "" :
ref[m]=count[T[inthm]]
else :
ref[m]=count["section"]
return("<a name=\""+m+"\"></a>")
def convertproof(m) :
if m.find("begin") != -1 :
return(beginproof)
else :
return(endproof)
def convertsection (m) :
L=cb.split(m)
"""
L[0] contains the \\section or \\section* command, and
L[1] contains the section name
"""
if L[0].find("*") == -1 :
t=section
count["section"] += 1
count["subsection"]=0
else :
t=sectionstar
t=t.replace("_SecNumb_",str(count["section"]) )
t=t.replace("_SecName_",L[1])
return(t)
def convertsubsection (m) :
L=cb.split(m)
if L[0].find("*") == -1 :
t=subsection
else :
t=subsectionstar
count["subsection"] += 1
t=t.replace("_SecNumb_",str(count["section"]) )
t=t.replace("_SubSecNumb_",str(count["subsection"]) )
t=t.replace("_SecName_",L[1])
return(t)
def converturl (m) :
L = cb.split(m)
return ("<a href=\""+L[1]+"\">"+L[3]+"</a>")
def converturlnosnap (m) :
L = cb.split(m)
return ("<a class=\"snap_noshots\" href=\""+L[1]+"\">"+L[3]+"</a>")
def convertimage (m) :
L = cb.split (m)
return ("<p align=center><img "+L[1] + " src=\""+L[3]
+"\"></p>")
def convertstrike (m) :
L=cb.split(m)
return("<s>"+L[1]+"</s>")
def processtext ( t ) :
p = re.compile("\\\\begin\\{\\w+}"
"|\\\\nbegin\\{\\w+}\\s*\\{.*?}"
"|\\\\end\\{\\w+}"
"|\\\\item"
"|\\\\nitem\\s*\\{.*?}"
"|\\\\label\\s*\\{.*?}"
"|\\\\section\\s*\\{.*?}"
"|\\\\section\\*\\s*\\{.*?}"
"|\\\\subsection\\s*\\{.*?}"
"|\\\\subsection\\*\\s*\\{.*?}"
"|\\\\href\\s*\\{.*?}\\s*\\{.*?}"
"|\\\\hrefnosnap\\s*\\{.*?}\\s*\\{.*?}"
"|\\\\image\\s*\\{.*?}\\s*\\{.*?}\\s*\\{.*?}"
"|\\\\sout\\s*\\{.*?}")
for s1, s2 in Mnomath :
t=t.replace(s1,s2)
ttext = p.split(t)
tcontrol = p.findall(t)
w = ttext[0]
i=0
while i < len(tcontrol) :
if tcontrol[i].find("{itemize}") != -1 :
w=w+convertitm(tcontrol[i])
elif tcontrol[i].find("{enumerate}") != -1 :
w= w+convertenum(tcontrol[i])
elif tcontrol[i][0:5]=="\\item" :
w=w+"<li>"
elif tcontrol[i][0:6]=="\\nitem" :
lb = tcontrol[i][7:].replace("{","")
lb = lb.replace("}","")
w=w+"<li>"+lb
elif tcontrol[i].find("\\hrefnosnap") != -1 :
w = w+converturlnosnap(tcontrol[i])
elif tcontrol[i].find("\\href") != -1 :
w = w+converturl(tcontrol[i])
elif tcontrol[i].find("{proof}") != -1 :
w = w+convertproof(tcontrol[i])
elif tcontrol[i].find("\\subsection") != -1 :
w = w+convertsubsection(tcontrol[i])
elif tcontrol[i].find("\\section") != -1 :
w = w+convertsection(tcontrol[i])
elif tcontrol[i].find("\\label") != -1 :
w=w+convertlab(tcontrol[i])
elif tcontrol[i].find("\\image") != -1 :
w = w+convertimage(tcontrol[i])
elif tcontrol[i].find("\\sout") != -1 :
w = w+convertstrike(tcontrol[i])
elif tcontrol[i].find("\\begin") !=-1 and tcontrol[i].find("{center}")!= -1 :
w = w+"<p align=center>"
elif tcontrol[i].find("\\end")!= -1 and tcontrol[i].find("{center}") != -1 :
w = w+"</p>"
else :
for clr in colorchoice :
if tcontrol[i].find("{"+clr+"}") != -1:
w=w + convertcolors(tcontrol[i],clr)
for thm in ThmEnvs :
if tcontrol[i]=="\\end{"+thm+"}" :
w=w+convertendthm(thm)
elif tcontrol[i]=="\\begin{"+thm+"}":
w=w+convertbeginthm(thm)
elif tcontrol[i].find("\\nbegin{"+thm+"}") != -1:
L=cb.split(tcontrol[i])
thname=L[3]
w=w+convertbeginnamedthm(thname,thm)
w += ttext[i+1]
i += 1
return processfontstyle(w)
def processfontstyle(w) :
close = dict()
ww = ""
level = i = 0
while i < len(w):
special = False
for k, v in fontstyle.items():
l = len(k)
if w[i:i+l] == k:
level += 1
ww += '<' + v + '>'
close[level] = '</' + v + '>'
i += l
special = True
if not special:
if w[i] == '{':
ww += '{'
level += 1
close[level] = '}'
elif w[i] == '}' and level > 0:
ww += close[level]
level -= 1
else:
ww += w[i]
i += 1
return ww
def convertref(m) :
global ref
p=re.compile("\\\\ref\s*\\{.*?}|\\\\eqref\s*\\{.*?}")
T=p.split(m)
M=p.findall(m)
w = T[0]
for i in range(len(M)) :
t=M[i]
lab=cb.split(t)[1]
lab=lab.replace(":","")
if t.find("\\eqref") != -1 :
w=w+"<a href=\"#"+lab+"\">("+str(ref[lab])+")</a>"
else :
w=w+"<a href=\"#"+lab+"\">"+str(ref[lab])+"</a>"
w=w+T[i+1]
return w
"""
The program makes several passes through the input.
In a first clean-up, all text before \begin{document}
and after \end{document}, if present, is removed,
all double-returns are converted
to <p>, and all remaining returns are converted to
spaces.
The second step implements a few simple macros. The user can
add support for more macros if desired by editing the
convertmacros() procedure.
Then the program separates the mathematical
from the text parts. (It assumes that the document does
not start with a mathematical expression.)
It makes one pass through the text part, translating
environments such as theorem, lemma, proof, enumerate, itemize,
\em, and \bf. Along the way, it keeps counters for the current
section and subsection and for the current numbered theorem-like
environment, as well as a flag that tells whether one is
inside a theorem-like environment or not. Every time a \label{xx}
command is encountered, we give ref[xx] the value of the section
in which the command appears, or the number of the theorem-like
environment in which it appears (if applicable). Each appearance
of \label is replaced by an html "name" tag, so that later we can
replace \ref commands by clickable html links.
The next step is to make a pass through the mathematical environments.
Displayed equations are numbered and centered, and when a \label{xx}
command is encountered we give ref[xx] the number of the current
equation.
A final pass replaces \ref{xx} commands by the number in ref[xx],
and a clickable link to the referenced location.
"""
import sys
s = ""
while True:
char = sys.stdin.read(1)
if not char:
break
if char:
s = s + char
"""
extractbody() takes the text between a \begin{document}
and \end{document}, if present, (otherwise it keeps the
whole document), normalizes the spacing, and removes comments
"""
s=extractbody(s)
# formats tables
s=converttables(s)
# reformats optional parameters passed in square brackets
s=convertsqb(s)
# implements simple macros
s=convertmacros(s)
# extracts the math parts, and replaces them with placeholders
# processes math and text separately, then puts the processed
# math equations in place of the placeholders
(math,text) = separatemath(s)
s=text[0]
for i in range(len(math)) :
s=s+"__math"+str(i)+"__"+text[i+1]
s = processtext ( s )
math = processmath ( math )
# converts escape sequences such as \$ to HTML codes
# This must be done after formatting the tables or the '&' in
# the HTML codes will create problems
for e in esc :
s=s.replace(e[1],e[2])
for i in range ( len ( math ) ) :
math[i] = math[i].replace(e[1],e[3])
# puts the math equations back into the text
for i in range(len(math)) :
s=s.replace("__math"+str(i)+"__",math[i])
# translates the \ref{} commands
s=convertref(s)
if HTML :
s="<head><style>body{max-width:55em;}a:link{color:#4444aa;}a:visited{color:#4444aa;}a:hover{background-color:#aaaaFF;}</style></head><body>"+s+"</body></html>"
s = s.replace("<p>","\n<p>\n")
print s
|
theoj2/Nibbletex
|
nibblegen/nibblegen.py
|
Python
|
gpl-3.0
| 19,283
| 0.02733
|
# -*- coding: utf-8 -*-
import re
from ..base.downloader import BaseDownloader
class VeehdCom(BaseDownloader):
__name__ = "VeehdCom"
__type__ = "downloader"
__version__ = "0.29"
__status__ = "testing"
__pattern__ = r"http://veehd\.com/video/\d+_\S+"
__config__ = [
("enabled", "bool", "Activated", True),
("filename_spaces", "bool", "Allow spaces in filename", False),
("replacement_char", "str", "Filename replacement character", "_"),
]
__description__ = """Veehd.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [("cat", "cat@pyload")]
def setup(self):
self.multi_dl = True
self.req.can_continue = True
def process(self, pyfile):
self.download_html()
if not self.file_exists():
self.offline()
pyfile.name = self.get_file_name()
self.download(self.get_file_url())
def download_html(self):
url = self.pyfile.url
self.log_debug(f"Requesting page: {url}")
self.data = self.load(url)
def file_exists(self):
if not self.data:
self.download_html()
if "<title>Veehd</title>" in self.data:
return False
return True
def get_file_name(self):
if not self.data:
self.download_html()
m = re.search(r"<title.*?>(.+?) on Veehd</title>", self.data)
if m is None:
self.error(self._("Video title not found"))
name = m.group(1)
#: Replace unwanted characters in filename
if self.config.get("filename_spaces"):
pattern = r"[^\w ]+"
else:
pattern = r"[^\w.]+"
return re.sub(pattern, self.config.get("replacement_char"), name) + ".avi"
def get_file_url(self):
"""
Returns the absolute downloadable filepath.
"""
if not self.data:
self.download_html()
m = re.search(
r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/.+?)"',
self.data,
)
if m is None:
self.error(self._("Embedded video url not found"))
return m.group(1)
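# A sketch of the filename sanitisation performed in get_file_name() above,
# assuming the default config (filename_spaces=False, replacement_char="_"):
#
#   re.sub(r"[^\w.]+", "_", "My Movie (2014)") + ".avi"  ->  "My_Movie_2014_.avi"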
|
vuolter/pyload
|
src/pyload/plugins/downloaders/VeehdCom.py
|
Python
|
agpl-3.0
| 2,189
| 0.000914
|
import csv
import datetime
import logging
import os
from celery.task import task
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.timezone import now
from libya_elections.constants import REMINDER_CHECKIN, REMINDER_REPORT, \
REMINDER_LAST_REPORT, REMINDER_CLOSE
from polling_reports.models import CenterOpen, PollingReport, StaffPhone
from register.models import Whitelist
from text_messages.utils import get_message
from .models import Batch, Broadcast
from .utils import Line
logger = logging.getLogger(__name__)
def read_messages_from_file(file_path):
"""
Read uploaded bulk SMS file.
Generate tuples: (phone_number, message, from_shortcode).
Delete file afterward.
:param file_path:
:return:
"""
# We don't currently enable customization of the from_shortcode via file upload.
# Just use the default.
from_shortcode = None
with open(file_path, encoding='utf-8') as f:
reader = csv.reader(f)
for row in reader:
if any(row):
line = Line._make(row)
number = int(line.number)
yield number, line.message, from_shortcode
os.remove(file_path)
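# A sketch of the expected upload format (assuming `Line` is a namedtuple that
# matches the CSV columns and exposes `number` and `message` fields; only those
# two are used above).  A CSV like
#
#   218911234567,Polling center opens at 08:00
#   218929876543,Please send your report
#
# yields (218911234567, 'Polling center opens at 08:00', None) and so on, where
# None means "use the default from_shortcode".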
@task
def upload_bulk_sms_file(batch_id, file_path):
"""
Upload a batch of bulk SMS messages for the given batch. Delete
the temp file after we're done.
Assumes the file is valid (run is_file_valid on it first!)
:param batch_id:
:param file_path:
:return: message_for_user
"""
batch = Batch.objects.get(id=batch_id)
batch.add_messages(read_messages_from_file(file_path))
batch.status = Batch.PENDING
batch.save()
# Break out some of the logic for sending polling report reminder messages
# for easier testing
class PollingReportReminderMessage(object):
"""
Capture some of the common logic for polling report reminders.
(Do not instantiate, use the subclasses.)
"""
def __init__(self, message_number, reminder_number):
self.message_number = message_number
self.reminder_number = reminder_number
def get_message_code(self):
raise NotImplementedError
def get_message_text(self):
context = {'message_number': self.message_number,
'reminder_number': self.reminder_number}
return get_message(self.get_message_code()).msg.format(**context)
def get_phone_numbers_to_send_to(self):
"""
Generator that yields (phone_number, message_text, from_shortcode) tuples
for the phone numbers that we need to send this reminder to.
"""
# Get the phone numbers we want to send to, excluding those that have
# already done the thing we want to remind them of
phone_numbers = self.PhoneModel.objects.exclude(phone_number__in=self.to_exclude())\
.values_list('phone_number', flat=True)
message_text = self.get_message_text()
# Set from_shortcode to REPORTS_SHORT_CODE so that the recipient can
# simply respond to this message with their report.
from_shortcode = settings.REPORTS_SHORT_CODE
for phone_number in phone_numbers:
yield phone_number, message_text, from_shortcode
def to_exclude(self):
raise NotImplementedError
class CheckinReminderMessage(PollingReportReminderMessage):
"""
Message telling user to check in (activate phone, roll call)
"""
def __init__(self, message_number, reminder_number):
super(CheckinReminderMessage, self).__init__(message_number, reminder_number)
self.PhoneModel = Whitelist
def get_message_code(self):
return REMINDER_CHECKIN
def to_exclude(self):
"""Return list of phone numbers to exclude"""
midnight = now().replace(hour=0, minute=0, microsecond=0)
return CenterOpen.objects.filter(
creation_date__gte=midnight,
).values_list('phone_number', flat=True)
class PollingDayReportReminderMessage(PollingReportReminderMessage):
"""
Message telling user to send in polling day statistics report
"""
def __init__(self, message_number, reminder_number):
super(PollingDayReportReminderMessage, self).__init__(message_number, reminder_number)
self.PhoneModel = StaffPhone
def get_message_code(self):
return {
4: REMINDER_REPORT,
5: REMINDER_REPORT,
6: REMINDER_LAST_REPORT,
7: REMINDER_CLOSE,
}[self.message_number]
def to_exclude(self):
"""Return list of phone numbers to exclude"""
reporting_period = self.message_number - 3
one_day_ago = now() - datetime.timedelta(hours=24)
return PollingReport.objects.filter(
period_number=reporting_period,
creation_date__gte=one_day_ago,
).values_list('phone_number', flat=True)
@task
def message_reminder_task(message_number, reminder_number, audience, election):
"""
Make a batch to send out a bunch of reminder messages to a given audience,
if they haven't sent us the expected report yet.
"""
logger.debug("Start message_reminder_task")
if audience not in ('whitelist', 'registered'):
raise ValueError("Unknown audience type %s - expected whitelist or registered" % audience)
# Batches need to be owned by somebody - pick a non-random superuser
user = get_user_model().objects.filter(is_active=True, is_superuser=True)[0]
batch = Batch.objects.create(
name="Reminder %d for message_number %d" % (reminder_number, message_number),
created_by=user,
priority=Batch.PRIORITY_TIME_CRITICAL)
# create the corresponding broadcast object
broadcast = Broadcast.objects.create(
created_by=batch.created_by,
batch=batch,
audience=Broadcast.STAFF_ONLY,
message=batch.name, # this message is only temporary
)
try:
if audience == 'whitelist':
msg = CheckinReminderMessage(message_number, reminder_number)
else:
msg = PollingDayReportReminderMessage(message_number, reminder_number)
batch.add_messages(msg.get_phone_numbers_to_send_to())
batch.status = Batch.APPROVED
batch.reviewed_by = user
batch.save()
# update the message for the broadcast.
broadcast.message = msg.get_message_text()
broadcast.save()
logger.debug("Batch saved")
except Exception:
logger.exception("Error while creating message reminder batch")
# If anything went wrong, don't leave a partial batch lying around in an unknown state
batch.delete()
broadcast.delete()
raise
@task
def approve_broadcast(broadcast_id):
"""Creates messages for each individual in the audience and
changes batch status to approved."""
broadcast = Broadcast.objects.get(pk=broadcast_id)
messages = broadcast.get_messages()
batch = broadcast.batch
batch.add_messages(messages)
batch.status = Batch.APPROVED
batch.save()
|
SmartElect/SmartElect
|
bulk_sms/tasks.py
|
Python
|
apache-2.0
| 7,112
| 0.001547
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
import time
import re
class qyxx_ck():
"""采矿许可证"""
need_check_ziduan = ['valid_from',
'validto'
]
def check_valid_from(self, indexstr, ustr):
"""有效期限自"""
ret = None
validdate = indexstr['validdate'].strip()
if validdate and len(validdate):
err, time = public.get_date(validdate, 0)
if err:
ret = err
else:
frm = time
if ustr != frm:
ret = u'不等我的是-%s-'  # i.e. "not equal; mine is -%s-"
return ret
def check_validto(self, indexstr, ustr):
"""有效期限至"""
ret = None
validdate = indexstr['validdate'].strip()
if validdate and len(validdate):
err, time = public.get_date(validdate, 1)
if err:
ret = err
else:
frm = time
if ustr != frm:
ret = u'不等我的是-%s-'  # i.e. "not equal; mine is -%s-"
return ret
|
mefly2012/platform
|
src/parse/qyxx_ck.py
|
Python
|
apache-2.0
| 1,153
| 0.00272
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Romain Bignon, Laurent Bachelier
#
# This file is part of assnet.
#
# assnet is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# assnet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with assnet. If not, see <http://www.gnu.org/licenses/>.
from urlparse import urlsplit, urlunsplit
from urllib import quote
from paste.url import URL
__all__ = ['compact', 'quote_url', 'quote_path', 'quote_and_decode_url']
UNSAFE_CHARS = {
'?': quote('?'),
'&': quote('&'),
';': quote(';'),
':': quote(':'),
',': quote(','),
'=': quote('='),
' ': quote(' '),
'+': quote('+'),
'$': quote('$'),
'"': quote('"'),
}
def compact(text):
return text.replace('\n', ' ').strip()
def quote_path(path):
"""
Quote a path (see quote_url)
"""
return ''.join([UNSAFE_CHARS.get(c, c) for c in path])
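# For example (a sketch, not part of the module's public docs):
#
#   quote_path('/foo bar/baz?x=1')  ->  '/foo%20bar/baz%3Fx%3D1'
#
# i.e. only the characters listed in UNSAFE_CHARS are percent-encoded; '/' and
# alphanumerics pass through untouched.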
def quote_url(url):
"""
Quote the path part of an URL object and return the full URL as a string.
Special characters in the URL are not considered as the query string or
any other parameters, they should be in their dedicated variables
of the URL class.
"""
purl = urlsplit(url.url)
# do not escape the scheme and netloc
if purl.scheme and purl.netloc:
path = urlunsplit((None, None, purl.path, purl.query, purl.fragment))
basepath = urlunsplit((purl.scheme, purl.netloc, '', None, None))
else:
path = url.url
basepath = ''
return URL(basepath + quote_path(path), vars=url.vars).href
def quote_and_decode_url(url):
"""
Like quote_url but for usage in Mako templates
"""
return quote_url(url).decode('utf-8')
|
laurentb/assnet
|
assnet/filters.py
|
Python
|
agpl-3.0
| 2,171
| 0
|
import radon.complexity
import radon.visitors
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.SourceRange import SourceRange
from coalib.settings.Setting import typed_list
class RadonBear(LocalBear):
def run(self, filename, file,
radon_ranks_info: typed_list(str)=(),
radon_ranks_normal: typed_list(str)=('C', 'D'),
radon_ranks_major: typed_list(str)=('E', 'F')):
"""
Uses radon to compute complexity of a given file.
:param radon_ranks_info: The ranks (given by radon) to
treat as severity INFO.
:param radon_ranks_normal: The ranks (given by radon) to
treat as severity NORMAL.
:param radon_ranks_major: The ranks (given by radon) to
treat as severity MAJOR.
"""
severity_map = {
RESULT_SEVERITY.INFO: radon_ranks_info,
RESULT_SEVERITY.NORMAL: radon_ranks_normal,
RESULT_SEVERITY.MAJOR: radon_ranks_major
}
for visitor in radon.complexity.cc_visit("".join(file)):
rank = radon.complexity.cc_rank(visitor.complexity)
severity = None
for result_severity, rank_list in severity_map.items():
if rank in rank_list:
severity = result_severity
if severity is None:
continue
visitor_range = SourceRange.from_values(
filename, visitor.lineno, visitor.col_offset, visitor.endline)
message = "{} has a cyclomatic complexity of {}".format(
visitor.name, rank)
yield Result(self, message, severity=severity,
affected_code=(visitor_range,))
|
sims1253/coala-bears
|
bears/python/RadonBear.py
|
Python
|
agpl-3.0
| 1,902
| 0.003155
|
"""Place all plugins in this directory."""
|
jscott1989/happening
|
src/plugins/__init__.py
|
Python
|
mit
| 43
| 0
|
import os
import datetime
from utils.util import run_command
__author__ = 'maa'
class MsBuilder:
def __init__(self, msbuild):
if msbuild is None:
self.msbuild = r"C:\Windows\Microsoft.NET\Framework64\v4.0.30319\MSBuild.exe"
else:
self.msbuild = msbuild
def build_with_params(self, csprojPath, targets, properties):
if not os.path.isfile(self.msbuild):
raise Exception('MsBuild.exe not found. path = ' + self.msbuild)
start = datetime.datetime.now()
print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S'))
params = [self.msbuild, csprojPath]
params.append('/t:' + ';'.join(targets))
params.append('/p:' + ';'.join(properties))
return run_command(params)
def build(self, csprojPath, args):
if not os.path.isfile(self.msbuild):
raise Exception('MsBuild.exe not found. path = ' + self.msbuild)
start = datetime.datetime.now()
print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S'))
params = [self.msbuild, csprojPath] + list(args)
return run_command(params)
def get_files_from_project_bin_folder(self, csproj, configuration, do_return_full_paths=False):
name = os.path.dirname(os.path.realpath(csproj))
bin_config_path = os.path.join(name, 'bin', configuration)
files = os.listdir(bin_config_path)
if not do_return_full_paths:
return files
files_full_path = list()
for file in files:
files_full_path.append(os.path.join(bin_config_path, file))
return files_full_path
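# A minimal usage sketch (the paths are hypothetical, not part of this module):
#
#   builder = MsBuilder(None)   # None falls back to the default Framework64 MSBuild path
#   builder.build_with_params(r'C:\projects\App\App.csproj',
#                             ['Clean', 'Build'],
#                             ['Configuration=Release', 'Platform=AnyCPU'])
#
# build_with_params() joins the targets into a single '/t:Clean;Build' switch and
# the properties into '/p:Configuration=Release;Platform=AnyCPU' before handing
# the command line to run_command().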
|
amatkivskiy/baidu
|
baidu/utils/msbuilder.py
|
Python
|
apache-2.0
| 1,654
| 0.002418
|
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_geometry import Point
from mathmaker.lib.core.geometry import Polygon
@pytest.fixture
def p1():
p1 = Polygon([Point('A', 0.5, 0.5),
Point('B', 3, 1),
Point('C', 3.2, 4),
Point('D', 0.8, 3)
])
p1.side[0].label = Value(4, unit='cm')
p1.side[1].label = Value(3, unit='cm')
p1.side[2].label = Value(2, unit='cm')
p1.side[3].label = Value(6.5, unit='cm')
p1.angle[0].label = Value(64, unit="\\textdegree")
p1.angle[1].label = Value(128, unit="\\textdegree")
p1.angle[2].label = Value(32, unit="\\textdegree")
p1.angle[3].label = Value(256, unit="\\textdegree")
p1.angle[0].mark = 'simple'
p1.angle[1].mark = 'simple'
p1.angle[2].mark = 'simple'
p1.angle[3].mark = 'simple'
return p1
def test_p1_into_euk(p1):
"""Check Polygon's generated euk file."""
assert p1.into_euk() == \
'box -0.1, -0.1, 3.8, 4.6\n\n'\
'A = point(0.5, 0.5)\n'\
'B = point(3, 1)\n'\
'C = point(3.2, 4)\n'\
'D = point(0.8, 3)\n'\
'\n'\
'draw\n'\
' (A.B.C.D)\n'\
' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
' $\\rotatebox{86}{\sffamily 3~cm}$ B 86 - 8.9 deg 4.9\n'\
' $\\rotatebox{23}{\sffamily 2~cm}$ C 203 - 12.2 deg 4.2\n'\
' $\\rotatebox{83}{\sffamily 6.5~cm}$ D 263 - 12.9 deg 4.1\n'\
' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ B 138.7 deg 2.7\n'\
' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ C 234.3 deg 2.7\n'\
' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ D 322.7 deg 2.7\n'\
' "A" A 227.3 deg, font("sffamily")\n'\
' "B" B 318.7 deg, font("sffamily")\n'\
' "C" C 54.3 deg, font("sffamily")\n'\
' "D" D 142.7 deg, font("sffamily")\n'\
'end\n\n'\
'label\n'\
' B, A, D simple\n'\
' C, B, A simple\n'\
' D, C, B simple\n'\
' A, D, C simple\n'\
'end\n'
def test_p1_rename_errors(p1):
"""Check wrong arguments trigger exceptions when renaming."""
with pytest.raises(TypeError):
p1.rename(5678)
with pytest.raises(ValueError):
p1.rename('KJLIZ')
def test_p1_renamed(p1):
"""Check renaming Polygon is OK."""
p1.rename('YOGA')
assert p1.into_euk() == \
'box -0.1, -0.1, 3.8, 4.6\n\n'\
'A = point(0.5, 0.5)\n'\
'G = point(3, 1)\n'\
'O = point(3.2, 4)\n'\
'Y = point(0.8, 3)\n'\
'\n'\
'draw\n'\
' (A.G.O.Y)\n'\
' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
' $\\rotatebox{86}{\sffamily 3~cm}$ G 86 - 8.9 deg 4.9\n'\
' $\\rotatebox{23}{\sffamily 2~cm}$ O 203 - 12.2 deg 4.2\n'\
' $\\rotatebox{83}{\sffamily 6.5~cm}$ Y 263 - 12.9 deg 4.1\n'\
' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ G 138.7 deg 2.7\n'\
' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ O 234.3 deg 2.7\n'\
' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ Y 322.7 deg 2.7\n'\
' "A" A 227.3 deg, font("sffamily")\n'\
' "G" G 318.7 deg, font("sffamily")\n'\
' "O" O 54.3 deg, font("sffamily")\n'\
' "Y" Y 142.7 deg, font("sffamily")\n'\
'end\n\n'\
'label\n'\
' G, A, Y simple\n'\
' O, G, A simple\n'\
' Y, O, G simple\n'\
' A, Y, O simple\n'\
'end\n'
|
nicolashainaux/mathmaker
|
tests/01_core_objects/test_110_polygons.py
|
Python
|
gpl-3.0
| 4,579
| 0.003494
|
from idl.Annotatable import Annotatable
from idl.IDLSyntaxError import IDLSyntaxError
from idl.Type import Type
class EnumField(Annotatable):
'''
Object that represents a single enumeration field.
'''
def __init__(self, enum, name, value):
Annotatable.__init__(self)
self._enum = enum
self._name = name
self._value = value
@property
def enum(self):
'''
Enumeration type this field is associated with.
'''
return self._enum
@property
def name(self):
'''
Field name.
'''
return self._name
@property
def value(self):
'''
Integer field value.
'''
return self._value
class Enum(Type):
def __init__(self, module, desc):
Type.__init__(self, module, Type.ENUM, desc.name)
self._desc = desc
self._fields = []
for field in self._desc.fields:
if field.value:
# Evaluate value
value = eval(field.value)
# Duplicate value check
for i in self._fields:
if i.value == value:
raise IDLSyntaxError(self.module,
field.line,
'Duplicate explicit field value %d given for field %r in enumeration %r' % (value, field.name, self.pathStr)
)
else:
value = self._generateFieldValue()
newField = EnumField(self, field.name, value)
# Duplicate name check
if self.getField(newField.name):
raise IDLSyntaxError(self.module,
field.line,
'Duplicate field name %r in enumeration %r' % (newField.name, self.pathStr)
)
# Annotations
newField._assignAnnotations(field.annotations)
self._fields.append(newField)
@property
def fields(self):
'''
List of enumeration fields.
'''
return self._fields
def getField(self, name):
'''
Gets a field with a specific name.
@param name: Field name.
@return: EnumField object or None.
'''
for field in self._fields:
if field.name == name:
return field
return None
def _generateFieldValue(self):
# Assign value
value = 0
while True:
taken = False
for field in self._fields:
if field.value == value:
taken = True
value += 1
break
if not taken:
break
return value
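# A sketch of the implicit value assignment implemented in _generateFieldValue()
# above: if an enumeration declares explicit values 0 and 2 for two of its
# fields, the next field without an explicit value receives 1 (the smallest
# non-negative integer not yet taken), and the one after that receives 3.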
|
spiricn/libIDL
|
idl/Enum.py
|
Python
|
mit
| 3,126
| 0.011836
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import signal
exception = {
signal.SIGINT:KeyboardInterrupt
}
class Signal:
def __init__(self, sig):
self.signal = sig
self.oldhandler = signal.getsignal(sig)
self.pending = False
class SignalHandler:
def __init__(self):
self.signals = {}
def signal_handler(self, sig, frame):
signal.signal(sig, signal.SIG_IGN)
self.signals[sig].pending = True
def disable_signal(self, sig):
if sig not in self.signals.keys():
self.signals[sig] = Signal(sig)
signal.signal(sig, self.signal_handler)
def enable_signal(self, sig):
if sig in self.signals.keys():
if self.signals[sig].oldhandler:
oldhandler = self.signals[sig].oldhandler
else:
oldhandler = signal.SIG_DFL
pending = self.signals[sig].pending
del self.signals[sig]
signal.signal(sig, oldhandler)
if pending:
raise exception[sig]
def signal_disabled(self, sig):
return sig in self.signals.keys()
def signal_pending(self, sig):
return self.signal_disabled(sig) and self.signals[sig].pending
|
SolusOS-discontinued/pisi
|
pisi/signalhandler.py
|
Python
|
gpl-2.0
| 1,553
| 0.001932
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
This module provides the ability to read and write AMBER NetCDF trajectories.
The code is heavily based on amber_netcdf_trajectory_tools.py by John Chodera.
"""
##############################################################################
# imports
##############################################################################
from __future__ import print_function, division
# stdlib
import os
import socket
import warnings
from datetime import datetime
from distutils.version import StrictVersion
import numpy as np
from mdtraj import version
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices
__all__ = ['NetCDFTrajectoryFile', 'load_netcdf']
##############################################################################
# classes
##############################################################################
@_FormatRegistry.register_loader('.nc')
@_FormatRegistry.register_loader('.netcdf')
def load_netcdf(filename, top=None, stride=None, atom_indices=None, frame=None):
"""Load an AMBER NetCDF file. Since the NetCDF format doesn't contain
information to specify the topology, you need to supply a topology.
Parameters
----------
filename : str
filename of AMBER NetCDF file.
top : {str, Trajectory, Topology}
The NetCDF format does not contain topology information. Pass in either
the path to a pdb file, a trajectory, or a topology to supply this
information.
stride : int, default=None
Only read every stride-th frame
atom_indices : array_like, optional
If not None, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it
requires an extra copy, but will save memory.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
See Also
--------
mdtraj.NetCDFTrajectoryFile : Low level interface to NetCDF files
"""
from mdtraj.core.trajectory import _parse_topology, Trajectory
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
if atom_indices is not None:
topology = topology.subset(atom_indices)
with NetCDFTrajectoryFile(filename) as f:
if frame is not None:
f.seek(frame)
xyz, time, cell_lengths, cell_angles = f.read(n_frames=1, atom_indices=atom_indices)
else:
xyz, time, cell_lengths, cell_angles = f.read(stride=stride, atom_indices=atom_indices)
xyz = in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)
cell_lengths = in_units_of(cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)
trajectory = Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
return trajectory
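# A minimal usage sketch (the file names are hypothetical):
#
#   traj = load_netcdf('production.nc', top='system.pdb', stride=10)
#   # traj is an md.Trajectory holding every 10th frame of production.nc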
@_FormatRegistry.register_fileobject('.nc')
@_FormatRegistry.register_fileobject('.netcdf')
class NetCDFTrajectoryFile(object):
"""Interface for reading and writing to AMBER NetCDF files. This is a
file-like object, that supports both reading or writing depending
on the `mode` flag. It implements the context manager protocol,
so you can also use it with the python 'with' statement.
Parameters
----------
filename : str
The name of the file to open
mode : {'r', 'w'}, default='r'
The mode in which to open the file. Valid options are 'r' and 'w' for
'read' and 'write', respectively.
force_overwrite : bool, default=False
In write mode, if a file named `filename` already exists, clobber
it and overwrite it.
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=True):
self._closed = True # is the file currently closed?
self._mode = mode # what mode were we opened in
if StrictVersion(import_('scipy.version').short_version) < StrictVersion('0.12.0'):
raise ImportError('MDTraj NetCDF support requires scipy>=0.12.0. '
'You have %s' % import_('scipy.version').short_version)
netcdf = import_('scipy.io').netcdf_file
if mode not in ['r', 'w']:
raise ValueError("mode must be one of ['r', 'w']")
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
# AMBER uses the NetCDF3 format, with 64 bit encodings, which
# for scipy.io.netcdf_file is "version=2"
self._handle = netcdf(filename, mode=mode, version=2)
self._closed = False
# self._frame_index is the current frame that we're at in the
# file
# self._needs_initialization indicates whether we need to set the
# global properties of the file. This is required before the first
# write operation on a new file
if mode == 'w':
self._frame_index = 0
self._needs_initialization = True
elif mode == 'r':
self._frame_index = 0
self._needs_initialization = False
else:
raise RuntimeError()
@property
def n_atoms(self):
self._validate_open()
if self._needs_initialization:
raise IOError('The file is uninitialized.')
return self._handle.dimensions['atom']
@property
def n_frames(self):
self._validate_open()
if not self._needs_initialization:
return self._handle.variables['coordinates'].shape[0]
return 0
def _validate_open(self):
if self._closed:
raise IOError('The file is closed.')
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read data from a molecular dynamics trajectory in the AMBER NetCDF
format.
Parameters
----------
n_frames : int, optional
If n_frames is not None, the next n_frames of data from the file
will be read. Otherwise, all of the frames in the file will be read.
stride : int, optional
If stride is not None, read only every stride-th frame from disk.
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms, in units of angstroms.
time : np.ndarray, None
The time corresponding to each frame, in units of picoseconds, or
None if no time information is present in the trajectory.
cell_lengths : np.ndarray, None
The lengths (a,b,c) of the unit cell for each frame, or None if
the information is not present in the file.
cell_angles : np.ndarray, None
The angles (\alpha, \beta, \gamma) defining the unit cell for
each frame, or None if the information is not present in the file.
"""
self._validate_open()
if self._mode != 'r':
raise IOError('The file was opened in mode=%s. Reading is not allowed.' % self._mode)
if n_frames is None:
n_frames = np.inf
total_n_frames = self.n_frames
frame_slice = slice(self._frame_index, self._frame_index + min(n_frames, total_n_frames), stride)
if self._frame_index >= total_n_frames:
# just return something that'll look like len(xyz) == 0
# this is basically just an alternative to throwing an indexerror
return np.array([]), None, None, None
if atom_indices is None:
# get all of the atoms
atom_slice = slice(None)
else:
atom_slice = ensure_type(atom_indices, dtype=np.int, ndim=1,
name='atom_indices', warn_on_cast=False)
if not np.all(atom_slice < self.n_atoms):
raise ValueError('As a zero-based index, the entries in '
'atom_indices must all be less than the number of atoms '
'in the trajectory, %d' % self.n_atoms)
if not np.all(atom_slice >= 0):
raise ValueError('The entries in atom_indices must be greater '
'than or equal to zero')
if 'coordinates' in self._handle.variables:
coordinates = self._handle.variables['coordinates'][frame_slice, atom_slice, :]
else:
raise ValueError('No coordinates found in the NetCDF file. The only '
'variables in the file were %s' %
self._handle.variables.keys())
if 'time' in self._handle.variables:
time = self._handle.variables['time'][frame_slice]
else:
warnings.warn('No time information found in the NetCDF file')
time = None
if 'cell_lengths' in self._handle.variables:
cell_lengths = self._handle.variables['cell_lengths'][frame_slice]
else:
cell_lengths = None
if 'cell_angles' in self._handle.variables:
cell_angles = self._handle.variables['cell_angles'][frame_slice]
else:
cell_angles = None
if cell_lengths is None and cell_angles is not None:
warnings.warn('cell_lengths were found, but no cell_angles')
if cell_lengths is not None and cell_angles is None:
warnings.warn('cell_angles were found, but no cell_lengths')
self._frame_index = self._frame_index + min(n_frames, total_n_frames)
# scipy.io.netcdf variables are mem-mapped, and are only backed
# by valid memory while the file handle is open. This is _bad_.
# because we need to support the user opening the file, reading
# the coordinates, and then closing it, and still having the
# coordinates be a valid memory segment.
# https://github.com/rmcgibbo/mdtraj/issues/440
if coordinates is not None and not coordinates.flags['WRITEABLE']:
coordinates = np.array(coordinates, copy=True)
if time is not None and not time.flags['WRITEABLE']:
time = np.array(time, copy=True)
if cell_lengths is not None and not cell_lengths.flags['WRITEABLE']:
cell_lengths = np.array(cell_lengths, copy=True)
if cell_angles is not None and not cell_angles.flags['WRITEABLE']:
cell_angles = np.array(cell_angles, copy=True)
return coordinates, time, cell_lengths, cell_angles
def write(self, coordinates, time=None, cell_lengths=None, cell_angles=None):
"""Write one or more frames of a molecular dynamics trajectory to disk
in the AMBER NetCDF format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of each atom, in units of angstroms.
time : np.ndarray, dtype=np.float32, shape=(n_frames), optional
The time index corresponding to each frame, in units of picoseconds.
cell_lengths : np.ndarray, dtype=np.double, shape=(n_frames, 3)
The lengths (a,b,c) of the unit cell for each frame.
cell_angles : np.ndarray, dtype=np.double, shape=(n_frames, 3)
The angles (\alpha, \beta, \gamma) defining the unit cell for
each frame.
Notes
-----
If the input arrays are of dimension deficient by one, for example
if the coordinates array is two dimensional, the time is a single
scalar or cell_lengths and cell_angles are a 1d array of length three,
that is okay. You'll simply be saving a single frame.
"""
self._validate_open()
if self._mode not in ['w', 'ws', 'a', 'as']:
raise IOError('The file was opened in mode=%s. Writing is not allowed.' % self._mode)
coordinates = in_units_of(coordinates, None, 'angstroms')
time = in_units_of(time, None, 'picoseconds')
cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
cell_angles = in_units_of(cell_angles, None, 'degrees')
# typecheck all of the input arguments rigorously
coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates', length=None,
can_be_none=False, shape=(None, None, 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
n_frames, n_atoms = coordinates.shape[0], coordinates.shape[1]
time = ensure_type(time, np.float32, 1, 'time', length=n_frames,
can_be_none=True, warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths', length=n_frames,
can_be_none=True, shape=(n_frames, 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles', length=n_frames,
can_be_none=True, shape=(n_frames, 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
# are we dealing with a periodic system?
if (cell_lengths is None and cell_angles is not None) or (cell_lengths is not None and cell_angles is None):
provided, neglected = 'cell_lengths', 'cell_angles'
if cell_lengths is None:
provided, neglected = neglected, provided
raise ValueError('You provided the variable "%s", but neglected to '
'provide "%s". They either BOTH must be provided, or '
'neither. Having one without the other is meaningless' % (
provided, neglected))
if self._needs_initialization:
self._initialize_headers(
n_atoms=n_atoms,
set_coordinates=True,
set_time=(time is not None),
set_cell=(cell_lengths is not None and cell_angles is not None))
self._needs_initialization = False
# this slice object says where we're going to put the data in the
# arrays
frame_slice = slice(self._frame_index, self._frame_index + n_frames)
# deposit the data
try:
self._handle.variables['coordinates'][frame_slice, :, :] = coordinates
if time is not None:
self._handle.variables['time'][frame_slice] = time
if cell_lengths is not None:
self._handle.variables['cell_lengths'][frame_slice, :] = cell_lengths
if cell_angles is not None:
self._handle.variables['cell_angles'][frame_slice, :] = cell_angles
except KeyError as e:
raise ValueError("The file that you're trying to save to doesn't "
"contain the field %s." % str(e))
# check for missing attributes
missing = None
if (time is None and 'time' in self._handle.variables):
missing = 'time'
elif (cell_angles is None and 'cell_angles' in self._handle.variables):
missing = 'cell_angles'
elif (cell_lengths is None and 'cell_lengths' in self._handle.variables):
missing = 'cell_lengths'
if missing is not None:
raise ValueError("The file that you're saving to expects each frame "
"to contain %s information, but you did not supply it."
"I don't allow 'ragged' arrays." % missing)
# update the frame index pointers. this should be done at the
# end so that if anything errors out, we don't actually get here
self._frame_index += n_frames
def flush(self):
"Write all buffered data in the to the disk file."
self._validate_open()
self._handle.sync()
def _initialize_headers(self, set_coordinates, n_atoms, set_time, set_cell):
"""Initialize the NetCDF file according to the AMBER NetCDF Convention,
Version 1.0, revision B.
The convention is defined here: http://ambermd.org/netcdf/nctraj.xhtml
"""
# Set attributes.
setattr(self._handle, 'title', 'CREATED at %s on %s' %
(datetime.now(), socket.gethostname()))
setattr(self._handle, 'application', 'Omnia')
setattr(self._handle, 'program', 'MDTraj')
setattr(self._handle, 'programVersion', version.short_version)
setattr(self._handle, 'Conventions', 'AMBER')
setattr(self._handle, 'ConventionVersion', '1.0')
# set the dimensions
# unlimited number of frames in trajectory
self._handle.createDimension('frame', 0)
# number of spatial coordinates
self._handle.createDimension('spatial', 3)
# number of atoms
self._handle.createDimension('atom', n_atoms)
if set_cell:
# three spatial coordinates for the length of the unit cell
self._handle.createDimension('cell_spatial', 3)
# three spatial coordinates for the angles that define the shape
# of the unit cell
self._handle.createDimension('cell_angular', 3)
# length of the longest string used for a label
self._handle.createDimension('label', 5)
# Define variables to store unit cell data
self._handle.createVariable('cell_spatial', 'c', ('cell_spatial',))
cell_angles = self._handle.createVariable('cell_angular', 'c', ('cell_spatial', 'label'))
cell_lengths = self._handle.createVariable('cell_lengths', 'd', ('frame', 'cell_spatial'))
setattr(cell_lengths, 'units', 'angstrom')
cell_angles = self._handle.createVariable('cell_angles', 'd', ('frame', 'cell_angular'))
setattr(cell_angles, 'units', 'degree')
self._handle.variables['cell_spatial'][0] = 'x'
self._handle.variables['cell_spatial'][1] = 'y'
self._handle.variables['cell_spatial'][2] = 'z'
self._handle.variables['cell_angular'][0] = 'alpha'
self._handle.variables['cell_angular'][1] = 'beta '
self._handle.variables['cell_angular'][2] = 'gamma'
if set_time:
# Define coordinates and snapshot times.
frame_times = self._handle.createVariable('time', 'f', ('frame',))
setattr(frame_times, 'units', 'picosecond')
if set_coordinates:
frame_coordinates = self._handle.createVariable('coordinates', 'f', ('frame', 'atom', 'spatial'))
setattr(frame_coordinates, 'units', 'angstrom')
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
if whence == 0 and offset >= 0:
self._frame_index = offset
elif whence == 1:
self._frame_index = self._frame_index + offset
elif whence == 2 and offset <= 0:
self._frame_index = self.n_frames + offset
else:
raise IOError('Invalid argument')
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
return int(self._frame_index)
def close(self):
"""Close the NetCDF file handle"""
if not self._closed and hasattr(self, '_handle'):
self._handle.close()
self._closed = True
def __enter__(self):
# supports the context manager protocol
return self
def __exit__(self, *exc_info):
# supports the context manager protocol
self.close()
def __del__(self):
self.close()
def __len__(self):
if self._closed:
raise ValueError('I/O operation on closed file')
return self.n_frames
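# A minimal write/read round-trip sketch (it only runs when this module is
# executed directly; the file name is arbitrary):
if __name__ == '__main__':
    xyz = np.random.randn(10, 22, 3).astype(np.float32)  # 10 frames, 22 atoms
    with NetCDFTrajectoryFile('example_traj.nc', mode='w') as f:
        f.write(xyz, time=np.arange(10, dtype=np.float32))
    with NetCDFTrajectoryFile('example_traj.nc') as f:
        coords, times, lengths, angles = f.read()
        assert coords.shape == (10, 22, 3)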
|
kyleabeauchamp/mdtraj
|
mdtraj/formats/netcdf.py
|
Python
|
lgpl-2.1
| 21,654
| 0.002448
|
# coding=utf-8
# Copyright 2019 The Google UDA Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transforms used in the Augmentation Policies.
Copied from AutoAugment: https://github.com/tensorflow/models/blob/master/research/autoaugment/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
# pylint:disable=g-multiple-import
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
# pylint:enable=g-multiple-import
import tensorflow as tf
FLAGS = tf.flags.FLAGS
IMAGE_SIZE = 32
# The dataset mean and std of the training-set images are provided by get_mean_and_std() below.
PARAMETER_MAX = 10  # Maximum 'level' that a transform can be predicted with
def get_mean_and_std():
if FLAGS.task_name == "cifar10":
means = [0.49139968, 0.48215841, 0.44653091]
stds = [0.24703223, 0.24348513, 0.26158784]
elif FLAGS.task_name == "svhn":
means = [0.4376821, 0.4437697, 0.47280442]
stds = [0.19803012, 0.20101562, 0.19703614]
else:
assert False
return means, stds
def _width_height_from_img_shape(img_shape):
"""`img_shape` in autoaugment is (height, width)."""
return (img_shape[1], img_shape[0])
def random_flip(x):
"""Flip the input x horizontally with 50% probability."""
if np.random.rand(1)[0] > 0.5:
return np.fliplr(x)
return x
def zero_pad_and_crop(img, amount=4):
"""Zero pad by `amount` zero pixels on each side then take a random crop.
Args:
img: numpy image that will be zero padded and cropped.
    amount: amount of zeros to pad `img` with horizontally and vertically.
Returns:
The cropped zero padded img. The returned numpy array will be of the same
shape as `img`.
"""
padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,
img.shape[2]))
padded_img[amount:img.shape[0] + amount, amount:
img.shape[1] + amount, :] = img
top = np.random.randint(low=0, high=2 * amount)
left = np.random.randint(low=0, high=2 * amount)
new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]
return new_img
def create_cutout_mask(img_height, img_width, num_channels, size):
"""Creates a zero mask used for cutout of shape `img_height` x `img_width`.
Args:
img_height: Height of image cutout mask will be applied to.
img_width: Width of image cutout mask will be applied to.
num_channels: Number of channels in the image.
size: Size of the zeros mask.
Returns:
A mask of shape `img_height` x `img_width` with all ones except for a
square of zeros of shape `size` x `size`. This mask is meant to be
elementwise multiplied with the original image. Additionally returns
the `upper_coord` and `lower_coord` which specify where the cutout mask
will be applied.
"""
assert img_height == img_width
# Sample center where cutout mask will be applied
height_loc = np.random.randint(low=0, high=img_height)
width_loc = np.random.randint(low=0, high=img_width)
  # Determine the upper-left and lower-right corners of the patch
upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
lower_coord = (min(img_height, height_loc + size // 2),
min(img_width, width_loc + size // 2))
mask_height = lower_coord[0] - upper_coord[0]
mask_width = lower_coord[1] - upper_coord[1]
assert mask_height > 0
assert mask_width > 0
mask = np.ones((img_height, img_width, num_channels))
zeros = np.zeros((mask_height, mask_width, num_channels))
mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = (
zeros)
return mask, upper_coord, lower_coord
def cutout_numpy(img, size=16):
"""Apply cutout with mask of shape `size` x `size` to `img`.
The cutout operation is from the paper https://arxiv.org/abs/1708.04552.
This operation applies a `size`x`size` mask of zeros to a random location
within `img`.
Args:
img: Numpy image that cutout will be applied to.
    size: Height/width of the square cutout mask that will be applied to `img`.
Returns:
A numpy tensor that is the result of applying the cutout mask to `img`.
"""
img_height, img_width, num_channels = (img.shape[0], img.shape[1],
img.shape[2])
assert len(img.shape) == 3
mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size)
return img * mask
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled
to level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / PARAMETER_MAX
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled
to level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / PARAMETER_MAX)
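# Illustrative sketch, not part of the original module: with PARAMETER_MAX = 10,
# a level of 5 scales to half of `maxval`, and the maximum level reproduces
# `maxval` itself.
assert abs(float_parameter(5, 0.3) - 0.15) < 1e-9
assert int_parameter(5, 30) == 15
assert int_parameter(10, 256) == 256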
def pil_wrap(img, use_mean_std):
"""Convert the `img` numpy tensor to a PIL Image."""
if use_mean_std:
MEANS, STDS = get_mean_and_std()
else:
MEANS = [0, 0, 0]
STDS = [1, 1, 1]
img_ori = (img * STDS + MEANS) * 255
return Image.fromarray(
np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA')
def pil_unwrap(pil_img, use_mean_std, img_shape):
"""Converts the PIL img to a numpy array."""
if use_mean_std:
MEANS, STDS = get_mean_and_std()
else:
MEANS = [0, 0, 0]
STDS = [1, 1, 1]
pic_array = np.array(pil_img.getdata()).reshape((img_shape[0], img_shape[1], 4)) / 255.0
i1, i2 = np.where(pic_array[:, :, 3] == 0)
pic_array = (pic_array[:, :, :3] - MEANS) / STDS
pic_array[i1, i2] = [0, 0, 0]
return pic_array
def apply_policy(policy, img, use_mean_std=True):
"""Apply the `policy` to the numpy `img`.
Args:
policy: A list of tuples with the form (name, probability, level) where
`name` is the name of the augmentation operation to apply, `probability`
is the probability of applying the operation and `level` is what strength
the operation to apply.
img: Numpy image that will have `policy` applied to it.
Returns:
The result of applying `policy` to `img`.
"""
img_shape = img.shape
pil_img = pil_wrap(img, use_mean_std)
for xform in policy:
assert len(xform) == 3
name, probability, level = xform
xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(
probability, level, img_shape)
pil_img = xform_fn(pil_img)
return pil_unwrap(pil_img, use_mean_std, img_shape)
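# Illustrative sketch, not part of the original module: a sub-policy is a list
# of (name, probability, level) tuples; the names and levels below are made up
# for demonstration. use_mean_std=False keeps the example independent of FLAGS.
def _example_apply_policy():
  example_img = np.random.rand(32, 32, 3).astype(np.float32)
  example_policy = [('Rotate', 0.8, 7), ('Equalize', 0.6, 5)]
  augmented = apply_policy(example_policy, example_img, use_mean_std=False)
  return augmented.shape  # -> (32, 32, 3)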
class TransformFunction(object):
"""Wraps the Transform function for pretty printing options."""
def __init__(self, func, name):
self.f = func
self.name = name
def __repr__(self):
return '<' + self.name + '>'
def __call__(self, pil_img):
return self.f(pil_img)
class TransformT(object):
"""Each instance of this class represents a specific transform."""
def __init__(self, name, xform_fn):
self.name = name
self.xform = xform_fn
def pil_transformer(self, probability, level, img_shape):
def return_function(im):
if random.random() < probability:
im = self.xform(im, level, img_shape)
return im
name = self.name + '({:.1f},{})'.format(probability, level)
return TransformFunction(return_function, name)
################## Transform Functions ##################
identity = TransformT('identity', lambda pil_img, level, _: pil_img)
flip_lr = TransformT(
'FlipLR',
lambda pil_img, level, _: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
'FlipUD',
lambda pil_img, level, _: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
auto_contrast = TransformT(
'AutoContrast',
lambda pil_img, level, _: ImageOps.autocontrast(
pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
'Equalize',
lambda pil_img, level, _: ImageOps.equalize(
pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
'Invert',
lambda pil_img, level, _: ImageOps.invert(
pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
blur = TransformT(
'Blur', lambda pil_img, level, _: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
'Smooth',
lambda pil_img, level, _: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level, _):
"""Rotates `pil_img` from -30 to 30 degrees depending on `level`."""
degrees = int_parameter(level, 30)
if random.random() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees)
rotate = TransformT('Rotate', _rotate_impl)
def _posterize_impl(pil_img, level, _):
"""Applies PIL Posterize to `pil_img`."""
level = int_parameter(level, 4)
return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA')
posterize = TransformT('Posterize', _posterize_impl)
def _shear_x_impl(pil_img, level, img_shape):
"""Applies PIL ShearX to `pil_img`.
The ShearX operation shears the image along the horizontal axis with `level`
magnitude.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had ShearX applied to it.
"""
level = float_parameter(level, 0.3)
if random.random() > 0.5:
level = -level
return pil_img.transform(
_width_height_from_img_shape(img_shape),
Image.AFFINE,
(1, level, 0, 0, 1, 0))
shear_x = TransformT('ShearX', _shear_x_impl)
def _shear_y_impl(pil_img, level, img_shape):
"""Applies PIL ShearY to `pil_img`.
The ShearY operation shears the image along the vertical axis with `level`
magnitude.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
    A PIL Image that has had ShearY applied to it.
"""
level = float_parameter(level, 0.3)
if random.random() > 0.5:
level = -level
return pil_img.transform(
_width_height_from_img_shape(img_shape),
Image.AFFINE,
(1, 0, 0, level, 1, 0))
shear_y = TransformT('ShearY', _shear_y_impl)
def _translate_x_impl(pil_img, level, img_shape):
"""Applies PIL TranslateX to `pil_img`.
Translate the image in the horizontal direction by `level`
number of pixels.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had TranslateX applied to it.
"""
level = int_parameter(level, 10)
if random.random() > 0.5:
level = -level
return pil_img.transform(
_width_height_from_img_shape(img_shape),
Image.AFFINE,
(1, 0, level, 0, 1, 0))
translate_x = TransformT('TranslateX', _translate_x_impl)
def _translate_y_impl(pil_img, level, img_shape):
"""Applies PIL TranslateY to `pil_img`.
Translate the image in the vertical direction by `level`
number of pixels.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had TranslateY applied to it.
"""
level = int_parameter(level, 10)
if random.random() > 0.5:
level = -level
return pil_img.transform(
_width_height_from_img_shape(img_shape),
Image.AFFINE,
(1, 0, 0, 0, 1, level))
translate_y = TransformT('TranslateY', _translate_y_impl)
def _crop_impl(pil_img, level, img_shape, interpolation=Image.BILINEAR):
"""Applies a crop to `pil_img` with the size depending on the `level`."""
cropped = pil_img.crop((level, level, img_shape[0] - level, img_shape[1] - level))
resized = cropped.resize((img_shape[0], img_shape[1]), interpolation)
return resized
crop_bilinear = TransformT('CropBilinear', _crop_impl)
def _solarize_impl(pil_img, level, _):
"""Applies PIL Solarize to `pil_img`.
  Inverts all pixel values above a threshold that decreases as `level`
  increases (level 0 leaves the image unchanged).
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had Solarize applied to it.
"""
level = int_parameter(level, 256)
return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')
solarize = TransformT('Solarize', _solarize_impl)
def _cutout_pil_impl(pil_img, level, img_shape):
"""Apply cutout to pil_img at the specified level."""
size = int_parameter(level, 20)
if size <= 0:
return pil_img
img_height, img_width, num_channels = (img_shape[0], img_shape[1], 3)
_, upper_coord, lower_coord = (
create_cutout_mask(img_height, img_width, num_channels, size))
pixels = pil_img.load() # create the pixel map
for i in range(upper_coord[0], lower_coord[0]): # for every col:
for j in range(upper_coord[1], lower_coord[1]): # For every row
pixels[i, j] = (125, 122, 113, 0) # set the colour accordingly
return pil_img
cutout = TransformT('Cutout', _cutout_pil_impl)
def _enhancer_impl(enhancer):
"""Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL."""
def impl(pil_img, level, _):
v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it
return enhancer(pil_img).enhance(v)
return impl
color = TransformT('Color', _enhancer_impl(ImageEnhance.Color))
contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))
brightness = TransformT('Brightness', _enhancer_impl(
ImageEnhance.Brightness))
sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))
ALL_TRANSFORMS = [
flip_lr,
flip_ud,
auto_contrast,
equalize,
invert,
rotate,
posterize,
crop_bilinear,
solarize,
color,
contrast,
brightness,
sharpness,
shear_x,
shear_y,
translate_x,
translate_y,
cutout,
blur,
smooth
]
NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS}
TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()
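# Illustrative sketch, not part of the original module: applying one named
# transform directly through NAME_TO_TRANSFORM; the input image and level are
# arbitrary.
def _example_single_transform():
  rgba = Image.fromarray(np.uint8(np.random.rand(32, 32, 3) * 255)).convert('RGBA')
  xform_fn = NAME_TO_TRANSFORM['Solarize'].pil_transformer(
      probability=1.0, level=6, img_shape=(32, 32, 3))
  return xform_fn(rgba)  # a PIL RGBA image with solarization applied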
|
google-research/uda
|
image/randaugment/augmentation_transforms.py
|
Python
|
apache-2.0
| 14,832
| 0.007821
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Description',
'version': '8.0.1.0.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
Project Description
===================
Adds account_analytic_account description field on project form view
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'project',
],
'data': [
'view/project_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sysadminmatmoz/ingadhoc
|
project_description/__openerp__.py
|
Python
|
agpl-3.0
| 1,639
| 0
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zookeeper, zktestbase, unittest, threading
class DeletionTest(zktestbase.TestBase):
"""Test whether we can delete znodes"""
def test_sync_delete(self):
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
self.assertEqual(self.connected, True)
ret = zookeeper.create(self.handle, "/zk-python-deletetest", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL)
self.assertEqual(ret, "/zk-python-deletetest")
ret = zookeeper.delete(self.handle,"/zk-python-deletetest")
self.assertEqual(ret, zookeeper.OK)
children = zookeeper.get_children(self.handle, "/")
self.assertEqual(False, "zk-python-deletetest" in children)
# test exception
self.assertRaises(zookeeper.NoNodeException,
zookeeper.delete,
self.handle,
"/zk-python-deletetest")
def test_async_delete(self):
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
self.assertEqual(self.connected, True)
ret = zookeeper.create(self.handle, "/zk-python-adeletetest", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL)
self.assertEqual(ret, "/zk-python-adeletetest")
self.cv = threading.Condition()
self.callback_flag = False
self.rc = -1
def callback(handle, rc):
self.cv.acquire()
self.callback_flag = True
self.cv.notify()
self.rc = rc # don't assert this here, as if the assertion fails, the test will block
self.cv.release()
self.cv.acquire()
ret = zookeeper.adelete(self.handle,"/zk-python-adeletetest",-1,callback)
self.assertEqual(ret, zookeeper.OK, "adelete failed")
while not self.callback_flag:
self.cv.wait(15)
self.cv.release()
self.assertEqual(self.callback_flag, True, "adelete timed out")
self.assertEqual(self.rc, zookeeper.OK)
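# Illustrative sketch, not part of the original test: the bare
# condition-variable hand-off used by test_async_delete above, with the
# ZooKeeper call replaced by a plain thread so it is standard-library only.
def _callback_wait_sketch():
    cv = threading.Condition()
    result = {'done': False, 'rc': None}
    def callback(rc):
        cv.acquire()
        result['rc'] = rc
        result['done'] = True
        cv.notify()
        cv.release()
    threading.Thread(target=callback, args=(0,)).start()
    cv.acquire()
    while not result['done']:
        cv.wait(15)
    cv.release()
    return result['rc']  # 0 signals success, mirroring zookeeper.OK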
if __name__ == '__main__':
unittest.main()
|
zhushuchen/Ocean
|
组件/zookeeper-3.3.6/src/contrib/zkpython/src/test/delete_test.py
|
Python
|
agpl-3.0
| 2,872
| 0.006964
|
from selenium import webdriver
import logging
logger = logging.getLogger()
driver = webdriver.Firefox()
|
OrangeTux/MafBot
|
mafbot/__init__.py
|
Python
|
gpl-3.0
| 106
| 0
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
from recipe_engine import config_types
class CheckoutApi(recipe_api.RecipeApi):
@property
def default_checkout_root(self):
"""The default location for cached persistent checkouts."""
return self.m.vars.cache_dir.join('work')
def git(self, checkout_root):
"""Run the steps to perform a pure-git checkout without DEPS."""
skia_dir = checkout_root.join('skia')
self.m.git.checkout(
self.m.properties['repository'], dir_path=skia_dir,
ref=self.m.properties['revision'], submodules=False)
if self.m.vars.is_trybot:
self.m.git('fetch', 'origin', self.m.properties['patch_ref'])
self.m.git('checkout', 'FETCH_HEAD')
self.m.git('rebase', self.m.properties['revision'])
return self.m.properties['revision']
def bot_update(self, checkout_root, gclient_cache=None,
checkout_chromium=False, checkout_flutter=False,
extra_gclient_env=None, parent_rev=False,
flutter_android=False):
"""Run the steps to obtain a checkout using bot_update.
Args:
checkout_root: Root directory where the code will be synced.
gclient_cache: Optional, directory of the gclient cache.
checkout_chromium: If True, will check out chromium/src.git in addition
to the primary repo.
checkout_flutter: If True, will checkout flutter in addition to the
primary repo.
extra_gclient_env: Map of extra environment variable names to their values
to supply while running gclient.
parent_rev: If True, checks out the parent of the specified revision,
rather than the revision itself, ie. HEAD^ for normal jobs and HEAD
(no patch) for try jobs.
flutter_android: Indicates that we're checking out flutter for Android.
"""
if not gclient_cache:
gclient_cache = self.m.vars.cache_dir.join('git')
if not extra_gclient_env:
extra_gclient_env = {}
cfg_kwargs = {}
# Use a persistent gclient cache for Swarming.
cfg_kwargs['CACHE_DIR'] = gclient_cache
# Create the checkout path if necessary.
# TODO(borenet): 'makedirs checkout_root'
self.m.file.ensure_directory('makedirs checkout_path', checkout_root)
# Initial cleanup.
gclient_cfg = self.m.gclient.make_config(**cfg_kwargs)
main_repo = self.m.properties['repository']
if checkout_flutter:
main_repo = 'https://github.com/flutter/engine.git'
main_name = self.m.path.basename(main_repo)
if main_name.endswith('.git'):
main_name = main_name[:-len('.git')]
# Special case for flutter because it seems to need a very specific
# directory structure to successfully build.
if checkout_flutter and main_name == 'engine':
main_name = 'src/flutter'
main = gclient_cfg.solutions.add()
main.name = main_name
main.managed = False
main.url = main_repo
main.revision = self.m.properties.get('revision') or 'origin/master'
m = gclient_cfg.got_revision_mapping
m[main_name] = 'got_revision'
patch_root = main_name
patch_repo = main.url
if self.m.properties.get('patch_repo'):
patch_repo = self.m.properties['patch_repo']
patch_root = patch_repo.split('/')[-1]
if patch_root.endswith('.git'):
patch_root = patch_root[:-4]
if checkout_flutter:
# Skia is a DEP of Flutter; the 'revision' property is a Skia revision,
# and any patch should be applied to Skia, not Flutter.
main.revision = 'origin/master'
main.managed = True
m[main_name] = 'got_flutter_revision'
if flutter_android:
gclient_cfg.target_os.add('android')
skia_dep_path = 'src/third_party/skia'
gclient_cfg.repo_path_map['https://skia.googlesource.com/skia'] = (
skia_dep_path, 'HEAD')
gclient_cfg.revisions[skia_dep_path] = self.m.properties['revision']
m[skia_dep_path] = 'got_revision'
patch_root = skia_dep_path
if checkout_chromium:
main.custom_vars['checkout_chromium'] = True
extra_gclient_env['GYP_CHROMIUM_NO_ACTION'] = '0'
# TODO(rmistry): Remove the below block after there is a solution for
# crbug.com/616443
entries_file = checkout_root.join('.gclient_entries')
if self.m.path.exists(entries_file) or self._test_data.enabled:
self.m.file.remove('remove %s' % entries_file,
entries_file)
# Run bot_update.
if not self.m.vars.is_trybot and parent_rev:
main.revision = main.revision + '^'
patch_refs = None
patch_ref = self.m.properties.get('patch_ref')
if patch_ref:
patch_refs = ['%s@%s:%s' % (self.m.properties['patch_repo'],
self.m.properties['revision'],
patch_ref)]
self.m.gclient.c = gclient_cfg
with self.m.context(cwd=checkout_root):
update_step = self.m.bot_update.ensure_checkout(
patch_root=patch_root,
# The logic in ensure_checkout for this arg is fairly naive, so if
# patch=False, we'll see "... (without patch)" in the step names, even
# for non-trybot runs, which is misleading and confusing. Therefore,
# always specify patch=True for non-trybot runs.
patch=not (self.m.vars.is_trybot and parent_rev),
patch_refs=patch_refs,
)
if checkout_chromium or checkout_flutter:
gclient_env = {'DEPOT_TOOLS_UPDATE': '0'}
if extra_gclient_env:
gclient_env.update(extra_gclient_env)
with self.m.context(cwd=checkout_root, env=gclient_env):
self.m.gclient.runhooks()
return update_step.presentation.properties['got_revision']
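# Illustrative sketch, not part of the recipe module: the patch ref string
# assembled in bot_update() above has the shape
# '<patch repo>@<base revision>:<patch ref>'. The values below are placeholders.
def _patch_ref_sketch():
  patch_repo = 'https://skia.googlesource.com/skia.git'
  revision = 'abc123'
  patch_ref = 'refs/changes/45/12345/6'
  return '%s@%s:%s' % (patch_repo, revision, patch_ref)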
|
youtube/cobalt
|
third_party/skia/infra/bots/recipe_modules/checkout/api.py
|
Python
|
bsd-3-clause
| 5,928
| 0.006748
|
""" Tablib - DataFrame Support.
"""
import sys
if sys.version_info[0] > 2:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
import tablib
from tablib.compat import unicode
title = 'df'
extensions = ('df', )
def detect(stream):
"""Returns True if given stream is a DataFrame."""
if DataFrame is None:
return False
try:
DataFrame(stream)
return True
except ValueError:
return False
def export_set(dset, index=None):
"""Returns DataFrame representation of DataBook."""
if DataFrame is None:
raise NotImplementedError(
'DataFrame Format requires `pandas` to be installed.'
' Try `pip install tablib[pandas]`.')
dataframe = DataFrame(dset.dict, columns=dset.headers)
return dataframe
def import_set(dset, in_stream):
"""Returns dataset from DataFrame."""
dset.wipe()
dset.dict = in_stream.to_dict(orient='records')
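# Usage sketch, not part of tablib: round-trip a small Dataset through the
# DataFrame format defined above (requires pandas to be installed; the sample
# rows are made up).
def _df_roundtrip_example():
    data = tablib.Dataset()
    data.headers = ['name', 'age']
    data.append(('Ada', 36))
    data.append(('Grace', 45))
    frame = export_set(data)        # tablib.Dataset -> pandas.DataFrame
    restored = tablib.Dataset()
    import_set(restored, frame)     # pandas.DataFrame -> tablib.Dataset
    return frame, restored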
|
htwenhe/DJOA
|
env/Lib/site-packages/tablib/formats/_df.py
|
Python
|
mit
| 1,040
| 0.000962
|
import os
import time
from lxml import etree
class device:
def __init__(self,ipadress,name):
self.ipadress=str(ipadress)
self.name=str(name)
self.status="off"
def turn_on(self):
self.status="on"
def turn_off(self):
self.status="off"
def getstatus(devices):
ips=[]
for instance in devices:
instance.turn_off()
test=os.popen("nmap -sP --unprivileged 192.168.2.0/24")
for i in test.readlines():
if i.split(' ')[0]=='Nmap' and i.split(' ')[1]=='scan' :
ips.append(i.split('(')[1][:-2])
for i in xrange(0,len(ips)):
for j in xrange(0,len(devices)):
if ips[i]== devices[j].ipadress:
devices[j].turn_on()
return devices
def writexmlrow(device,container,number):
if (number==1):
col=etree.SubElement(container,'div',{'class':'col-lg-2 col-lg-offset-1 col-md-2 col-md-offset-1 placeholder'})
else:
col=etree.SubElement(container,'div',{'class':'col-lg-2 col-md-2 placeholder'})
if (device.status=='on'):
image1=etree.SubElement(col,'img',{'src':'./images/green.png','width':'200','height':'200','class':'img-responsive','align':'center'})
else:
image1=etree.SubElement(col,'img',{'src':'./images/gray.png','width':'200','height':'200','class':'img-responsive','align':'center'})
label1=etree.SubElement(col,'h4',{'align':'center'})
label1.text=device.name
return
def writexmlpart(devices):
container=etree.Element('div',{'class':'row placeholder'})
i=1
for instance in devices:
writexmlrow(instance,container,i)
i=i+1
output=etree.tostring(container, pretty_print=True)
with open("./parts/part1_1.html","r") as file:
part1=file.read()
with open("./parts/part1_2.html","r") as file:
part2=file.read()
with open("./parts/part1.html","w") as file:
file.write(part1+output+part2)
return
def writescanlog():
    localtime = time.localtime(time.time())
    with open("./log/scanlog.txt","a") as log:
        log.write(str(localtime[3])+':'+str(localtime[4])+' on the '+str(localtime[2])+'.'+str(localtime[1])+'.'+str(localtime[0])[-2:])
log.write("Scanned Wifi for my Devices")
|
RoboWoodhouse/RoboButler
|
python/scanlibrary.py
|
Python
|
mit
| 2,231
| 0.037203
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(name='django-darkknight',
version='0.9.0',
license="BSD",
description="He's a silent guardian, a watchful protector",
long_description=read('README.rst'),
author="Fusionbox, Inc",
author_email="programmers@fusionbox.com",
url='http://github.com/fusionbox/django-darkknight',
packages=['darkknight', 'darkknight_gpg'],
install_requires=[
'django-dotenv',
'Django>=1.5',
'pyOpenSSL',
'django-localflavor',
'django-countries',
],
extras_require = {
'gpg': ['gnupg>=2.0.2,<3', 'django-apptemplates'],
},
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Security :: Cryptography",
],
)
|
fusionbox/django-darkknight
|
setup.py
|
Python
|
bsd-2-clause
| 1,503
| 0.002661
|
from .app import App
from .model import User, Group, ResetNonce
class ViewPermission:
pass
class EditPermission:
pass
@App.permission_rule(model=object, permission=object)
def admin_has_global_permission(identity, model, permission):
user = User.get(email=identity.userid)
return Group.get(name="Admin") in user.groups
@App.permission_rule(model=User, permission=object)
def user_has_self_permission(identity, model, permission):
user = User.get(email=identity.userid)
if user is not None and Group.get(name="Admin") in user.groups:
return True
else:
return model.email == identity.userid
@App.permission_rule(model=ResetNonce, permission=EditPermission)
def user_has_permission_to_reset_nonce(identity, model, permission):
user = User.get(email=identity.userid)
if user is not None and Group.get(name="Admin") in user.groups:
return True
else:
return user.id == int(model.id)
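# Illustrative sketch, not part of this module: assuming `App` is a Morepath
# application, a view opts into one of the permission classes above and
# Morepath evaluates the matching permission rule against the current
# identity. The view name and body below are hypothetical.
@App.json(model=User, name='permissions-example', permission=ViewPermission)
def permissions_example(self, request):
    return {'email': self.email}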
|
yacoma/auth-boilerplate
|
server/permissions.py
|
Python
|
mit
| 959
| 0
|
def _filename(obj):
try:
return obj.__filename__()
except:
pass
return str(obj)
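# Tiny usage sketch, not part of trtools: objects may supply their own name
# via __filename__; anything else falls back to str().
class _FilenameExample(object):
    def __filename__(self):
        return 'example.h5'

assert _filename(_FilenameExample()) == 'example.h5'
assert _filename(42) == '42'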
|
dalejung/trtools
|
trtools/io/common.py
|
Python
|
mit
| 109
| 0.009174
|
"""Support for dynamic COM client support.
Introduction
Dynamic COM client support is the ability to use a COM server without
prior knowledge of the server. This can be used to talk to almost all
COM servers, including much of MS Office.
In general, you should not use this module directly - see below.
Example
>>> import win32com.client
>>> xl = win32com.client.Dispatch("Excel.Application")
# The line above invokes the functionality of this class.
# xl is now an object we can use to talk to Excel.
>>> xl.Visible = 1 # The Excel window becomes visible.
"""
import traceback
import string
import new
import pythoncom
import winerror
import build
from types import StringType, IntType, TupleType, ListType
from pywintypes import UnicodeType, IIDType
import win32com.client # Needed as code we eval() references it.
from win32com.client import NeedUnicodeConversions
debugging=0 # General debugging
debugging_attr=0 # Debugging dynamic attribute lookups.
LCID = 0x0
# These errors generally mean the property or method exists,
# but can't be used in this context - eg, property instead of a method, etc.
# Used to determine if we have a real error or not.
ERRORS_BAD_CONTEXT = [
winerror.DISP_E_MEMBERNOTFOUND,
winerror.DISP_E_BADPARAMCOUNT,
winerror.DISP_E_PARAMNOTOPTIONAL,
winerror.DISP_E_TYPEMISMATCH,
winerror.E_INVALIDARG,
]
ALL_INVOKE_TYPES = [
pythoncom.INVOKE_PROPERTYGET,
pythoncom.INVOKE_PROPERTYPUT,
pythoncom.INVOKE_PROPERTYPUTREF,
pythoncom.INVOKE_FUNC
]
def debug_print(*args):
if debugging:
for arg in args:
print arg,
print
def debug_attr_print(*args):
if debugging_attr:
for arg in args:
print arg,
print
# get the type objects for IDispatch and IUnknown
dispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch]
iunkType = pythoncom.TypeIIDs[pythoncom.IID_IUnknown]
_GoodDispatchType=[StringType,IIDType,UnicodeType]
_defaultDispatchItem=build.DispatchItem
def _GetGoodDispatch(IDispatch, clsctx = pythoncom.CLSCTX_SERVER):
if type(IDispatch) in _GoodDispatchType:
try:
IDispatch = pythoncom.connect(IDispatch)
except pythoncom.ole_error:
IDispatch = pythoncom.CoCreateInstance(IDispatch, None, clsctx, pythoncom.IID_IDispatch)
else:
# may already be a wrapped class.
IDispatch = getattr(IDispatch, "_oleobj_", IDispatch)
return IDispatch
def _GetGoodDispatchAndUserName(IDispatch, userName, clsctx):
# Get a dispatch object, and a 'user name' (ie, the name as
# displayed to the user in repr() etc.
if userName is None:
if type(IDispatch) == StringType:
userName = IDispatch
elif type(IDispatch) == UnicodeType:
# We always want the displayed name to be a real string
userName = IDispatch.encode("ascii", "replace")
elif type(userName) == UnicodeType:
# As above - always a string...
userName = userName.encode("ascii", "replace")
else:
userName = str(userName)
return (_GetGoodDispatch(IDispatch, clsctx), userName)
def _GetDescInvokeType(entry, default_invoke_type):
if not entry or not entry.desc: return default_invoke_type
return entry.desc[4]
def Dispatch(IDispatch, userName = None, createClass = None, typeinfo = None, UnicodeToString=NeedUnicodeConversions, clsctx = pythoncom.CLSCTX_SERVER):
IDispatch, userName = _GetGoodDispatchAndUserName(IDispatch,userName,clsctx)
if createClass is None:
createClass = CDispatch
lazydata = None
try:
if typeinfo is None:
typeinfo = IDispatch.GetTypeInfo()
try:
#try for a typecomp
typecomp = typeinfo.GetTypeComp()
lazydata = typeinfo, typecomp
except pythoncom.com_error:
pass
except pythoncom.com_error:
typeinfo = None
olerepr = MakeOleRepr(IDispatch, typeinfo, lazydata)
return createClass(IDispatch, olerepr, userName,UnicodeToString, lazydata)
def MakeOleRepr(IDispatch, typeinfo, typecomp):
olerepr = None
if typeinfo is not None:
try:
attr = typeinfo.GetTypeAttr()
# If the type info is a special DUAL interface, magically turn it into
# a DISPATCH typeinfo.
if attr[5] == pythoncom.TKIND_INTERFACE and attr[11] & pythoncom.TYPEFLAG_FDUAL:
# Get corresponding Disp interface;
# -1 is a special value which does this for us.
href = typeinfo.GetRefTypeOfImplType(-1);
typeinfo = typeinfo.GetRefTypeInfo(href)
attr = typeinfo.GetTypeAttr()
if typecomp is None:
olerepr = build.DispatchItem(typeinfo, attr, None, 0)
else:
olerepr = build.LazyDispatchItem(attr, None)
except pythoncom.ole_error:
pass
if olerepr is None: olerepr = build.DispatchItem()
return olerepr
def DumbDispatch(IDispatch, userName = None, createClass = None,UnicodeToString=NeedUnicodeConversions, clsctx=pythoncom.CLSCTX_SERVER):
"Dispatch with no type info"
IDispatch, userName = _GetGoodDispatchAndUserName(IDispatch,userName,clsctx)
if createClass is None:
createClass = CDispatch
return createClass(IDispatch, build.DispatchItem(), userName,UnicodeToString)
class CDispatch:
def __init__(self, IDispatch, olerepr, userName = None, UnicodeToString=NeedUnicodeConversions, lazydata = None):
if userName is None: userName = "<unknown>"
self.__dict__['_oleobj_'] = IDispatch
self.__dict__['_username_'] = userName
self.__dict__['_olerepr_'] = olerepr
self.__dict__['_mapCachedItems_'] = {}
self.__dict__['_builtMethods_'] = {}
self.__dict__['_enum_'] = None
self.__dict__['_unicode_to_string_'] = UnicodeToString
self.__dict__['_lazydata_'] = lazydata
def __call__(self, *args):
"Provide 'default dispatch' COM functionality - allow instance to be called"
if self._olerepr_.defaultDispatchName:
invkind, dispid = self._find_dispatch_type_(self._olerepr_.defaultDispatchName)
else:
invkind, dispid = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET, pythoncom.DISPID_VALUE
if invkind is not None:
allArgs = (dispid,LCID,invkind,1) + args
return self._get_good_object_(self._oleobj_.Invoke(*allArgs),self._olerepr_.defaultDispatchName,None)
raise TypeError, "This dispatch object does not define a default method"
def __nonzero__(self):
return 1 # ie "if object:" should always be "true" - without this, __len__ is tried.
# _Possibly_ want to defer to __len__ if available, but Im not sure this is
# desirable???
def __repr__(self):
return "<COMObject %s>" % (self._username_)
def __str__(self):
# __str__ is used when the user does "print object", so we gracefully
# fall back to the __repr__ if the object has no default method.
try:
return str(self.__call__())
except pythoncom.com_error, details:
if details[0] not in ERRORS_BAD_CONTEXT:
raise
return self.__repr__()
# Delegate comparison to the oleobjs, as they know how to do identity.
def __cmp__(self, other):
other = getattr(other, "_oleobj_", other)
return cmp(self._oleobj_, other)
def __int__(self):
return int(self.__call__())
def __len__(self):
invkind, dispid = self._find_dispatch_type_("Count")
if invkind:
return self._oleobj_.Invoke(dispid, LCID, invkind, 1)
raise TypeError, "This dispatch object does not define a Count method"
def _NewEnum(self):
try:
invkind = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET
enum = self._oleobj_.InvokeTypes(pythoncom.DISPID_NEWENUM,LCID,invkind,(13, 10),())
except pythoncom.com_error:
return None # no enumerator for this object.
import util
return util.WrapEnum(enum, None)
def __getitem__(self, index): # syver modified
# Improved __getitem__ courtesy Syver Enstad
# Must check _NewEnum before Item, to ensure b/w compat.
if isinstance(index, IntType):
if self.__dict__['_enum_'] is None:
self.__dict__['_enum_'] = self._NewEnum()
if self.__dict__['_enum_'] is not None:
return self._get_good_object_(self._enum_.__getitem__(index))
# See if we have an "Item" method/property we can use (goes hand in hand with Count() above!)
invkind, dispid = self._find_dispatch_type_("Item")
if invkind is not None:
return self._get_good_object_(self._oleobj_.Invoke(dispid, LCID, invkind, 1, index))
raise TypeError, "This object does not support enumeration"
def __setitem__(self, index, *args):
# XXX - todo - We should support calling Item() here too!
# print "__setitem__ with", index, args
if self._olerepr_.defaultDispatchName:
invkind, dispid = self._find_dispatch_type_(self._olerepr_.defaultDispatchName)
else:
invkind, dispid = pythoncom.DISPATCH_PROPERTYPUT | pythoncom.DISPATCH_PROPERTYPUTREF, pythoncom.DISPID_VALUE
if invkind is not None:
allArgs = (dispid,LCID,invkind,0,index) + args
return self._get_good_object_(self._oleobj_.Invoke(*allArgs),self._olerepr_.defaultDispatchName,None)
raise TypeError, "This dispatch object does not define a default method"
def _find_dispatch_type_(self, methodName):
if self._olerepr_.mapFuncs.has_key(methodName):
item = self._olerepr_.mapFuncs[methodName]
return item.desc[4], item.dispid
if self._olerepr_.propMapGet.has_key(methodName):
item = self._olerepr_.propMapGet[methodName]
return item.desc[4], item.dispid
try:
dispid = self._oleobj_.GetIDsOfNames(0,methodName)
except: ### what error?
return None, None
return pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET, dispid
def _ApplyTypes_(self, dispid, wFlags, retType, argTypes, user, resultCLSID, *args):
result = self._oleobj_.InvokeTypes(*(dispid, LCID, wFlags, retType, argTypes) + args)
return self._get_good_object_(result, user, resultCLSID)
def _wrap_dispatch_(self, ob, userName = None, returnCLSID = None, UnicodeToString = NeedUnicodeConversions):
# Given a dispatch object, wrap it in a class
return Dispatch(ob, userName, UnicodeToString=UnicodeToString)
def _get_good_single_object_(self,ob,userName = None, ReturnCLSID=None):
if iunkType==type(ob):
try:
ob = ob.QueryInterface(pythoncom.IID_IDispatch)
# If this works, we then enter the "is dispatch" test below.
except pythoncom.com_error:
# It is an IUnknown, but not an IDispatch, so just let it through.
pass
if dispatchType==type(ob):
# make a new instance of (probably this) class.
return self._wrap_dispatch_(ob, userName, ReturnCLSID)
elif self._unicode_to_string_ and UnicodeType==type(ob):
return str(ob)
else:
return ob
def _get_good_object_(self,ob,userName = None, ReturnCLSID=None):
"""Given an object (usually the retval from a method), make it a good object to return.
Basically checks if it is a COM object, and wraps it up.
Also handles the fact that a retval may be a tuple of retvals"""
if ob is None: # Quick exit!
return None
elif type(ob)==TupleType:
return tuple(map(lambda o, s=self, oun=userName, rc=ReturnCLSID: s._get_good_single_object_(o, oun, rc), ob))
else:
return self._get_good_single_object_(ob)
def _make_method_(self, name):
"Make a method object - Assumes in olerepr funcmap"
methodName = build.MakePublicAttributeName(name) # translate keywords etc.
methodCodeList = self._olerepr_.MakeFuncMethod(self._olerepr_.mapFuncs[name], methodName,0)
methodCode = string.join(methodCodeList,"\n")
try:
# print "Method code for %s is:\n" % self._username_, methodCode
# self._print_details_()
codeObject = compile(methodCode, "<COMObject %s>" % self._username_,"exec")
# Exec the code object
tempNameSpace = {}
# "Dispatch" in the exec'd code is win32com.client.Dispatch, not ours.
globNameSpace = globals().copy()
globNameSpace["Dispatch"] = win32com.client.Dispatch
exec codeObject in globNameSpace, tempNameSpace # self.__dict__, self.__dict__
name = methodName
# Save the function in map.
fn = self._builtMethods_[name] = tempNameSpace[name]
newMeth = new.instancemethod(fn, self, self.__class__)
return newMeth
except:
debug_print("Error building OLE definition for code ", methodCode)
traceback.print_exc()
return None
def _Release_(self):
"""Cleanup object - like a close - to force cleanup when you dont
want to rely on Python's reference counting."""
for childCont in self._mapCachedItems_.values():
childCont._Release_()
self._mapCachedItems_ = {}
if self._oleobj_:
self._oleobj_.Release()
self.__dict__['_oleobj_'] = None
if self._olerepr_:
self.__dict__['_olerepr_'] = None
self._enum_ = None
def _proc_(self, name, *args):
"""Call the named method as a procedure, rather than function.
Mainly used by Word.Basic, which whinges about such things."""
try:
item = self._olerepr_.mapFuncs[name]
dispId = item.dispid
return self._get_good_object_(self._oleobj_.Invoke(*(dispId, LCID, item.desc[4], 0) + (args) ))
except KeyError:
raise AttributeError, name
def _print_details_(self):
"Debug routine - dumps what it knows about an object."
print "AxDispatch container",self._username_
try:
print "Methods:"
for method in self._olerepr_.mapFuncs.keys():
print "\t", method
print "Props:"
for prop, entry in self._olerepr_.propMap.items():
print "\t%s = 0x%x - %s" % (prop, entry.dispid, `entry`)
print "Get Props:"
for prop, entry in self._olerepr_.propMapGet.items():
print "\t%s = 0x%x - %s" % (prop, entry.dispid, `entry`)
print "Put Props:"
for prop, entry in self._olerepr_.propMapPut.items():
print "\t%s = 0x%x - %s" % (prop, entry.dispid, `entry`)
except:
traceback.print_exc()
def __LazyMap__(self, attr):
try:
if self._LazyAddAttr_(attr):
debug_attr_print("%s.__LazyMap__(%s) added something" % (self._username_,attr))
return 1
except AttributeError:
return 0
# Using the typecomp, lazily create a new attribute definition.
def _LazyAddAttr_(self,attr):
if self._lazydata_ is None: return 0
res = 0
typeinfo, typecomp = self._lazydata_
olerepr = self._olerepr_
# We need to explicitly check each invoke type individually - simply
# specifying '0' will bind to "any member", which may not be the one
# we are actually after (ie, we may be after prop_get, but returned
# the info for the prop_put.)
for i in ALL_INVOKE_TYPES:
try:
x,t = typecomp.Bind(attr,i)
# Support 'Get' and 'Set' properties - see
# bug 1587023
if x==0 and attr[:3] in ('Set', 'Get'):
x,t = typecomp.Bind(attr[3:], i)
if x==1: #it's a FUNCDESC
r = olerepr._AddFunc_(typeinfo,t,0)
elif x==2: #it's a VARDESC
r = olerepr._AddVar_(typeinfo,t,0)
else: #not found or TYPEDESC/IMPLICITAPP
r=None
if not r is None:
key, map = r[0],r[1]
item = map[key]
if map==olerepr.propMapPut:
olerepr._propMapPutCheck_(key,item)
elif map==olerepr.propMapGet:
olerepr._propMapGetCheck_(key,item)
res = 1
except:
pass
return res
def _FlagAsMethod(self, *methodNames):
"""Flag these attribute names as being methods.
Some objects do not correctly differentiate methods and
properties, leading to problems when calling these methods.
Specifically, trying to say: ob.SomeFunc()
may yield an exception "None object is not callable"
        In this case, an attempt to fetch the *property* has worked
and returned None, rather than indicating it is really a method.
Calling: ob._FlagAsMethod("SomeFunc")
should then allow this to work.
"""
for name in methodNames:
details = build.MapEntry(self.__AttrToID__(name), (name,))
self._olerepr_.mapFuncs[name] = details
def __AttrToID__(self,attr):
debug_attr_print("Calling GetIDsOfNames for property %s in Dispatch container %s" % (attr, self._username_))
return self._oleobj_.GetIDsOfNames(0,attr)
def __getattr__(self, attr):
if attr=='__iter__':
# We can't handle this as a normal method, as if the attribute
# exists, then it must return an iterable object.
try:
invkind = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET
enum = self._oleobj_.InvokeTypes(pythoncom.DISPID_NEWENUM,LCID,invkind,(13, 10),())
except pythoncom.com_error:
raise AttributeError, "This object can not function as an iterator"
# We must return a callable object.
class Factory:
def __init__(self, ob):
self.ob = ob
def __call__(self):
import win32com.client.util
return win32com.client.util.Iterator(self.ob)
return Factory(enum)
if attr[0]=='_' and attr[-1]=='_': # Fast-track.
raise AttributeError, attr
# If a known method, create new instance and return.
try:
return new.instancemethod(self._builtMethods_[attr], self, self.__class__)
except KeyError:
pass
# XXX - Note that we current are case sensitive in the method.
#debug_attr_print("GetAttr called for %s on DispatchContainer %s" % (attr,self._username_))
# First check if it is in the method map. Note that an actual method
# must not yet exist, (otherwise we would not be here). This
# means we create the actual method object - which also means
# this code will never be asked for that method name again.
if self._olerepr_.mapFuncs.has_key(attr):
return self._make_method_(attr)
# Delegate to property maps/cached items
retEntry = None
if self._olerepr_ and self._oleobj_:
# first check general property map, then specific "put" map.
retEntry = self._olerepr_.propMap.get(attr)
if retEntry is None:
retEntry = self._olerepr_.propMapGet.get(attr)
# Not found so far - See what COM says.
if retEntry is None:
try:
if self.__LazyMap__(attr):
if self._olerepr_.mapFuncs.has_key(attr): return self._make_method_(attr)
retEntry = self._olerepr_.propMap.get(attr)
if retEntry is None:
retEntry = self._olerepr_.propMapGet.get(attr)
if retEntry is None:
retEntry = build.MapEntry(self.__AttrToID__(attr), (attr,))
except pythoncom.ole_error:
pass # No prop by that name - retEntry remains None.
if not retEntry is None: # see if in my cache
try:
ret = self._mapCachedItems_[retEntry.dispid]
debug_attr_print ("Cached items has attribute!", ret)
return ret
except (KeyError, AttributeError):
debug_attr_print("Attribute %s not in cache" % attr)
# If we are still here, and have a retEntry, get the OLE item
if not retEntry is None:
invoke_type = _GetDescInvokeType(retEntry, pythoncom.INVOKE_PROPERTYGET)
debug_attr_print("Getting property Id 0x%x from OLE object" % retEntry.dispid)
try:
ret = self._oleobj_.Invoke(retEntry.dispid,0,invoke_type,1)
except pythoncom.com_error, details:
if details[0] in ERRORS_BAD_CONTEXT:
# May be a method.
self._olerepr_.mapFuncs[attr] = retEntry
return self._make_method_(attr)
raise pythoncom.com_error, details
debug_attr_print("OLE returned ", ret)
return self._get_good_object_(ret)
# no where else to look.
raise AttributeError, "%s.%s" % (self._username_, attr)
def __setattr__(self, attr, value):
if self.__dict__.has_key(attr): # Fast-track - if already in our dict, just make the assignment.
# XXX - should maybe check method map - if someone assigns to a method,
# it could mean something special (not sure what, tho!)
self.__dict__[attr] = value
return
# Allow property assignment.
debug_attr_print("SetAttr called for %s.%s=%s on DispatchContainer" % (self._username_, attr, `value`))
if self._olerepr_:
# Check the "general" property map.
if self._olerepr_.propMap.has_key(attr):
entry = self._olerepr_.propMap[attr]
invoke_type = _GetDescInvokeType(entry, pythoncom.INVOKE_PROPERTYPUT)
self._oleobj_.Invoke(entry.dispid, 0, invoke_type, 0, value)
return
# Check the specific "put" map.
if self._olerepr_.propMapPut.has_key(attr):
entry = self._olerepr_.propMapPut[attr]
invoke_type = _GetDescInvokeType(entry, pythoncom.INVOKE_PROPERTYPUT)
self._oleobj_.Invoke(entry.dispid, 0, invoke_type, 0, value)
return
# Try the OLE Object
if self._oleobj_:
if self.__LazyMap__(attr):
# Check the "general" property map.
if self._olerepr_.propMap.has_key(attr):
entry = self._olerepr_.propMap[attr]
invoke_type = _GetDescInvokeType(entry, pythoncom.INVOKE_PROPERTYPUT)
self._oleobj_.Invoke(entry.dispid, 0, invoke_type, 0, value)
return
# Check the specific "put" map.
if self._olerepr_.propMapPut.has_key(attr):
entry = self._olerepr_.propMapPut[attr]
invoke_type = _GetDescInvokeType(entry, pythoncom.INVOKE_PROPERTYPUT)
self._oleobj_.Invoke(entry.dispid, 0, invoke_type, 0, value)
return
try:
entry = build.MapEntry(self.__AttrToID__(attr),(attr,))
except pythoncom.com_error:
# No attribute of that name
entry = None
if entry is not None:
try:
invoke_type = _GetDescInvokeType(entry, pythoncom.INVOKE_PROPERTYPUT)
self._oleobj_.Invoke(entry.dispid, 0, invoke_type, 0, value)
self._olerepr_.propMap[attr] = entry
debug_attr_print("__setattr__ property %s (id=0x%x) in Dispatch container %s" % (attr, entry.dispid, self._username_))
return
except pythoncom.com_error:
pass
raise AttributeError, "Property '%s.%s' can not be set." % (self._username_, attr)
|
Southpaw-TACTIC/Team
|
src/python/Lib/site-packages/win32com/client/dynamic.py
|
Python
|
epl-1.0
| 21,580
| 0.031279
|
import numpy as np
import pytest
import pandas as pd
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseSeriesIndexing:
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name="xxx")
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name="xxx")
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name="xxx")
res = s[::2]
exp = pd.SparseSeries(
[0, 2, 4, 6], index=[0, 2, 4, 6], fill_value=0, name="xxx"
)
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
assert sparse.loc["A"] == 1
assert np.isnan(sparse.loc["B"])
result = sparse.loc[["A", "C", "D"]]
exp = orig.loc[["A", "C", "D"]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc["A"] == 1
assert np.isnan(sparse.loc["B"])
result = sparse.loc[["A", "C", "D"]]
exp = orig.loc[["A", "C", "D"]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(
sparse.loc["C":], orig.loc["C":].to_sparse(fill_value=0)
)
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(
sparse.iloc[2:], orig.iloc[2:].to_sparse(fill_value=0)
)
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("abcde"))
sparse = orig.to_sparse()
assert sparse.at["a"] == orig.at["a"]
assert np.isnan(sparse.at["b"])
assert np.isnan(sparse.at["c"])
assert sparse.at["d"] == orig.at["d"]
assert np.isnan(sparse.at["e"])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("abcde"))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at["a"] == orig.at["a"]
assert np.isnan(sparse.at["b"])
assert sparse.at["c"] == orig.at["c"]
assert sparse.at["d"] == orig.at["d"]
assert sparse.at["e"] == orig.at["e"]
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list("ABCDE"))
assert s.get("A") == 1
assert np.isnan(s.get("B"))
assert s.get("C") == 0
assert s.get("XX") is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list("ABCDE"), fill_value=0)
assert s.get("A") == 1
assert np.isnan(s.get("B"))
assert s.get("C") == 0
assert s.get("XX") is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]), orig.take([0]).to_sparse())
tm.assert_sp_series_equal(
sparse.take([0, 1, 3]), orig.take([0, 1, 3]).to_sparse()
)
tm.assert_sp_series_equal(
sparse.take([-1, -2]), orig.take([-1, -2]).to_sparse()
)
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(
sparse.take([0]), orig.take([0]).to_sparse(fill_value=0)
)
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(["B", "E", "C"])
exp = orig.reindex(["B", "E", "C"]).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(["A", "B", "C"])
exp = orig.reindex(["A", "B", "C"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1.0, 2.0, 3.0]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2.0, 3.0, 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype="float64")).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method="nearest")
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method="nearest", tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
@pytest.mark.parametrize("kind", ["integer", "block"])
@pytest.mark.parametrize("fill", [True, False, np.nan])
def tests_indexing_with_sparse(self, kind, fill):
# see gh-13985
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill, dtype=bool)
expected = arr[indexer]
result = pd.SparseArray([1, 3], kind=kind)
tm.assert_sp_array_equal(result, expected)
s = pd.SparseSeries(arr, index=["a", "b", "c"], dtype=np.float64)
expected = pd.SparseSeries(
[1, 3],
index=["a", "c"],
kind=kind,
dtype=SparseDtype(np.float64, s.fill_value),
)
tm.assert_sp_series_equal(s[indexer], expected)
tm.assert_sp_series_equal(s.loc[indexer], expected)
tm.assert_sp_series_equal(s.iloc[indexer], expected)
indexer = pd.SparseSeries(indexer, index=["a", "b", "c"])
tm.assert_sp_series_equal(s[indexer], expected)
tm.assert_sp_series_equal(s.loc[indexer], expected)
msg = "iLocation based boolean indexing cannot use an indexable as a mask"
with pytest.raises(ValueError, match=msg):
s.iloc[indexer]
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
        # MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples(
[("A", 0), ("A", 1), ("B", 0), ("C", 0), ("C", 1)]
)
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == orig[0]
assert np.isnan(sparse[1])
assert sparse[3] == orig[3]
tm.assert_sp_series_equal(sparse["A"], orig["A"].to_sparse())
tm.assert_sp_series_equal(sparse["B"], orig["B"].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse["C", 0] == orig["C", 0]
assert np.isnan(sparse["A", 1])
assert np.isnan(sparse["B", 0])
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc["B":], orig.loc["B":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["C":], orig.loc["C":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["A":"B"], orig.loc["A":"B"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:"B"], orig.loc[:"B"].to_sparse())
def test_loc(self):
        # needs to be overridden to use different labels
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc["A"], orig.loc["A"].to_sparse())
tm.assert_sp_series_equal(sparse.loc["B"], orig.loc["B"].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# single element list (GH 15447)
result = sparse.loc[["A"]]
exp = orig.loc[["A"]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc["C", 0] == orig.loc["C", 0]
assert np.isnan(sparse.loc["A", 1])
assert np.isnan(sparse.loc["B", 0])
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc["A":], orig.loc["A":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["B":], orig.loc["B":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["C":], orig.loc["C":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["A":"B"], orig.loc["A":"B"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:"B"], orig.loc[:"B"].to_sparse())
def test_reindex(self):
# GH 15447
orig = self.orig
sparse = self.sparse
res = sparse.reindex([("A", 0), ("C", 1)])
exp = orig.reindex([("A", 0), ("C", 1)]).to_sparse()
tm.assert_sp_series_equal(res, exp)
# On specific level:
res = sparse.reindex(["A", "C", "B"], level=0)
exp = orig.reindex(["A", "C", "B"], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
# single element list (GH 15447)
res = sparse.reindex(["A"], level=0)
exp = orig.reindex(["A"], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
with pytest.raises(TypeError):
# Incomplete keys are not accepted for reindexing:
sparse.reindex(["A", "C"])
# "copy" argument:
res = sparse.reindex(sparse.index, copy=True)
exp = orig.reindex(orig.index, copy=True).to_sparse()
tm.assert_sp_series_equal(res, exp)
assert sparse is not res
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseDataFrameIndexing:
def test_getitem(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse["x"], orig["x"].to_sparse())
tm.assert_sp_frame_equal(sparse[["x"]], orig[["x"]].to_sparse())
tm.assert_sp_frame_equal(sparse[["z", "x"]], orig[["z", "x"]].to_sparse())
tm.assert_sp_frame_equal(
sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse(),
)
tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], orig.iloc[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
result = sparse[["z"]]
expected = orig[["z"]].to_sparse(fill_value=0)
tm.assert_sp_frame_equal(result, expected, check_fill_value=False)
tm.assert_sp_series_equal(sparse["y"], orig["y"].to_sparse(fill_value=0))
exp = orig[["x"]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[["x"]], exp)
exp = orig[["z", "x"]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[["z", "x"]], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig.iloc[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.loc[0, "x"] == 1
assert np.isnan(sparse.loc[1, "z"])
assert sparse.loc[2, "z"] == 4
# have to specify `kind='integer'`, since we construct a
# new SparseArray here, and the default sparse type is
# integer there, but block in SparseSeries
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse(kind="integer"))
tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse(kind="integer"))
tm.assert_sp_series_equal(
sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ["x", "z"]]
exp = orig.loc[:, ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ["x", "z"]]
exp = orig.loc[[0, 2], ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
index=list("abc"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.loc["a", "x"] == 1
assert np.isnan(sparse.loc["b", "z"])
assert sparse.loc["c", "z"] == 4
tm.assert_sp_series_equal(
sparse.loc["a"], orig.loc["a"].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc["b"], orig.loc["b"].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
result = sparse.loc[["a", "b"]]
exp = orig.loc[["a", "b"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[["a", "b"], :]
exp = orig.loc[["a", "b"], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ["x", "z"]]
exp = orig.loc[:, ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[["c", "a"], ["x", "z"]]
exp = orig.loc[["c", "a"], ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]])
sparse = orig.to_sparse()
assert sparse.iloc[1, 1] == 3
assert np.isnan(sparse.iloc[2, 0])
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse(kind="integer"))
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse(kind="integer"))
tm.assert_sp_series_equal(
sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.at["A", "x"] == orig.at["A", "x"]
assert np.isnan(sparse.at["B", "z"])
assert np.isnan(sparse.at["C", "y"])
assert sparse.at["D", "x"] == orig.at["D", "x"]
def test_at_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
assert sparse.at["A", "x"] == orig.at["A", "x"]
assert np.isnan(sparse.at["B", "z"])
assert np.isnan(sparse.at["C", "y"])
assert sparse.at["D", "x"] == orig.at["D", "x"]
def test_iat(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.iat[0, 0] == orig.iat[0, 0]
assert np.isnan(sparse.iat[1, 2])
assert np.isnan(sparse.iat[2, 1])
assert sparse.iat[2, 0] == orig.iat[2, 0]
assert np.isnan(sparse.iat[-1, -2])
assert sparse.iat[-1, -1] == orig.iat[-1, -1]
def test_iat_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
assert sparse.iat[0, 0] == orig.iat[0, 0]
assert np.isnan(sparse.iat[1, 2])
assert np.isnan(sparse.iat[2, 1])
assert sparse.iat[2, 0] == orig.iat[2, 0]
assert np.isnan(sparse.iat[-1, -2])
assert sparse.iat[-1, -1] == orig.iat[-1, -1]
def test_take(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]), orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]), orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]), orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame(
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
index=list("ABCD"),
columns=list("xyz"),
dtype=np.int,
)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestMultitype:
def setup_method(self, method):
self.cols = ["string", "int", "float", "object"]
self.string_series = pd.SparseSeries(["a", "b", "c"])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame(
{
"string": self.string_series,
"int": self.int_series,
"float": self.float_series,
"object": self.object_series,
}
)
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(["a", 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
assert row.dtype == SparseDtype(object)
tm.assert_sp_series_equal(
self.sdf["string"], self.string_series, check_names=False
)
tm.assert_sp_series_equal(self.sdf["int"], self.int_series, check_names=False)
tm.assert_sp_series_equal(
self.sdf["float"], self.float_series, check_names=False
)
tm.assert_sp_series_equal(
self.sdf["object"], self.object_series, check_names=False
)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(
self.sdf.iloc[0],
pd.SparseSeries(["a", 1, 1.1, []], index=self.cols),
check_names=False,
)
tm.assert_sp_series_equal(
self.sdf.iloc[1],
pd.SparseSeries(["b", 2, 1.2, {}], index=self.cols),
check_names=False,
)
tm.assert_sp_series_equal(
self.sdf.iloc[2],
pd.SparseSeries(["c", 3, 1.3, set()], index=self.cols),
check_names=False,
)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(
self.sdf.iloc[[1, 2]],
pd.SparseDataFrame(
{
"string": self.string_series.iloc[[1, 2]],
"int": self.int_series.iloc[[1, 2]],
"float": self.float_series.iloc[[1, 2]],
"object": self.object_series.iloc[[1, 2]],
},
index=[1, 2],
)[self.cols],
)
tm.assert_sp_frame_equal(
self.sdf[["int", "string"]],
pd.SparseDataFrame({"int": self.int_series, "string": self.string_series}),
)
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
assert self.ss.iloc[i] == self.ss[idx]
tm.assert_class_equal(self.ss.iloc[i], self.ss[idx], obj="series index")
assert self.ss["string"] == "a"
assert self.ss["int"] == 1
assert self.ss["float"] == 1.1
assert self.ss["object"] == []
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(
self.ss.loc[["string", "int"]],
pd.SparseSeries(["a", 1], index=["string", "int"]),
)
tm.assert_sp_series_equal(
self.ss.loc[["string", "object"]],
pd.SparseSeries(["a", []], index=["string", "object"]),
)
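# Hedged illustration of the fill_value semantics exercised throughout this
# module (pre-1.0 pandas sparse API; the concrete values are assumptions):
#
#     orig = pd.Series([1, np.nan, 0, 3, 0])
#     orig.to_sparse()              # NaN is the implicit fill value
#     orig.to_sparse(fill_value=0)  # zeros count as "fill"; only 1, nan and 3 are stored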
| toobaz/pandas | pandas/tests/sparse/test_indexing.py | Python | bsd-3-clause | 38,613 | 0.000829 |
from quotes.models import Quote
from django.contrib import admin
class QuoteAdmin(admin.ModelAdmin):
list_display = ('message', 'name', 'program', 'class_of',
'submission_time')
admin.site.register(Quote, QuoteAdmin)
| k4rtik/alpo | quotes/admin.py | Python | mit | 243 | 0.00823 |
#!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
# http://www.voip-info.org/wiki/view/asterisk+manager+events
import sys,time
import simplejson as json
from stompy.simple import Client
import ConfigParser
config = ConfigParser.ConfigParser()
devel_config = ConfigParser.ConfigParser()
config.read('/opt/ucall/etc/config.ini')
devel_config.read('/opt/ucall/etc/devel_config.ini')
stomp_host = config.get('STOMP', 'host')
stomp_username = config.get('STOMP', 'username')
stomp_password = config.get('STOMP', 'password')
stomp_queue = "/queue/messages/" + devel_config.get('GENERAL', 'agent')
print '='*80
print 'Stomp host:', stomp_host
print 'Stomp username:', stomp_username
print 'Stomp password:', stomp_password
print 'Stomp queue:', stomp_queue
print '='*80
stomp = Client(stomp_host)
stomp.connect(stomp_username, stomp_password)
stomp.subscribe("jms.queue.msg.ctrl")
while True:
message = stomp.get()
print message.body
stomp.disconnect()
| gryzz/uCall | utils/asterisk-connector/ami2stomp-get.py | Python | gpl-3.0 | 972 | 0.005144 |
def _magic_get_file_type(f, _):
file_type = magic.from_buffer(f.read(1024), mime=True)
f.seek(0)
return file_type.decode('utf-8')
def _guess_file_type(_, filename):
return mimetypes.guess_type(filename)[0]
try:
import magic
except ImportError:
import mimetypes
get_file_type = _guess_file_type
else:
get_file_type = _magic_get_file_type
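# Hypothetical usage sketch (the filename is a placeholder, not part of the
# original module): whichever import branch wins above, callers see the same
# two-argument callable.
#
#     with open('photo.jpg', 'rb') as f:
#         mime = get_file_type(f, 'photo.jpg')   # e.g. 'image/jpeg'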
| xbot/alfred-pushbullet | lib/pushbullet/filetype.py | Python | mit | 373 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class FloatingIPsTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
server_id = None
floating_ip = None
@classmethod
def setUpClass(cls):
super(FloatingIPsTestJSON, cls).setUpClass()
cls.client = cls.floating_ips_client
cls.servers_client = cls.servers_client
# Server creation
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
# Floating IP creation
resp, body = cls.client.create_floating_ip()
cls.floating_ip_id = body['id']
cls.floating_ip = body['ip']
# Generating a nonexistent floatingIP id
cls.floating_ip_ids = []
resp, body = cls.client.list_floating_ips()
for i in range(len(body)):
cls.floating_ip_ids.append(body[i]['id'])
while True:
cls.non_exist_id = data_utils.rand_int_id(start=999)
if cls.config.service_available.neutron:
cls.non_exist_id = str(uuid.uuid4())
if cls.non_exist_id not in cls.floating_ip_ids:
break
@classmethod
def tearDownClass(cls):
# Deleting the floating IP which is created in this method
resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
super(FloatingIPsTestJSON, cls).tearDownClass()
@attr(type='gate')
def test_allocate_floating_ip(self):
# Positive test:Allocation of a new floating IP to a project
# should be successful
resp, body = self.client.create_floating_ip()
self.assertEqual(200, resp.status)
floating_ip_id_allocated = body['id']
try:
resp, floating_ip_details = \
self.client.get_floating_ip_details(floating_ip_id_allocated)
            # Check that the details of the allocated IP appear in the list of floating IPs
resp, body = self.client.list_floating_ips()
self.assertIn(floating_ip_details, body)
finally:
# Deleting the floating IP which is created in this method
self.client.delete_floating_ip(floating_ip_id_allocated)
@attr(type=['negative', 'gate'])
def test_allocate_floating_ip_from_nonexistent_pool(self):
        # Negative test: Allocation of a new floating IP from a nonexistent
        # pool to a project should fail
self.assertRaises(exceptions.NotFound,
self.client.create_floating_ip,
"non_exist_pool")
@attr(type='gate')
def test_delete_floating_ip(self):
# Positive test:Deletion of valid floating IP from project
# should be successful
# Creating the floating IP that is to be deleted in this method
resp, floating_ip_body = self.client.create_floating_ip()
# Storing the details of floating IP before deleting it
cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
resp, floating_ip_details = cli_resp
# Deleting the floating IP from the project
resp, body = self.client.delete_floating_ip(floating_ip_body['id'])
self.assertEqual(202, resp.status)
# Check it was really deleted.
self.client.wait_for_resource_deletion(floating_ip_body['id'])
@attr(type='gate')
def test_associate_disassociate_floating_ip(self):
# Positive test:Associate and disassociate the provided floating IP
# to a specific server should be successful
# Association of floating IP to fixed IP address
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
# Disassociation of floating IP that was associated in this method
resp, body = self.client.disassociate_floating_ip_from_server(
self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_floating_ip(self):
# Negative test:Deletion of a nonexistent floating IP
# from project should fail
# Deleting the non existent floating IP
self.assertRaises(exceptions.NotFound, self.client.delete_floating_ip,
self.non_exist_id)
@attr(type=['negative', 'gate'])
def test_associate_nonexistant_floating_ip(self):
# Negative test:Association of a non existent floating IP
# to specific server should fail
# Associating non existent floating IP
self.assertRaises(exceptions.NotFound,
self.client.associate_floating_ip_to_server,
"0.0.0.0", self.server_id)
@attr(type=['negative', 'gate'])
def test_dissociate_nonexistant_floating_ip(self):
# Negative test:Dissociation of a non existent floating IP should fail
# Dissociating non existent floating IP
self.assertRaises(exceptions.NotFound,
self.client.disassociate_floating_ip_from_server,
"0.0.0.0", self.server_id)
@attr(type='gate')
def test_associate_already_associated_floating_ip(self):
        # Positive test: Association of an already associated floating IP
        # to a specific server should change the association of the floating IP
# Create server so as to use for Multiple association
resp, body = self.servers_client.create_server('floating-server2',
self.image_ref,
self.flavor_ref)
self.servers_client.wait_for_server_status(body['id'], 'ACTIVE')
self.new_server_id = body['id']
# Associating floating IP for the first time
resp, _ = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
# Associating floating IP for the second time
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.new_server_id)
self.addCleanup(self.servers_client.delete_server, self.new_server_id)
if (resp['status'] is not None):
self.addCleanup(self.client.disassociate_floating_ip_from_server,
self.floating_ip,
self.new_server_id)
# Make sure no longer associated with old server
self.assertRaises((exceptions.NotFound,
exceptions.UnprocessableEntity),
self.client.disassociate_floating_ip_from_server,
self.floating_ip, self.server_id)
@attr(type=['negative', 'gate'])
def test_associate_ip_to_server_without_passing_floating_ip(self):
# Negative test:Association of empty floating IP to specific server
# should raise NotFound exception
self.assertRaises(exceptions.NotFound,
self.client.associate_floating_ip_to_server,
'', self.server_id)
class FloatingIPsTestXML(FloatingIPsTestJSON):
_interface = 'xml'
| eltonkevani/tempest_el_env | tempest/api/compute/floating_ips/test_floating_ips_actions.py | Python | apache-2.0 | 7,990 | 0 |
# coding:utf-8
'''
Created on 2017/11/7.
@author: chk01
'''
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from class_two.week_three.tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
np.random.seed(1)
def exam1():
y_hat = tf.constant(36, name='Y-hat')
y = tf.constant(39, name='y')
loss = tf.Variable((y - y_hat) ** 2, name='loss')
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
print(sess.run(loss))
def exam2():
a = tf.constant(2)
b = tf.constant(3)
c = tf.multiply(a, b)
return c
def exam3(x_input):
with tf.Session() as sess:
x = tf.placeholder(tf.int64, name='x')
y = 2 * x
print(sess.run(y, feed_dict={x: x_input}))
# GRADED FUNCTION: linear_function
def linear_function():
"""
Implements a linear function:
Initializes W to be a random tensor of shape (4,3)
Initializes X to be a random tensor of shape (3,1)
Initializes b to be a random tensor of shape (4,1)
Returns:
result -- runs the session for Y = WX + b
"""
np.random.seed(1)
X = tf.constant(np.random.randn(3, 1), tf.float32, name='X')
W = tf.constant(np.random.randn(4, 3), tf.float32, name='W')
b = tf.constant(np.random.randn(4, 1), tf.float32, name='b')
Y = tf.matmul(W, X) + b
with tf.Session() as sess:
result = sess.run(Y)
return result
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Computes the sigmoid of z
Arguments:
z -- input value, scalar or vector
Returns:
results -- the sigmoid of z
"""
x = tf.placeholder(tf.float32, name='x')
sigmoid = tf.nn.sigmoid(x)
with tf.Session() as sess:
result = sess.run(sigmoid, feed_dict={x: z})
return result
def cost(logits, labels):
"""
Computes the cost using the sigmoid cross entropy
Arguments:
logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
labels -- vector of labels y (1 or 0)
Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
in the TensorFlow documentation. So logits will feed into z, and labels into y.
Returns:
cost -- runs the session of the cost (formula (2))
"""
z = tf.placeholder(tf.float32, name='z-input')
y = tf.placeholder(tf.float32, name='y-input')
cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
with tf.Session() as sess:
cost = sess.run(cost, feed_dict={z: logits, y: labels})
return cost
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
"""
Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j has label i, then entry (i, j)
    will be 1.
Arguments:
labels -- vector containing the labels
C -- number of classes, the depth of the one hot dimension
Returns:
one_hot -- one hot matrix
"""
C = tf.constant(C, name='C')
one_hot_matrix = tf.one_hot(labels, C, axis=0)
    # tf.nn.sigmoid_cross_entropy_with_logits()  # stray no-argument call; would raise at runtime, so commented out
with tf.Session() as sess:
one_hot = sess.run(one_hot_matrix)
return one_hot
if __name__ == '__main__':
# exam1()
logits = np.array([0.2, 0.4, 0.7, 0.9])
cost = cost(logits, np.array([0, 0, 1, 1]))
print("cost = " + str(cost))
    # tf.one_hot(labels, C, axis=0)  # leftover snippet; labels and C are undefined here, so commented out
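    # A hedged usage sketch for one_hot_matrix (the label values follow the
    # usual course example and are assumptions, not verified output):
    #
    #     labels = np.array([1, 2, 3, 0, 2, 1])
    #     one_hot = one_hot_matrix(labels, C=4)
    #     # one_hot has shape (4, 6); column j one-hot encodes labels[j]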
| sunyihuan326/DeltaLab | Andrew_NG_learning/class_two/week_three/Dxq_1.py | Python | mit | 3,691 | 0.00246 |
"""
raven.core.processors
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import re
from raven.utils import varmap
from raven.utils import six
class Processor(object):
def __init__(self, client):
self.client = client
def get_data(self, data, **kwargs):
return
def process(self, data, **kwargs):
resp = self.get_data(data, **kwargs)
if resp:
data = resp
if 'exception' in data:
if 'values' in data['exception']:
for value in data['exception'].get('values', []):
if 'stacktrace' in value:
self.filter_stacktrace(value['stacktrace'])
if 'request' in data:
self.filter_http(data['request'])
if 'extra' in data:
data['extra'] = self.filter_extra(data['extra'])
return data
def filter_stacktrace(self, data):
pass
def filter_http(self, data):
pass
def filter_extra(self, data):
return data
class RemovePostDataProcessor(Processor):
"""
Removes HTTP post data.
"""
def filter_http(self, data, **kwargs):
data.pop('data', None)
class RemoveStackLocalsProcessor(Processor):
"""
Removes local context variables from stacktraces.
"""
def filter_stacktrace(self, data, **kwargs):
for frame in data.get('frames', []):
frame.pop('vars', None)
class SanitizePasswordsProcessor(Processor):
"""
Asterisk out things that look like passwords, credit card numbers,
and API keys in frames, http, and basic extra data.
"""
MASK = '*' * 8
FIELDS = frozenset([
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'sentry_dsn',
])
VALUES_RE = re.compile(r'^(?:\d[ -]*?){13,16}$')
def sanitize(self, key, value):
if value is None:
return
if isinstance(value, six.string_types) and self.VALUES_RE.match(value):
return self.MASK
if not key: # key can be a NoneType
return value
key = key.lower()
for field in self.FIELDS:
if field in key:
# store mask as a fixed length for security
return self.MASK
return value
def filter_stacktrace(self, data):
for frame in data.get('frames', []):
if 'vars' not in frame:
continue
frame['vars'] = varmap(self.sanitize, frame['vars'])
def filter_http(self, data):
for n in ('data', 'cookies', 'headers', 'env', 'query_string'):
if n not in data:
continue
if isinstance(data[n], six.string_types) and '=' in data[n]:
# at this point we've assumed it's a standard HTTP query
# or cookie
if n == 'cookies':
delimiter = ';'
else:
delimiter = '&'
data[n] = self._sanitize_keyvals(data[n], delimiter)
else:
data[n] = varmap(self.sanitize, data[n])
if n == 'headers' and 'Cookie' in data[n]:
data[n]['Cookie'] = self._sanitize_keyvals(
data[n]['Cookie'], ';'
)
def filter_extra(self, data):
return varmap(self.sanitize, data)
def _sanitize_keyvals(self, keyvals, delimiter):
sanitized_keyvals = []
for keyval in keyvals.split(delimiter):
keyval = keyval.split('=')
if len(keyval) == 2:
sanitized_keyvals.append((keyval[0], self.sanitize(*keyval)))
else:
sanitized_keyvals.append(keyval)
return delimiter.join('='.join(keyval) for keyval in sanitized_keyvals)
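# Illustrative sketch of the masking behaviour of SanitizePasswordsProcessor
# (the sample values are assumptions, not taken from the original module):
#
#     proc = SanitizePasswordsProcessor(client=None)
#     proc.sanitize('password', 'hunter2')          # -> '********' (field name match)
#     proc.sanitize('note', '4111 1111 1111 1111')  # -> '********' (VALUES_RE match)
#     proc.sanitize('username', 'alice')            # -> 'alice' (left untouched)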
| nikolas/raven-python | raven/processors.py | Python | bsd-3-clause | 3,988 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""HEPData application factories."""
import os
import sys
from invenio_base.app import create_app_factory
from invenio_base.wsgi import create_wsgi_factory
from invenio_config import create_config_loader
from . import config
env_prefix = 'APP'
conf_loader = create_config_loader(config=config, env_prefix=env_prefix)
instance_path = os.getenv(env_prefix + '_INSTANCE_PATH') or \
os.path.join(sys.prefix, 'var', 'hepdata-instance')
static_folder = os.getenv(env_prefix + '_STATIC_FOLDER') or \
os.path.join(instance_path, 'static')
create_api = create_app_factory(
'hepdata',
config_loader=conf_loader,
extension_entry_points=['invenio_base.api_apps'],
blueprint_entry_points=['invenio_base.api_blueprints'],
instance_path=instance_path,
)
create_app = create_app_factory(
'hepdata',
config_loader=conf_loader,
extension_entry_points=['invenio_base.apps'],
blueprint_entry_points=['invenio_base.blueprints'],
wsgi_factory=create_wsgi_factory({'/api': create_api}),
instance_path=instance_path,
static_folder=static_folder,
)
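# Hypothetical local-run sketch (not part of the original module; the debug
# flag and import path are assumptions):
#
#     from hepdata.factory import create_app
#     app = create_app()
#     app.run(debug=True)   # Flask development server; '/api' is handled by create_api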
| HEPData/hepdata3 | hepdata/factory.py | Python | gpl-2.0 | 2,063 | 0 |
from datetime import datetime
from django.db import models
from uuidfield.fields import UUIDField
from access import acl
import amo.models
from translations.fields import save_signal
from mkt.constants import comm as const
class CommunicationPermissionModel(amo.models.ModelBase):
# Read permissions imply write permissions as well.
read_permission_public = models.BooleanField()
read_permission_developer = models.BooleanField()
read_permission_reviewer = models.BooleanField()
read_permission_senior_reviewer = models.BooleanField()
read_permission_mozilla_contact = models.BooleanField()
read_permission_staff = models.BooleanField()
class Meta:
abstract = True
def check_acls(user, obj, acl_type):
"""Check ACLs."""
if acl_type == 'moz_contact':
try:
return user.email in obj.addon.get_mozilla_contacts()
except AttributeError:
return user.email in obj.thread.addon.get_mozilla_contacts()
if acl_type == 'admin':
return acl.action_allowed_user(user, 'Admin', '%')
elif acl_type == 'reviewer':
return acl.action_allowed_user(user, 'Apps', 'Review')
elif acl_type == 'senior_reviewer':
return acl.action_allowed_user(user, 'Apps', 'ReviewEscalated')
else:
raise Exception('Invalid ACL lookup.')
return False
def check_acls_comm_obj(obj, profile):
"""Cross-reference ACLs and Note/Thread permissions."""
if obj.read_permission_public:
return True
if (obj.read_permission_reviewer and
check_acls(profile, obj, 'reviewer')):
return True
if (obj.read_permission_senior_reviewer and
check_acls(profile, obj, 'senior_reviewer')):
return True
if (obj.read_permission_mozilla_contact and
check_acls(profile, obj, 'moz_contact')):
return True
if (obj.read_permission_staff and
check_acls(profile, obj, 'admin')):
return True
return False
def user_has_perm_thread(thread, profile):
"""
Check if the user has read/write permissions on the given thread.
Developers of the add-on used in the thread, users in the CC list,
and users who post to the thread are allowed to access the object.
    Moreover, other object permissions are also checked against the ACLs
of the user.
"""
user_post = CommunicationNote.objects.filter(
author=profile, thread=thread)
user_cc = CommunicationThreadCC.objects.filter(
user=profile, thread=thread)
if user_post.exists() or user_cc.exists():
return True
# User is a developer of the add-on and has the permission to read.
user_is_author = profile.addons.filter(pk=thread.addon_id)
if thread.read_permission_developer and user_is_author.exists():
return True
return check_acls_comm_obj(thread, profile)
def user_has_perm_note(note, profile):
"""
Check if the user has read/write permissions on the given note.
Developers of the add-on used in the note, users in the CC list,
and users who post to the thread are allowed to access the object.
    Moreover, other object permissions are also checked against the ACLs
of the user.
"""
if note.author.id == profile.id:
# Let the dude access his own note.
return True
# User is a developer of the add-on and has the permission to read.
user_is_author = profile.addons.filter(pk=note.thread.addon_id)
if note.read_permission_developer and user_is_author.exists():
return True
return check_acls_comm_obj(note, profile)
class CommunicationThread(CommunicationPermissionModel):
addon = models.ForeignKey('addons.Addon', related_name='threads')
version = models.ForeignKey('versions.Version', related_name='threads',
null=True)
class Meta:
db_table = 'comm_threads'
class CommunicationThreadCC(amo.models.ModelBase):
thread = models.ForeignKey(CommunicationThread,
related_name='thread_cc')
user = models.ForeignKey('users.UserProfile',
related_name='comm_thread_cc')
class Meta:
db_table = 'comm_thread_cc'
unique_together = ('user', 'thread',)
class CommunicationNoteManager(models.Manager):
def with_perms(self, profile, thread):
ids = [note.id for note in self.filter(thread=thread) if
user_has_perm_note(note, profile)]
return self.filter(id__in=ids)
class CommunicationNote(CommunicationPermissionModel):
thread = models.ForeignKey(CommunicationThread, related_name='notes')
author = models.ForeignKey('users.UserProfile', related_name='comm_notes')
note_type = models.IntegerField()
body = models.TextField(null=True)
reply_to = models.ForeignKey('self', related_name='replies', null=True,
blank=True)
read_by_users = models.ManyToManyField('users.UserProfile',
through='CommunicationNoteRead')
objects = CommunicationNoteManager()
class Meta:
db_table = 'comm_thread_notes'
def save(self, *args, **kwargs):
super(CommunicationNote, self).save(*args, **kwargs)
self.thread.modified = self.created
self.thread.save()
class CommunicationNoteRead(models.Model):
user = models.ForeignKey('users.UserProfile')
note = models.ForeignKey(CommunicationNote)
class Meta:
db_table = 'comm_notes_read'
class CommunicationThreadToken(amo.models.ModelBase):
thread = models.ForeignKey(CommunicationThread, related_name='token')
user = models.ForeignKey('users.UserProfile',
related_name='comm_thread_tokens')
uuid = UUIDField(unique=True, auto=True)
use_count = models.IntegerField(default=0,
help_text='Stores the number of times the token has been used')
class Meta:
db_table = 'comm_thread_tokens'
unique_together = ('thread', 'user')
def is_valid(self):
# TODO: Confirm the expiration and max use count values.
timedelta = datetime.now() - self.modified
return (timedelta.days <= const.THREAD_TOKEN_EXPIRY and
self.use_count < const.MAX_TOKEN_USE_COUNT)
def reset_uuid(self):
# Generate a new UUID.
self.uuid = UUIDField()._create_uuid().hex
models.signals.pre_save.connect(save_signal, sender=CommunicationNote,
dispatch_uid='comm_thread_notes_translations')
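# Hypothetical call-site sketch (names are placeholders): the custom manager
# above is what a view would use to list only the notes a profile may read.
#
#     visible_notes = CommunicationNote.objects.with_perms(request.amo_user, thread)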
| Joergen/zamboni | apps/comm/models.py | Python | bsd-3-clause | 6,495 | 0.001232 |
""" This class represents a Queue Node to store values and also
links others Nodes with values."""
class Node:
""" It starts with a value at all times. A note can not be
created without a value associated. """
def __init__(self, value):
self.value = value
self.next = None
""" This class represents a Queue to store values. The Queue starts
with a node called head. Every single element is going to be added
after the last node entered."""
class Queue:
""" The Queue is created with it's size zero and the head element
head is None (undefined)."""
def __init__(self):
self.head = None
self.size = 0
""" It adds a new value. The value is going to be added always after the
last value added. If the Queue has no elements, the value added is
going to be the head/head and also the last/tail value."""
    def enqueue(self, value):
        if (self.head is None):
            self.head = Node(value)
        else:
            pointer = self.head
            while (pointer.next is not None):
                pointer = pointer.next
            pointer.next = Node(value)
        # Count the new node in both branches (the original only counted
        # the very first element).
        self.size += 1
""" This routine removes and also returns the first element. After the
remotion of the element, the head is updated and it turns to be the next
element of the queue (it's next element). If there are no more elements
other than the head, the Queue turns to be empty. If there are no elements
at all, there will be no remotion or return."""
def dequeue(self):
if (self.head is not None):
removed = self.head.value
self.head = self.head.next
self.size -= 1
return removed
""" It shows all the Queue elements one by one in a correct
order. """
def display(self):
pointer = self.head
while (pointer is not None):
print pointer.value
pointer = pointer.next
""" It returns the head node value, but it doesn't remove the
node. """
def head(self):
return self.head.value
""" It verifies whether or not the Queue has elements. If the Queue
doesn't have any elements, the head or head element is going to
be None. """
def is_empty(self):
return self.head is None
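# A minimal usage sketch of the Queue above (output comments are assumptions
# based on the implementation; Python 2 print syntax matches the module).
if __name__ == '__main__':
    queue = Queue()
    queue.enqueue('a')
    queue.enqueue('b')
    queue.enqueue('c')
    queue.display()         # prints a, b, c in insertion order
    print queue.peek()      # 'a' (not removed)
    print queue.dequeue()   # 'a' is removed and returned
    print queue.is_empty()  # False: 'b' and 'c' remain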
| ericxlive/abstract-data-types | abstract-data-types/adt_queue.py | Python | mit | 2,361 | 0.002541 |
NAME = 'django-adminactions'
VERSION = __version__ = (0, 4, 0, 'final', 0)
__author__ = 'sax'
import subprocess
import datetime
import os
def get_version(version=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.a%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return main + sub
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
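# Hedged examples of what get_version() derives from a VERSION tuple (the
# tuples are chosen for illustration, not taken from this project's history):
#
#     get_version((0, 4, 0, 'final', 0))  # -> '0.4'
#     get_version((0, 4, 1, 'beta', 2))   # -> '0.4.1b2'
#     get_version((0, 5, 0, 'rc', 1))     # -> '0.5c1'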
| updatengine/updatengine-server | adminactions/__init__.py | Python | gpl-2.0 | 1,648 | 0.002427 |
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""`APIdoc` skin.
$Id$
"""
__docformat__ = "reStructuredText"
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class apidoc(IBrowserRequest):
"""The `apidoc` layer."""
class APIDOC(apidoc, IDefaultBrowserLayer):
"""The `APIDOC` skin."""
# BBB 2006/02/18, to be removed after 12 months
import zope.app.skins
zope.app.skins.set('APIDOC', APIDOC)
| Donkyhotay/MoonPy | zope/app/apidoc/browser/skin.py | Python | gpl-3.0 | 1,074 | 0.003724 |
# Copyright (c) 2013, Kevin Greenan (kmgreen2@gmail.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution. THIS SOFTWARE IS
# PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| tsg-/pyeclib | pyeclib/__init__.py | Python | bsd-2-clause | 1,348 | 0 |
"""
tflite backend (https://github.com/tensorflow/tensorflow/lite)
"""
# pylint: disable=unused-argument,missing-docstring,useless-super-delegation
from threading import Lock
try:
# try dedicated tflite package first
import tflite_runtime
import tflite_runtime.interpreter as tflite
_version = tflite_runtime.__version__
_git_version = tflite_runtime.__git_version__
except:
# fall back to tflite bundled in tensorflow
import tensorflow as tf
from tensorflow.lite.python import interpreter as tflite
_version = tf.__version__
_git_version = tf.__git_version__
import backend
class BackendTflite(backend.Backend):
def __init__(self):
super(BackendTflite, self).__init__()
self.sess = None
self.lock = Lock()
def version(self):
return _version + "/" + _git_version
def name(self):
return "tflite"
def image_format(self):
# tflite is always NHWC
return "NHWC"
def load(self, model_path, inputs=None, outputs=None):
self.sess = tflite.Interpreter(model_path=model_path)
self.sess.allocate_tensors()
# keep input/output name to index mapping
self.input2index = {i["name"]: i["index"] for i in self.sess.get_input_details()}
self.output2index = {i["name"]: i["index"] for i in self.sess.get_output_details()}
# keep input/output names
self.inputs = list(self.input2index.keys())
self.outputs = list(self.output2index.keys())
return self
def predict(self, feed):
self.lock.acquire()
# set inputs
for k, v in self.input2index.items():
self.sess.set_tensor(v, feed[k])
self.sess.invoke()
# get results
res = [self.sess.get_tensor(v) for _, v in self.output2index.items()]
self.lock.release()
return res
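# Hypothetical usage sketch (model path, input shape and dtype are placeholders,
# not part of the original module):
#
#     import numpy as np
#     b = BackendTflite()
#     b.load("/tmp/mobilenet_v1.tflite")
#     feed = {b.inputs[0]: np.zeros((1, 224, 224, 3), dtype=np.float32)}
#     outputs = b.predict(feed)   # list of numpy arrays, one per output tensor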
| plaidml/plaidml | mlperf/backend_tflite.py | Python | apache-2.0 | 1,874 | 0.001601 |
from __future__ import with_statement
from sympy import Symbol, exp, Integer, Float, sin, cos, log, Poly, Lambda, \
Function, I, S, sqrt, srepr, Rational, Tuple, Matrix, Interval
from sympy.abc import x, y
from sympy.core.sympify import sympify, _sympify, SympifyError, kernS
from sympy.core.decorators import _sympifyit
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.geometry import Point, Line
from sympy.functions.combinatorial.factorials import factorial, factorial2
from sympy.abc import _clash, _clash1, _clash2
from sympy.core.compatibility import HAS_GMPY
from sympy import mpmath
def test_439():
v = sympify("exp(x)")
assert v == exp(x)
assert type(v) == type(exp(x))
assert str(type(v)) == str(type(exp(x)))
def test_sympify1():
assert sympify("x") == Symbol("x")
assert sympify(" x") == Symbol("x")
assert sympify(" x ") == Symbol("x")
# 1778
n1 = Rational(1, 2)
assert sympify('--.5') == n1
assert sympify('-1/2') == -n1
assert sympify('-+--.5') == -n1
assert sympify('-.[3]') == Rational(-1, 3)
assert sympify('.[3]') == Rational(1, 3)
assert sympify('+.[3]') == Rational(1, 3)
assert sympify('+0.[3]*10**-2') == Rational(1, 300)
assert sympify('.[052631578947368421]') == Rational(1, 19)
assert sympify('.0[526315789473684210]') == Rational(1, 19)
assert sympify('.034[56]') == Rational(1711, 49500)
# options to make reals into rationals
assert sympify('1.22[345]', rational=True) == \
1 + Rational(22, 100) + Rational(345, 99900)
assert sympify('2/2.6', rational=True) == Rational(10, 13)
assert sympify('2.6/2', rational=True) == Rational(13, 10)
assert sympify('2.6e2/17', rational=True) == Rational(260, 17)
assert sympify('2.6e+2/17', rational=True) == Rational(260, 17)
assert sympify('2.6e-2/17', rational=True) == Rational(26, 17000)
assert sympify('2.1+3/4', rational=True) == \
Rational(21, 10) + Rational(3, 4)
assert sympify('2.234456', rational=True) == Rational(279307, 125000)
assert sympify('2.234456e23', rational=True) == 223445600000000000000000
assert sympify('2.234456e-23', rational=True) == \
Rational(279307, 12500000000000000000000000000)
assert sympify('-2.234456e-23', rational=True) == \
Rational(-279307, 12500000000000000000000000000)
assert sympify('12345678901/17', rational=True) == \
Rational(12345678901, 17)
assert sympify('1/.3 + x', rational=True) == Rational(10, 3) + x
# make sure longs in fractions work
assert sympify('222222222222/11111111111') == \
Rational(222222222222, 11111111111)
# ... even if they come from repetend notation
assert sympify('1/.2[123456789012]') == Rational(333333333333, 70781892967)
# ... or from high precision reals
assert sympify('.1234567890123456', rational=True) == \
Rational(19290123283179, 156250000000000)
def test_sympify_Fraction():
try:
import fractions
except ImportError:
pass
else:
value = sympify(fractions.Fraction(101, 127))
assert value == Rational(101, 127) and type(value) is Rational
def test_sympify_gmpy():
if HAS_GMPY:
if HAS_GMPY == 2:
import gmpy2 as gmpy
elif HAS_GMPY == 1:
import gmpy
value = sympify(gmpy.mpz(1000001))
assert value == Integer(1000001) and type(value) is Integer
value = sympify(gmpy.mpq(101, 127))
assert value == Rational(101, 127) and type(value) is Rational
@conserve_mpmath_dps
def test_sympify_mpmath():
value = sympify(mpmath.mpf(1.0))
assert value == Float(1.0) and type(value) is Float
mpmath.mp.dps = 12
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-12")) is True
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-13")) is False
mpmath.mp.dps = 6
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-5")) is True
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-6")) is False
assert sympify(mpmath.mpc(1.0 + 2.0j)) == Float(1.0) + Float(2.0)*I
def test_sympify2():
class A:
def _sympy_(self):
return Symbol("x")**3
a = A()
assert _sympify(a) == x**3
assert sympify(a) == x**3
assert a == x**3
def test_sympify3():
assert sympify("x**3") == x**3
assert sympify("x^3") == x**3
assert sympify("1/2") == Integer(1)/2
raises(SympifyError, lambda: _sympify('x**3'))
raises(SympifyError, lambda: _sympify('1/2'))
def test_sympify_keywords():
raises(SympifyError, lambda: sympify('if'))
raises(SympifyError, lambda: sympify('for'))
raises(SympifyError, lambda: sympify('while'))
raises(SympifyError, lambda: sympify('lambda'))
def test_sympify_float():
assert sympify("1e-64") != 0
assert sympify("1e-20000") != 0
def test_sympify_bool():
"""Test that sympify accepts boolean values
and that output leaves them unchanged"""
assert sympify(True) is True
assert sympify(False) is False
def test_sympyify_iterables():
ans = [Rational(3, 10), Rational(1, 5)]
assert sympify(['.3', '.2'], rational=True) == ans
assert sympify(set(['.3', '.2']), rational=True) == set(ans)
assert sympify(tuple(['.3', '.2']), rational=True) == Tuple(*ans)
assert sympify(dict(x=0, y=1)) == {x: 0, y: 1}
assert sympify(['1', '2', ['3', '4']]) == [S(1), S(2), [S(3), S(4)]]
def test_sympify4():
class A:
def _sympy_(self):
return Symbol("x")
a = A()
assert _sympify(a)**3 == x**3
assert sympify(a)**3 == x**3
assert a == x
def test_sympify_text():
assert sympify('some') == Symbol('some')
assert sympify('core') == Symbol('core')
assert sympify('True') is True
assert sympify('False') is False
assert sympify('Poly') == Poly
assert sympify('sin') == sin
def test_sympify_function():
assert sympify('factor(x**2-1, x)') == -(1 - x)*(x + 1)
assert sympify('sin(pi/2)*cos(pi)') == -Integer(1)
def test_sympify_poly():
p = Poly(x**2 + x + 1, x)
assert _sympify(p) is p
assert sympify(p) is p
def test_sympify_factorial():
assert sympify('x!') == factorial(x)
assert sympify('(x+1)!') == factorial(x + 1)
assert sympify('(1 + y*(x + 1))!') == factorial(1 + y*(x + 1))
assert sympify('(1 + y*(x + 1)!)^2') == (1 + y*factorial(x + 1))**2
assert sympify('y*x!') == y*factorial(x)
assert sympify('x!!') == factorial2(x)
assert sympify('(x+1)!!') == factorial2(x + 1)
assert sympify('(1 + y*(x + 1))!!') == factorial2(1 + y*(x + 1))
assert sympify('(1 + y*(x + 1)!!)^2') == (1 + y*factorial2(x + 1))**2
assert sympify('y*x!!') == y*factorial2(x)
assert sympify('factorial2(x)!') == factorial(factorial2(x))
raises(SympifyError, lambda: sympify("+!!"))
raises(SympifyError, lambda: sympify(")!!"))
raises(SympifyError, lambda: sympify("!"))
raises(SympifyError, lambda: sympify("(!)"))
raises(SympifyError, lambda: sympify("x!!!"))
def test_sage():
    # how to effectively test for the _sage_() method without having SAGE
# installed?
assert hasattr(x, "_sage_")
assert hasattr(Integer(3), "_sage_")
assert hasattr(sin(x), "_sage_")
assert hasattr(cos(x), "_sage_")
assert hasattr(x**2, "_sage_")
assert hasattr(x + y, "_sage_")
assert hasattr(exp(x), "_sage_")
assert hasattr(log(x), "_sage_")
def test_bug496():
assert sympify("a_") == Symbol("a_")
assert sympify("_a") == Symbol("_a")
@XFAIL
def test_lambda():
x = Symbol('x')
assert sympify('lambda: 1') == Lambda((), 1)
assert sympify('lambda x: 2*x') == Lambda(x, 2*x)
assert sympify('lambda x, y: 2*x+y') == Lambda([x, y], 2*x + y)
def test_lambda_raises():
with raises(SympifyError):
_sympify('lambda: 1')
def test_sympify_raises():
raises(SympifyError, lambda: sympify("fx)"))
def test__sympify():
x = Symbol('x')
f = Function('f')
# positive _sympify
assert _sympify(x) is x
assert _sympify(f) is f
assert _sympify(1) == Integer(1)
assert _sympify(0.5) == Float("0.5")
assert _sympify(1 + 1j) == 1.0 + I*1.0
class A:
def _sympy_(self):
return Integer(5)
a = A()
assert _sympify(a) == Integer(5)
# negative _sympify
raises(SympifyError, lambda: _sympify('1'))
raises(SympifyError, lambda: _sympify([1, 2, 3]))
def test_sympifyit():
x = Symbol('x')
y = Symbol('y')
@_sympifyit('b', NotImplemented)
def add(a, b):
return a + b
assert add(x, 1) == x + 1
assert add(x, 0.5) == x + Float('0.5')
assert add(x, y) == x + y
assert add(x, '1') == NotImplemented
@_sympifyit('b')
def add_raises(a, b):
return a + b
assert add_raises(x, 1) == x + 1
assert add_raises(x, 0.5) == x + Float('0.5')
assert add_raises(x, y) == x + y
raises(SympifyError, lambda: add_raises(x, '1'))
def test_int_float():
class F1_1(object):
def __float__(self):
return 1.1
class F1_1b(object):
"""
This class is still a float, even though it also implements __int__().
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
class F1_1c(object):
"""
        This class is still a float, because its _sympy_() returns a Float
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
def _sympy_(self):
return Float(1.1)
class I5(object):
def __int__(self):
return 5
class I5b(object):
"""
This class implements both __int__() and __float__(), so it will be
treated as Float in SymPy. One could change this behavior, by using
float(a) == int(a), but deciding that integer-valued floats represent
exact numbers is arbitrary and often not correct, so we do not do it.
If, in the future, we decide to do it anyway, the tests for I5b need to
be changed.
"""
def __float__(self):
return 5.0
def __int__(self):
return 5
class I5c(object):
"""
This class implements both __int__() and __float__(), but also
a _sympy_() method, so it will be Integer.
"""
def __float__(self):
return 5.0
def __int__(self):
return 5
def _sympy_(self):
return Integer(5)
i5 = I5()
i5b = I5b()
i5c = I5c()
f1_1 = F1_1()
f1_1b = F1_1b()
f1_1c = F1_1c()
assert sympify(i5) == 5
assert isinstance(sympify(i5), Integer)
assert sympify(i5b) == 5
assert isinstance(sympify(i5b), Float)
assert sympify(i5c) == 5
assert isinstance(sympify(i5c), Integer)
assert abs(sympify(f1_1) - 1.1) < 1e-5
assert abs(sympify(f1_1b) - 1.1) < 1e-5
assert abs(sympify(f1_1c) - 1.1) < 1e-5
assert _sympify(i5) == 5
assert isinstance(_sympify(i5), Integer)
assert _sympify(i5b) == 5
assert isinstance(_sympify(i5b), Float)
assert _sympify(i5c) == 5
assert isinstance(_sympify(i5c), Integer)
assert abs(_sympify(f1_1) - 1.1) < 1e-5
assert abs(_sympify(f1_1b) - 1.1) < 1e-5
assert abs(_sympify(f1_1c) - 1.1) < 1e-5
def test_issue1034():
a = sympify('Integer(4)')
assert a == Integer(4)
assert a.is_Integer
def test_issue883():
a = [3, 2.0]
assert sympify(a) == [Integer(3), Float(2.0)]
assert sympify(tuple(a)) == Tuple(Integer(3), Float(2.0))
assert sympify(set(a)) == set([Integer(3), Float(2.0)])
def test_S_sympify():
assert S(1)/2 == sympify(1)/2
assert (-2)**(S(1)/2) == sqrt(2)*I
def test_issue1689():
assert srepr(S(1.0 + 0J)) == srepr(S(1.0)) == srepr(Float(1.0))
def test_issue1699_None():
assert S(None) is None
def test_issue3218():
assert sympify("x+\ny") == x + y
def test_issue1889_builtins():
C = Symbol('C')
vars = {}
vars['C'] = C
exp1 = sympify('C')
assert exp1 == C # Make sure it did not get mixed up with sympy.C
exp2 = sympify('C', vars)
assert exp2 == C # Make sure it did not get mixed up with sympy.C
def test_geometry():
p = sympify(Point(0, 1))
assert p == Point(0, 1) and type(p) == Point
L = sympify(Line(p, (1, 0)))
assert L == Line((0, 1), (1, 0)) and type(L) == Line
def test_kernS():
s = '-1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x)))'
    # when issue 1497 is fixed, this should no longer pass: the expression
    # should be unchanged
assert -1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) == -1
# sympification should not allow the constant to enter a Mul
# or else the structure can change dramatically
ss = kernS(s)
assert ss != -1 and ss.simplify() == -1
s = '-1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x)))'.replace(
'x', '_kern')
ss = kernS(s)
assert ss != -1 and ss.simplify() == -1
# issue 3588
assert kernS('Interval(-1,-2 - 4*(-3))') == Interval(-1, 10)
assert kernS('_kern') == Symbol('_kern')
assert kernS('E**-(x)') == exp(-x)
e = 2*(x + y)*y
assert kernS(['2*(x + y)*y', ('2*(x + y)*y',)]) == [e, (e,)]
assert kernS('-(2*sin(x)**2 + 2*sin(x)*cos(x))*y/2') == \
-y*(2*sin(x)**2 + 2*sin(x)*cos(x))/2
def test_issue_3441_3453():
assert S('[[1/3,2], (2/5,)]') == [[Rational(1, 3), 2], (Rational(2, 5),)]
assert S('[[2/6,2], (2/4,)]') == [[Rational(1, 3), 2], (Rational(1, 2),)]
assert S('[[[2*(1)]]]') == [[[2]]]
assert S('Matrix([2*(1)])') == Matrix([2])
def test_issue_2497():
assert str(S("Q & C", locals=_clash1)) == 'And(C, Q)'
assert str(S('pi(x)', locals=_clash2)) == 'pi(x)'
assert str(S('pi(C, Q)', locals=_clash)) == 'pi(C, Q)'
locals = {}
exec "from sympy.abc import Q, C" in locals
assert str(S('C&Q', locals)) == 'And(C, Q)'
|
lidavidm/mathics-heroku
|
venv/lib/python2.7/site-packages/sympy/core/tests/test_sympify.py
|
Python
|
gpl-3.0
| 14,157
| 0.000212
|
from .ScatterplotStructure import ScatterplotStructure
from .BasicHTMLFromScatterplotStructure import BasicHTMLFromScatterplotStructure
from scattertext.viz.PairPlotFromScattertextStructure import PairPlotFromScatterplotStructure
from .VizDataAdapter import VizDataAdapter
from .HTMLSemioticSquareViz import HTMLSemioticSquareViz
|
JasonKessler/scattertext
|
scattertext/viz/__init__.py
|
Python
|
apache-2.0
| 329
| 0.009119
|
'''
Created by auto_sdk on 2013.11.26
'''
from top.api.base import RestApi
class PictureIsreferencedGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.picture_id = None
def getapiname(self):
return 'taobao.picture.isreferenced.get'
|
colaftc/webtool
|
top/api/rest/PictureIsreferencedGetRequest.py
|
Python
|
mit
| 318
| 0.028302
|
"""
Meteorology visualisation examples
==================================
"""
|
pp-mo/iris
|
docs/iris/example_code/Meteorology/__init__.py
|
Python
|
lgpl-3.0
| 78
| 0
|
def test_assert():
assert 'soup' == 'soup'
def test_pass():
pass
def test_fail():
assert False
test_fail.will_fail = True
|
wilas/lab-ci
|
samples/py_garden/py_simple_tdd/test_nose_flat_play.py
|
Python
|
apache-2.0
| 137
| 0.021898
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# Proceed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proceed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proceed. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
# ---------------------------------------------------------------------------
import sys
import os
import random
import tempfile
from datetime import date
# ---------------------------------------------------------------------------
class GenName():
"""
@authors: Brigitte Bigi
@contact: brigitte.bigi@gmail.com
@license: GPL
    @summary: A class to generate a random file name for a non-existing file.
"""
def __init__(self,extension=""):
self.name = "/"
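        # "/" always exists, so the loop below generates at least one candidate
        # name and keeps going until the name does not clash with an existing file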
while (os.path.exists(self.name)==True):
self.set_name(extension)
def set_name(self, extension):
"""
Set a new file name.
"""
        # random integer suffix in [0, 10000)
randval = str(int(random.random()*10000))
# process pid
pid = str(os.getpid())
# today's date
today = str(date.today())
# filename
filename = "tmp_"+today+"_"+pid+"_"+randval
# final file name is path/filename
self.name = filename + extension
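        # e.g. extension=".pdf" yields a name like "tmp_2014-03-02_4242_1234.pdf"
        # (today's date, process id, random suffix)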
def get_name(self):
"""
Get the current file name.
"""
return str(self.name)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
print GenName().get_name()
# ---------------------------------------------------------------------------
|
brigittebigi/proceed
|
proceed/src/TagPDF/name.py
|
Python
|
gpl-3.0
| 3,002
| 0.003331
|
"""
POZ Development Application.
"""
import numpy as np
# import cv2
import pozutil as pu
import test_util as tpu
def perspective_test(_y, _z, _ele, _azi):
print "--------------------------------------"
print "Perspective Transform tests"
print
cam = pu.CameraHelper()
# some landmarks in a 3x3 grid pattern
p0 = np.float32([-1., _y - 1.0, _z])
p1 = np.float32([0., _y - 1.0, _z])
p2 = np.float32([1., _y - 1.0, _z])
p3 = np.float32([-1., _y + 1.0, _z])
p4 = np.float32([0., _y + 1.0, _z])
p5 = np.float32([1., _y + 1.0, _z])
p6 = np.float32([-1., _y, _z])
p7 = np.float32([0, _y, _z])
p8 = np.float32([1., _y, _z])
# 3x3 grid array
ppp = np.array([p0, p1, p2, p3, p4, p5, p6, p7, p8])
print "Here are some landmarks in world"
print ppp
puv_acc = []
quv_acc = []
for vp in ppp:
# original view of landmarks
u, v = cam.project_xyz_to_uv(vp)
puv_acc.append(np.float32([u, v]))
# rotated view of landmarks
xyz_r = pu.calc_xyz_after_rotation_deg(vp, _ele, _azi, 0)
u, v = cam.project_xyz_to_uv(xyz_r)
quv_acc.append(np.float32([u, v]))
puv = np.array(puv_acc)
quv = np.array(quv_acc)
# 4-pt "diamond" array
quv4 = np.array([quv[1], quv[4], quv[6], quv[8]])
puv4 = np.array([puv[1], puv[4], puv[6], puv[8]])
print
print "Landmark img coords before rotate:"
print puv
print "Landmark img coords after rotate:"
print quv
print quv4
print
# h, _ = cv2.findHomography(puv, quv)
# hh = cv2.getPerspectiveTransform(puv4, quv4)
# print h
# print hh
# perspectiveTransform needs an extra dimension
puv1 = np.expand_dims(puv, axis=0)
# print "Test perspectiveTransform with findHomography matrix:"
# xpersp = cv2.perspectiveTransform(puv1, h)
# print xpersp
# print "Test perspectiveTransform with getPerspectiveTransform matrix:"
# xpersp = cv2.perspectiveTransform(puv1, hh)
# print xpersp
# print
if __name__ == "__main__":
# robot always knows the Y and Elevation of its camera
# (arbitrary assignments for testing)
known_cam_y = -3.
known_cam_el = 0.0
tests = [(1., 1., tpu.lm_vis_1_1),
(7., 6., tpu.lm_vis_7_6)]
print "--------------------------------------"
print "Landmark Test"
print
test_index = 0
vis_map = tests[test_index][2]
# robot does not know its (X, Z) position
# it will have to solve for it
cam_x = tests[test_index][0]
cam_z = tests[test_index][1]
print "Known (X,Z): ", (cam_x, cam_z)
for key in sorted(vis_map.keys()):
cam_azim = vis_map[key].az + 0. # change offset for testing
cam_elev = vis_map[key].el + known_cam_el
print "-----------"
# print "Known Camera Elev =", cam_elev
xyz = [cam_x, known_cam_y, cam_z]
angs = [cam_azim, cam_elev]
print "Landmark {:s}. Camera Azim = {:8.2f}".format(key, cam_azim)
lm1 = tpu.mark1[key]
f, x, z, a = tpu.landmark_test(lm1, tpu.mark2[key], xyz, angs)
print "Robot is at: {:6.3f},{:6.3f},{:20.14f}".format(x, z, a)
f, x, z, a = tpu.landmark_test(lm1, tpu.mark3[key], xyz, angs)
print "Robot is at: {:6.3f},{:6.3f},{:20.14f}".format(x, z, a)
tpu.pnp_test(key, xyz, angs)
|
mwgit00/poz
|
poz.py
|
Python
|
mit
| 3,487
| 0.000287
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='pysensu-yelp',
version='0.4.4',
provides=['pysensu_yelp'],
description='Emits Yelp-flavored Sensu events to a Sensu Client',
url='https://github.com/Yelp/pysensu-yelp',
author='Yelp Operations Team',
author_email='operations@yelp.com',
packages=find_packages(exclude=['tests']),
install_requires=['six'],
license='Copyright Yelp 2014, all rights reserved',
)
|
Yelp/pysensu-yelp
|
setup.py
|
Python
|
apache-2.0
| 480
| 0
|
# Peerz - P2P python library using ZeroMQ sockets and gevent
# Copyright (C) 2014-2015 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from transitions import Machine
class MessageState(object):
states = ['initialised', 'waiting response', 'complete', 'timedout']
transitions = [
        {'trigger': 'query', 'source': 'initialised',
         'dest': 'waiting response', 'before': '_update', 'after': '_send_query'},
        {'trigger': 'response', 'source': 'waiting response',
         'dest': 'complete', 'before': '_update', 'after': '_completed'},
        {'trigger': 'timeout', 'source': '*',
         'dest': 'timedout', 'before': '_update', 'after': '_completed'},
]
def __init__(self, engine, txid, msg, callback=None, max_duration=5000, max_concurrency=3):
self.engine = engine
self.callback = callback
self.machine = Machine(model=self,
states=self.states,
transitions=self.transitions,
initial='initialised')
self.start = self.last_change = time.time() * 1000
self.max_duration = max_duration
self.max_concurrency = max_concurrency
self.txid = txid
self.times = {}
self.parse_message(msg)
self.query()
def query(self):
pass
def parse_message(self, msg):
self.val = msg.pop(0)
def is_complete(self):
return self.state in ['complete', 'timedout']
def pack_request(self):
return None
@staticmethod
def unpack_response(content):
return None
@staticmethod
def pack_response(content):
return None
def _update(self):
now = time.time() * 1000
self.times.setdefault(self.state, 0.0)
self.times[self.state] += (now - self.last_change)
self.last_change = now
def duration(self):
return time.time() * 1000 - self.start
def latency(self):
return self.times.setdefault('waiting response', 0.0)
def _send_query(self):
pass
def _completed(self):
pass
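# Illustrative note (not part of the original module): a concrete subclass is
# expected to override pack_request()/_send_query()/_completed(); the transitions
# table above then drives it through
#   initialised --query()--> waiting response --response()--> complete
# or, via timeout(), into 'timedout' from any state, after which is_complete()
# returns True and latency() reports the time spent in 'waiting response'.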
|
shendo/peerz
|
peerz/messaging/base.py
|
Python
|
gpl-3.0
| 2,720
| 0.002941
|
import hashlib
puzzle_input = 'iwrupvqb'
current = 0
done = False
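# Brute-force search: append increasing integers to the puzzle input until the
# MD5 hex digest of the combined string starts with six zeros ('000000'), then
# print that integer (Advent of Code 2015, day 4 part 2).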
while not done:
combined_input = puzzle_input + str(current)
solution = hashlib.md5(combined_input.encode())
solution = str(solution.hexdigest())
print(solution)
if solution.startswith('000000'):
done = True
print(current)
current += 1
|
rubiconjosh/aoc-2015
|
day4-part2.py
|
Python
|
mit
| 345
| 0
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
with pytest.raises(ValueError):
cal_clf.fit(X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
# `base-estimator` should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
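    # i.e. the mean over samples of the squared Euclidean distance between the
    # one-hot encoded label vector and the predicted probability vector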
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
    # Test an edge case where _CalibratedClassifier avoids numerical errors
    # in the multiclass normalization step when all the calibrator outputs
    # are zero at once for a given sample, and instead falls back to uniform
    # probabilities.
class ZeroCalibrator():
# This function is called from _CalibratedClassifier.predict_proba.
def predict(self, X):
return np.zeros(X.shape[0])
X, y = make_blobs(n_samples=50, n_features=10, random_state=7,
centers=10, cluster_std=15.0)
clf = DummyClassifier().fit(X, y)
calibrator = ZeroCalibrator()
cal_clf = _CalibratedClassifier(
base_estimator=clf, calibrators=[calibrator], classes=clf.classes_)
probas = cal_clf.predict_proba(X)
# Check that all probabilities are uniformly 1. / clf.n_classes_
assert_allclose(probas, 1. / clf.n_classes_)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
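    # Platt scaling: the fitted (A, B) parametrise the sigmoid
    # p(y=1 | f) = 1 / (1 + exp(A*f + B)), evaluated explicitly below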
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
with pytest.raises(ValueError):
_SigmoidCalibration().fit(np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
with pytest.raises(ValueError):
calibration_curve([1.1], [-0.1], normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
with pytest.raises(ValueError):
calibration_curve(y_true2, y_pred2, strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
    # Test that calibration works when the training set of a train-test
    # split does not contain all classes.
    # Since this test uses LOO, at each iteration the training set lacks
    # one class label.
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def dict_data():
dict_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return dict_data, text_labels
@pytest.fixture
def dict_data_pipeline(dict_data):
X, y = dict_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_dict_pipeline(dict_data, dict_data_pipeline):
"""Test that calibration works in prefit pipeline with transformer
`X` is not array-like, sparse matrix or dataframe at the start.
See https://github.com/scikit-learn/scikit-learn/issues/8710
Also test it can predict without running into validation errors.
See https://github.com/scikit-learn/scikit-learn/issues/19637
"""
X, y = dict_data
clf = dict_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
# Neither the pipeline nor the calibration meta-estimator
# expose the n_features_in_ check on this kind of data.
assert not hasattr(clf, 'n_features_in_')
assert not hasattr(calib_clf, 'n_features_in_')
# Ensure that no error is thrown with predict and predict_proba
calib_clf.predict(X)
calib_clf.predict_proba(X)
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
def test_calibration_inconsistent_prefit_n_features_in():
# Check that `n_features_in_` from prefit base estimator
# is consistent with training set
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
clf = LinearSVC(C=1).fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
msg = "X has 3 features, but LinearSVC is expecting 5 features as input."
with pytest.raises(ValueError, match=msg):
calib_clf.fit(X[:, :3], y)
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
|
glemaitre/scikit-learn
|
sklearn/tests/test_calibration.py
|
Python
|
bsd-3-clause
| 23,376
| 0
|
#! /usr/bin/env python
"""WSGI server interface to mw-render and mw-zip/mw-post"""
import os
import re
import shutil
import signal
import StringIO
import subprocess
import time
import urllib2
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import json
except ImportError:
import simplejson as json
from mwlib import filequeue, log, podclient, utils, wsgi, _version
# ==============================================================================
log = log.Log('mwlib.serve')
# ==============================================================================
def no_job_queue(job_type, collection_id, args):
"""Just spawn a new process for the given job"""
if os.name == 'nt':
kwargs = {}
else:
kwargs = {'close_fds': True}
try:
log.info('queueing %r' % args)
subprocess.Popen(args, **kwargs)
except OSError, exc:
raise RuntimeError('Could not execute command %r: %s' % (
args[0], exc,
))
# ==============================================================================
collection_id_rex = re.compile(r'^[a-z0-9]{16}$')
def make_collection_id(data):
sio = StringIO.StringIO()
for key in (
_version.version,
'metabook',
'base_url',
'script_extension',
'template_blacklist',
'template_exclusion_category',
'login_credentials',
):
sio.write(repr(data.get(key)))
return md5(sio.getvalue()).hexdigest()[:16]
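# Illustrative sketch (not part of the original module): identical POST data
# always maps to the same 16-character ID, which is what lets repeated render
# requests share one cache directory, e.g.
#
#   data = {'metabook': '{"title": "Example"}',          # hypothetical values
#           'base_url': 'http://example.org/w/'}
#   make_collection_id(data) == make_collection_id(dict(data))   # -> True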
# ==============================================================================
def json_response(fn):
"""Decorator wrapping result of decorated function in JSON response"""
def wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
if isinstance(result, wsgi.Response):
return result
return wsgi.Response(
content=json.dumps(result),
headers={'Content-Type': 'application/json'},
)
return wrapper
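# Illustrative sketch (not part of the original module): a decorated handler can
# simply return a plain dict, e.g.
#
#   @json_response
#   def do_ping(self, post_data):        # hypothetical command handler
#       return {'state': 'ok'}
#
# and the wrapper delivers it to the client as an application/json
# wsgi.Response containing {"state": "ok"}.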
# ==============================================================================
class Application(wsgi.Application):
metabook_filename = 'metabook.json'
error_filename = 'errors'
status_filename = 'status'
output_filename = 'output'
pid_filename = 'pid'
zip_filename = 'collection.zip'
mwpostlog_filename = 'mw-post.log'
mwziplog_filename = 'mw-zip.log'
mwrenderlog_filename = 'mw-render.log'
def __init__(self, cache_dir,
mwrender_cmd, mwrender_logfile,
mwzip_cmd, mwzip_logfile,
mwpost_cmd, mwpost_logfile,
queue_dir,
default_writer='rl',
report_from_mail=None,
report_recipients=None,
):
self.cache_dir = utils.ensure_dir(cache_dir)
self.mwrender_cmd = mwrender_cmd
self.mwrender_logfile = mwrender_logfile
self.mwzip_cmd = mwzip_cmd
self.mwzip_logfile = mwzip_logfile
self.mwpost_cmd = mwpost_cmd
self.mwpost_logfile = mwpost_logfile
if queue_dir:
self.queue_job = filequeue.FileJobQueuer(utils.ensure_dir(queue_dir))
else:
self.queue_job = no_job_queue
self.default_writer = default_writer
self.report_from_mail = report_from_mail
self.report_recipients = report_recipients
def dispatch(self, request):
try:
command = request.post_data['command']
except KeyError:
return self.error_response('no command given')
try:
method = getattr(self, 'do_%s' % command)
except AttributeError:
return self.error_response('invalid command %r' % command)
try:
return method(request.post_data)
except Exception, exc:
return self.error_response('error executing command %r: %s' % (
command, exc,
))
@json_response
def error_response(self, error):
if isinstance(error, str):
error = unicode(error, 'utf-8', 'ignore')
elif not isinstance(error, unicode):
error = unicode(repr(error), 'ascii')
self.send_report_mail('error response', error=error)
return {'error': error}
def send_report_mail(self, subject, **kwargs):
if not (self.report_from_mail and self.report_recipients):
return
utils.report(
system='mwlib.serve',
subject=subject,
from_email=self.report_from_mail,
mail_recipients=self.report_recipients,
write_file=False,
**kwargs
)
def get_collection_dir(self, collection_id):
return os.path.join(self.cache_dir, collection_id)
def check_collection_id(self, collection_id):
if not collection_id or not collection_id_rex.match(collection_id):
raise RuntimeError('invalid collection ID %r' % collection_id)
collection_dir = self.get_collection_dir(collection_id)
if not os.path.exists(collection_dir):
raise RuntimeError('no such collection: %r' % collection_id)
def new_collection(self, post_data):
collection_id = make_collection_id(post_data)
collection_dir = self.get_collection_dir(collection_id)
if not os.path.isdir(collection_dir):
log.info('Creating new collection dir %r' % collection_dir)
os.makedirs(collection_dir)
return collection_id
def get_path(self, collection_id, filename, ext=None):
p = os.path.join(self.get_collection_dir(collection_id), filename)
if ext is not None:
p += '.' + ext[:10]
return p
@json_response
def do_render(self, post_data):
metabook_data = post_data.get('metabook')
collection_id = post_data.get('collection_id')
if not (metabook_data or collection_id):
return self.error_response('POST argument metabook or collection_id required')
if metabook_data and collection_id:
return self.error_response('Specify either metabook or collection_id, not both')
try:
base_url = post_data['base_url']
writer = post_data.get('writer', self.default_writer)
except KeyError, exc:
return self.error_response('POST argument required: %s' % exc)
writer_options = post_data.get('writer_options', '')
template_blacklist = post_data.get('template_blacklist', '')
template_exclusion_category = post_data.get('template_exclusion_category', '')
login_credentials = post_data.get('login_credentials', '')
force_render = bool(post_data.get('force_render'))
script_extension = post_data.get('script_extension', '')
if not collection_id:
collection_id = self.new_collection(post_data)
log.info('render %s %s' % (collection_id, writer))
response = {
'collection_id': collection_id,
'writer': writer,
'is_cached': False,
}
pid_path = self.get_path(collection_id, self.pid_filename, writer)
if os.path.exists(pid_path):
log.info('mw-render already running for collection %r' % collection_id)
return response
output_path = self.get_path(collection_id, self.output_filename, writer)
if os.path.exists(output_path):
if force_render:
log.info('removing rendered file %r (forced rendering)' % output_path)
utils.safe_unlink(output_path)
else:
log.info('re-using rendered file %r' % output_path)
response['is_cached'] = True
return response
status_path = self.get_path(collection_id, self.status_filename, writer)
if os.path.exists(status_path):
if force_render:
log.info('removing status file %r (forced rendering)' % status_path)
utils.safe_unlink(status_path)
else:
log.info('status file exists %r' % status_path)
return response
error_path = self.get_path(collection_id, self.error_filename, writer)
if os.path.exists(error_path):
if force_render:
log.info('removing error file %r (forced rendering)' % error_path)
utils.safe_unlink(error_path)
else:
log.info('error file exists %r' % error_path)
return response
if self.mwrender_logfile:
logfile = self.mwrender_logfile
else:
logfile = self.get_path(collection_id, self.mwrenderlog_filename, writer)
args = [
self.mwrender_cmd,
'--logfile', logfile,
'--error-file', error_path,
'--status-file', status_path,
'--writer', writer,
'--output', output_path,
'--pid-file', pid_path,
]
zip_path = self.get_path(collection_id, self.zip_filename)
if os.path.exists(zip_path):
log.info('using existing ZIP file to render %r' % output_path)
args.extend(['--config', zip_path])
if writer_options:
args.extend(['--writer-options', writer_options])
if template_blacklist:
args.extend(['--template-blacklist', template_blacklist])
if template_exclusion_category:
args.extend(['--template-exclusion-category', template_exclusion_category])
else:
if force_render:
return self.error_response('Forced to render document which has not been previously rendered.')
log.info('rendering %r' % output_path)
metabook_path = self.get_path(collection_id, self.metabook_filename)
f = open(metabook_path, 'wb')
f.write(metabook_data)
f.close()
args.extend([
'--metabook', metabook_path,
'--config', base_url,
'--keep-zip', zip_path,
])
if writer_options:
args.extend(['--writer-options', writer_options])
if template_blacklist:
args.extend(['--template-blacklist', template_blacklist])
if template_exclusion_category:
args.extend(['--template-exclusion-category', template_exclusion_category])
if login_credentials:
args.extend(['--login', login_credentials])
if script_extension:
args.extend(['--script-extension', script_extension])
self.queue_job('render', collection_id, args)
return response
    def read_status_file(self, collection_id, writer):
        status_path = self.get_path(collection_id, self.status_filename, writer)
        try:
            f = open(status_path, 'rb')
            try:
                return json.loads(f.read())
            finally:
                f.close()
        except (IOError, ValueError):
            return {'progress': 0}
@json_response
def do_render_status(self, post_data):
try:
collection_id = post_data['collection_id']
writer = post_data.get('writer', self.default_writer)
except KeyError, exc:
return self.error_response('POST argument required: %s' % exc)
self.check_collection_id(collection_id)
log.info('render_status %s %s' % (collection_id, writer))
output_path = self.get_path(collection_id, self.output_filename, writer)
if os.path.exists(output_path):
return {
'collection_id': collection_id,
'writer': writer,
'state': 'finished',
}
error_path = self.get_path(collection_id, self.error_filename, writer)
if os.path.exists(error_path):
text = unicode(open(error_path, 'rb').read(), 'utf-8', 'ignore')
self.send_report_mail('rendering failed',
collection_id=collection_id,
writer=writer,
error=text,
)
return {
'collection_id': collection_id,
'writer': writer,
'state': 'failed',
'error': text,
}
return {
'collection_id': collection_id,
'writer': writer,
'state': 'progress',
'status': self.read_status_file(collection_id, writer),
}
@json_response
def do_render_kill(self, post_data):
try:
collection_id = post_data['collection_id']
writer = post_data.get('writer', self.default_writer)
except KeyError, exc:
return self.error_response('POST argument required: %s' % exc)
self.check_collection_id(collection_id)
log.info('render_kill %s %s' % (collection_id, writer))
pid_path = self.get_path(collection_id, self.pid_filename, writer)
killed = False
try:
pid = int(open(pid_path, 'rb').read())
os.kill(pid, signal.SIGINT)
killed = True
except (OSError, ValueError, IOError):
pass
return {
'collection_id': collection_id,
'writer': writer,
'killed': killed,
}
def do_download(self, post_data):
try:
collection_id = post_data['collection_id']
writer = post_data.get('writer', self.default_writer)
except KeyError, exc:
log.ERROR('POST argument required: %s' % exc)
return self.http500()
try:
self.check_collection_id(collection_id)
log.info('download %s %s' % (collection_id, writer))
output_path = self.get_path(collection_id, self.output_filename, writer)
status = self.read_status_file(collection_id, writer)
response = wsgi.Response(content=open(output_path, 'rb'))
os.utime(output_path, None)
if 'content_type' in status:
response.headers['Content-Type'] = status['content_type'].encode('utf-8', 'ignore')
else:
log.warn('no content type in status file')
if 'file_extension' in status:
response.headers['Content-Disposition'] = 'inline;filename="collection.%s"' % (
status['file_extension'].encode('utf-8', 'ignore'),
)
else:
log.warn('no file extension in status file')
return response
except Exception, exc:
log.ERROR('exception in do_download(): %r' % exc)
return self.http500()
@json_response
def do_zip_post(self, post_data):
try:
metabook_data = post_data['metabook']
base_url = post_data['base_url']
except KeyError, exc:
return self.error_response('POST argument required: %s' % exc)
template_blacklist = post_data.get('template_blacklist', '')
template_exclusion_category = post_data.get('template_exclusion_category', '')
login_credentials = post_data.get('login_credentials', '')
script_extension = post_data.get('script_extension', '')
pod_api_url = post_data.get('pod_api_url', '')
if pod_api_url:
result = json.loads(urllib2.urlopen(pod_api_url, data="any").read())
post_url = result['post_url']
response = {
'state': 'ok',
'redirect_url': result['redirect_url'],
}
else:
try:
post_url = post_data['post_url']
except KeyError:
return self.error_response('POST argument required: post_url')
response = {'state': 'ok'}
collection_id = self.new_collection(post_data)
log.info('zip_post %s %s' % (collection_id, pod_api_url))
pid_path = self.get_path(collection_id, self.pid_filename, 'zip')
if os.path.exists(pid_path):
log.info('mw-zip/mw-post already running for collection %r' % collection_id)
return response
zip_path = self.get_path(collection_id, self.zip_filename)
if os.path.exists(zip_path):
log.info('POSTing ZIP file %r' % zip_path)
if self.mwpost_logfile:
logfile = self.mwpost_logfile
else:
logfile = self.get_path(collection_id, self.mwpostlog_filename)
args = [
self.mwpost_cmd,
'--logfile', logfile,
'--posturl', post_url,
'--input', zip_path,
'--pid-file', pid_path,
]
else:
log.info('Creating and POSting ZIP file %r' % zip_path)
if self.mwzip_logfile:
logfile = self.mwzip_logfile
else:
logfile = self.get_path(collection_id, self.mwziplog_filename)
metabook_path = self.get_path(collection_id, self.metabook_filename)
f = open(metabook_path, 'wb')
f.write(metabook_data)
f.close()
args = [
self.mwzip_cmd,
'--logfile', logfile,
'--metabook', metabook_path,
'--config', base_url,
'--posturl', post_url,
'--output', zip_path,
'--pid-file', pid_path,
]
if template_blacklist:
args.extend(['--template-blacklist', template_blacklist])
if template_exclusion_category:
args.extend(['--template-exclusion-category', template_exclusion_category])
if login_credentials:
args.extend(['--login', login_credentials])
if script_extension:
args.extend(['--script-extension', script_extension])
self.queue_job('post', collection_id, args)
return response
# ==============================================================================
def clean_cache(max_age, cache_dir):
"""Clean all subdirectories of cache_dir whose mtime is before now-max_age
@param max_age: max age of directories in seconds
@type max_age: int
@param cache_dir: cache directory
@type cache_dir: basestring
"""
now = time.time()
for d in os.listdir(cache_dir):
path = os.path.join(cache_dir, d)
if not os.path.isdir(path) or not collection_id_rex.match(d):
log.warn('unknown item in cache dir %r: %r' % (cache_dir, d))
continue
if now - os.stat(path).st_mtime < max_age:
continue
try:
log.info('removing directory %r' % path)
shutil.rmtree(path)
except Exception, exc:
log.ERROR('could not remove directory %r: %s' % (path, exc))
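# Illustrative call (not part of the original module): a periodic maintenance
# job could purge collections untouched for a day with
#
#   clean_cache(24 * 60 * 60, '/var/cache/collections')   # hypothetical cache dir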
|
cscott/wikiserver
|
mwlib/serve.py
|
Python
|
gpl-2.0
| 19,146
| 0.004805
|
#!/usr/bin/env python
"""
Simple-stupid time tracker script
=================================
Timetrack
Copyright (C) 2010, Branko Vukelic <studio@brankovukelic.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import getopt
import os
import re
import sqlite3
HOME_DIR = os.path.expanduser('~')
DEFAULT_FILE = os.path.join(HOME_DIR, 'timesheet.db')
PID_RE = re.compile(r'^[A-Za-z]{3}$')
def optpair(opts):
""" Pair option switches and their own arguments """
optdict = {}
for sw, a in opts:
optdict[sw] = a
return optdict
def check_pid(pname):
""" Check project name, return true if it is correct """
if PID_RE.match(pname):
return True
return False
def generate_timestamp():
from datetime import datetime
timenow = datetime.now()
return (datetime.strftime(timenow, '%Y-%m-%d %H:%M:%S'), timenow)
def getduration(seconds):
seconds = int(seconds)
hours = seconds // 3600
seconds = seconds - hours * 3600
minutes = seconds // 60
seconds = seconds - minutes * 60
return (hours, minutes, seconds)
def get_pids(connection):
""" Get unique PIDs from database """
pids = []
c = connection.cursor()
c.execute("SELECT DISTINCT pid FROM timesheet ORDER BY pid ASC;")
for pid in c:
pids.append(pid[0])
c.close()
return pids
def get_times(connection, pidfilter):
""" Return a dictionary of PIDs with [job, time] pairs """
if pidfilter:
pids = [pidfilter]
else:
pids = get_pids(connection)
pid_times = {}
for pid in pids:
c = connection.cursor()
c.execute("SELECT desc, TOTAL(dur) FROM timesheet WHERE pid = ? GROUP BY desc;", (pid,))
results = []
for result in c:
results.append(result)
pid_times[pid] = results
c.close()
return pid_times
def read_stats(connection, pidfilter):
pid_times = get_times(connection, pidfilter)
if not pid_times:
print "No data in database. Exiting."
return True
for k in pid_times.keys():
print ""
print "=========================="
print "PID: %s" % k
print "=========================="
print ""
for j in pid_times[k]:
print "Job: %s" % j[0]
print "Time: %02d:%02d:%02d" % getduration(j[1])
print ""
print "=========================="
print ""
def export_tsv(connection, filename, pidfilter):
pid_times = get_times(connection, pidfilter)
if not pid_times:
print "No data in database. Exiting."
return True
f = open(filename, 'w')
# Write header
f.write('PID\tJob\tTime\n')
for k in pid_times.keys():
for j in pid_times[k]:
f.write('%s\t%s\t%s\n' % (k, j[0], j[1]))
f.close()
def clean_string(s):
""" Escapes characters in a string for SQL """
return s.replace(';', '\\;').replace('\'', '\\\'')
def add_data(connection, pidfilter):
""" Gives user a prompt and writes data to the fhandle file """
import readline
print "Press Ctrl+C to exit."
try:
while True:
pid = pidfilter
while not check_pid(pid):
pid = raw_input("PID: ")
if not check_pid(pid):
print "'%s' is not a valid pid, please use a 3 letter sequence" % pid
print "Project ID is %s" % pid
desc = raw_input("Job: ")
desc = clean_string(desc)
if pid and desc:
timestamp, starttime = generate_timestamp()
print "Timer started at %s" % timestamp
raw_input("Press Enter to stop the timer or Ctrl+C to abort")
endtimestamp, endtime = generate_timestamp()
print "Timer stopped at %s" % endtimestamp
delta = endtime - starttime
dsecs = delta.seconds
print "Total duration was %s seconds" % dsecs
args = (timestamp, pid, desc, dsecs)
c = connection.cursor()
try:
c.execute("INSERT INTO timesheet (timestamp, pid, desc, dur) VALUES (?, ?, ?, ?)", args)
except:
connection.rollback()
print "DB error: Data was not written"
raise
else:
connection.commit()
c.close()
print "\n"
except KeyboardInterrupt:
connection.rollback()
def usage():
print """Timetrack
Copyright (c) 2010, Branko Vukelic
Released under GNU/GPL v3, see LICENSE file for details.
Usage: tt.py [-a] [-r] [-t FILE] [-p PID]
[--add] [--read] [--tsv FILE] [--pid PID] [dbfile]
-r --read : Display the stats.
-a --add : Start timer session (default action).
-t --tsv : Export into a tab-separated table (TSV). FILE is the filename to
use for exporting.
-p --pid : With argument 'PID' (3 letters, no numbers or non-alphanumeric
characters). Limits all operations to a single PID.
dbfile : Use this file as database, instead of the default file. If the
specified file does not exist, it will be created.
More information at:
http://github.com/foxbunny/timetrack
"""
def main(argv):
try:
opts, args = getopt.getopt(argv, 'rat:p:', ['read', 'add', 'tsv=', 'pid='])
except getopt.GetoptError:
usage()
sys.exit(2)
optdict = optpair(opts)
statsfile = len(args) and args[0] or DEFAULT_FILE
print "Using stats file '%s'" % statsfile
pidfilter = optdict.get('-p', '') or optdict.get('--pid', '')
if pidfilter:
if check_pid(pidfilter):
print "Using project ID filter '%s'" % pidfilter
else:
print "Project ID filter '%s' is invalid and will be ignored." % pidfilter
print "Opening connection to database."
try:
connection = sqlite3.connect(statsfile)
except:
print "Database error. Exiting."
sys.exit(2)
print "Initialize table if none exists"
c = connection.cursor()
try:
c.execute("""CREATE TABLE IF NOT EXISTS timesheet (
id INTEGER PRIMARY KEY AUTOINCREMENT,
timestamp DATETIME DEFAULT (datetime('now')),
pid VARCHAR(3) NOT NULL,
desc VARCHAR(255) NOT NULL,
dur INTEGER NOT NULL);""")
except:
connection.rollback()
raise
else:
connection.commit()
c.close()
if ('-r' in optdict.keys()) or ('--read' in optdict.keys()):
read_stats(connection, pidfilter)
elif ('-t' in optdict.keys()) or ('--tsv' in optdict.keys()):
filename = optdict.get('-t', None) or optdict.get('--tsv')
export_tsv(connection, filename, pidfilter)
else:
add_data(connection, pidfilter)
print "Closing connection to database"
connection.close()
sys.exit(1)
if __name__ == '__main__':
main(sys.argv[1:])
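A minimal usage sketch of the pure helpers above (an editor-added illustration, not part of the original tt.py); it assumes the module is importable as `tt` and touches neither the SQLite database nor the interactive prompt loop.
import tt

# PID validation: exactly three letters, nothing else
assert tt.check_pid('abc') is True
assert tt.check_pid('ab1') is False

# Seconds -> (hours, minutes, seconds)
assert tt.getduration(3725) == (1, 2, 5)

# Pair getopt-style switches with their arguments
assert tt.optpair([('-p', 'abc'), ('-r', '')]) == {'-p': 'abc', '-r': ''}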
|
foxbunny/Timetrack
|
tt.py
|
Python
|
gpl-3.0
| 7,554
| 0.003574
|
from typing import Dict, Set
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.models.models import Sample, Experiment, OriginalFile
class SurveyJob(models.Model):
"""Records information about a Surveyor Job."""
class Meta:
db_table = "survey_jobs"
source_type = models.CharField(max_length=256)
success = models.NullBooleanField(null=True)
no_retry = models.BooleanField(default=False)
nomad_job_id = models.CharField(max_length=256, null=True)
ram_amount = models.IntegerField(default=256)
# The start time of the job
start_time = models.DateTimeField(null=True)
# The end time of the job
end_time = models.DateTimeField(null=True)
# This field represents how many times this job has been
# retried. It starts at 0 and each time the job has to be retried
# it will be incremented.
num_retries = models.IntegerField(default=0)
# This field indicates whether or not this job has been retried
# already or not.
retried = models.BooleanField(default=False)
# This field allows jobs to specify why they failed.
failure_reason = models.TextField(null=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(SurveyJob, self).save(*args, **kwargs)
def get_properties(self) -> Dict:
""" Return all associated SurveyJobKeyValues as a dict"""
return {pair.key: pair.value for pair in self.surveyjobkeyvalue_set.all()}
def get_accession_code(self):
""" Return `experiment_accession_code`, the most important code."""
try:
kvp = self.surveyjobkeyvalue_set.get(key="experiment_accession_code")
return kvp.value
except:
return None
def __str__(self):
return "SurveyJob " + str(self.pk) + ": " + str(self.source_type)
class SurveyJobKeyValue(models.Model):
"""Tracks additional fields for SurveyJobs.
Useful for fields that would be sparsely populated if they were
their own columns. I.e. one source may have an extra field or two
that are worth tracking but are specific to that source.
"""
survey_job = models.ForeignKey(SurveyJob, on_delete=models.CASCADE)
key = models.CharField(max_length=256)
value = models.CharField(max_length=256)
class Meta:
db_table = "survey_job_key_values"
class ProcessorJob(models.Model):
"""Records information about running a processor."""
class Meta:
db_table = "processor_jobs"
# This field will contain an enumerated value specifying which
# processor pipeline was applied during the processor job.
pipeline_applied = models.CharField(max_length=256)
original_files = models.ManyToManyField('OriginalFile', through='ProcessorJobOriginalFileAssociation')
datasets = models.ManyToManyField('DataSet', through='ProcessorJobDataSetAssociation')
no_retry = models.BooleanField(default=False)
# Resources
ram_amount = models.IntegerField(default=2048)
volume_index = models.CharField(max_length=3, null=True)
# Tracking
start_time = models.DateTimeField(null=True)
end_time = models.DateTimeField(null=True)
success = models.NullBooleanField(null=True)
nomad_job_id = models.CharField(max_length=256, null=True)
# This field represents how many times this job has been
# retried. It starts at 0 and each time the job has to be retried
# it will be incremented.
num_retries = models.IntegerField(default=0)
# This field indicates whether or not this job has been retried
# already or not.
retried = models.BooleanField(default=False)
# The point of this field is to identify which worker ran the
# job. A few fields may actually be required or something other
# than just an id.
worker_id = models.CharField(max_length=256, null=True)
# This field corresponds to the version number of the
# data_refinery_workers project that was used to run the job.
worker_version = models.CharField(max_length=128, null=True)
# This field allows jobs to specify why they failed.
failure_reason = models.TextField(null=True)
# If the job is retried, this is the id of the new job
retried_job = models.ForeignKey('self', on_delete=models.PROTECT, null=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def get_samples(self) -> Set[Sample]:
samples = set()
for original_file in self.original_files.all():
for sample in original_file.samples.all():
samples.add(sample)
return samples
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(ProcessorJob, self).save(*args, **kwargs)
def __str__(self):
return "ProcessorJob " + str(self.pk) + ": " + str(self.pipeline_applied)
class DownloaderJob(models.Model):
"""Records information about running a Downloader."""
class Meta:
db_table = "downloader_jobs"
# This field contains a string which corresponds to a valid
# Downloader Task. Valid values are enumerated in:
# data_refinery_common.job_lookup.Downloaders
downloader_task = models.CharField(max_length=256)
accession_code = models.CharField(max_length=256, blank=True, null=True)
no_retry = models.BooleanField(default=False)
original_files = models.ManyToManyField('OriginalFile', through='DownloaderJobOriginalFileAssociation')
# Tracking
start_time = models.DateTimeField(null=True)
end_time = models.DateTimeField(null=True)
success = models.NullBooleanField(null=True)
nomad_job_id = models.CharField(max_length=256, null=True)
# This field represents how many times this job has been
# retried. It starts at 0 and each time the job has to be retried
# it will be incremented.
num_retries = models.IntegerField(default=0)
# This field indicates whether or not this job has been retried
# already or not.
retried = models.BooleanField(default=False)
# The point of this field is to identify which worker ran the
# job. A few fields may actually be required or something other
# than just an id.
worker_id = models.CharField(max_length=256, null=True)
# This field corresponds to the version number of the
# data_refinery_workers project that was used to run the job.
worker_version = models.CharField(max_length=128, null=True)
# This field allows jobs to specify why they failed.
failure_reason = models.TextField(null=True)
# If the job is retried, this is the id of the new job
retried_job = models.ForeignKey('self', on_delete=models.PROTECT, null=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def get_samples(self) -> Set[Sample]:
samples = set()
for original_file in self.original_files.all():
for sample in original_file.samples.all():
samples.add(sample)
return samples
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(DownloaderJob, self).save(*args, **kwargs)
def __str__(self):
return "DownloaderJob " + str(self.pk) + ": " + str(self.downloader_task)
|
data-refinery/data_refinery
|
common/data_refinery_common/models/jobs.py
|
Python
|
bsd-3-clause
| 8,068
| 0.001363
|
import cpp11_decltype
a = cpp11_decltype.A()
a.i = 5
if a.i != 5:
raise RuntimeError, "Assignment to a.i failed."
a.j = 10
if a.j != 10:
raise RuntimeError, "Assignment to a.j failed."
b = a.foo(5)
if b != 10:
raise RuntimeError, "foo(5) should return 10."
b = a.foo(6)
if b != 0:
raise RuntimeError, "foo(6) should return 0."
|
DGA-MI-SSI/YaCo
|
deps/swig-3.0.7/Examples/test-suite/python/cpp11_decltype_runme.py
|
Python
|
gpl-3.0
| 347
| 0.011527
|
import numpy as np
import cudarray as ca
from .base import PickleMixin
_FLT_MIN = np.finfo(ca.float_).tiny
class Loss(PickleMixin):
# abll: I suspect that this interface is not ideal. It would be more
# elegant if Loss only provided loss() and grad(). However, where should
# we place the logic from fprop()?
@classmethod
def from_any(cls, arg):
if isinstance(arg, Loss):
return arg
elif isinstance(arg, str):
if arg == 'softmaxce':
return SoftmaxCrossEntropy()
elif arg == 'bce':
return BinaryCrossEntropy()
elif arg == 'mse':
return MeanSquaredError()
raise ValueError('Invalid constructor arguments: %s' % arg)
def _setup(self, x_shape):
pass
def fprop(self, x):
return x
def loss(self, target, x):
""" Returns the loss calculated from the target and the input. """
raise NotImplementedError()
def grad(self, target, x):
""" Returns the input gradient. """
raise NotImplementedError()
def y_shape(self, x_shape):
return x_shape
class SoftmaxCrossEntropy(Loss):
"""
Softmax + cross entropy (aka. multinomial logistic loss)
"""
def __init__(self):
self.name = 'softmaxce'
self._tmp_x = None
self._tmp_y = None
self._tmp_target = None
self._tmp_one_hot = None
self.n_classes = None
def _setup(self, x_shape):
self.n_classes = x_shape[1]
def _softmax(self, x):
# caching wrapper
if self._tmp_x is not x:
self._tmp_y = ca.nnet.softmax(x)
self._tmp_x = x
return self._tmp_y
def _one_hot(self, target):
# caching wrapper
if self._tmp_target is not target:
self._tmp_one_hot = ca.nnet.one_hot_encode(target, self.n_classes)
self._tmp_target = target
return self._tmp_one_hot
def fprop(self, x):
return ca.nnet.one_hot_decode(self._softmax(x))
def loss(self, target, x):
y = self._softmax(x)
target = self._one_hot(target)
return ca.nnet.categorical_cross_entropy(y_pred=y, y_true=target)
def grad(self, target, x):
y = self._softmax(x)
target = self._one_hot(target)
return -(target - y)
def y_shape(self, x_shape):
return (x_shape[0],)
class BinaryCrossEntropy(Loss):
def __init__(self):
self.name = 'bce'
def loss(self, y, y_pred):
y_pred = ca.maximum(y_pred, _FLT_MIN)
return -ca.mean(y*ca.log(y_pred) + (1 - y)*ca.log(1 - y_pred), axis=1)
def grad(self, y, y_pred):
y_pred = ca.maximum(y_pred, _FLT_MIN)
return -(y/y_pred - (1-y)/(1-y_pred))
class MeanSquaredError(Loss):
def __init__(self):
self.name = 'mse'
self.n_targets = None
def _setup(self, x_shape):
self.n_targets = x_shape[1]
def loss(self, y, y_pred):
return ca.mean((y-y_pred)**2, axis=1)
def grad(self, y, y_pred):
return 2.0 / self.n_targets * (y_pred - y)
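An illustrative sketch of the Loss API above (assumes cudarray is installed and provides its numpy-like `ca.zeros`/`ca.ones` constructors, and that this module is importable as `deeppy.loss`); it follows the `from_any` / `_setup` / `loss` / `grad` contract shown in the classes.
import cudarray as ca
from deeppy.loss import Loss

loss = Loss.from_any('mse')        # -> MeanSquaredError instance
loss._setup(x_shape=(4, 3))        # 4 samples, 3 targets per sample

y_true = ca.zeros((4, 3))
y_pred = ca.ones((4, 3))

per_sample = loss.loss(y_true, y_pred)   # mean squared error per sample (all 1.0 here)
gradient = loss.grad(y_true, y_pred)     # 2/3 * (y_pred - y_true), same shape as y_pred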
|
lre/deeppy
|
deeppy/loss.py
|
Python
|
mit
| 3,134
| 0
|
from rest_framework import serializers
class BaseModelSerializer(serializers.ModelSerializer):
id = serializers.SerializerMethodField()
def get_id(self, instance):
return str(instance.id)
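A short, hedged example of subclassing BaseModelSerializer (illustrative only; `Article` and its app are hypothetical, and a configured Django REST Framework project is assumed).
from myapp.models import Article  # hypothetical model

class ArticleSerializer(BaseModelSerializer):
    class Meta:
        model = Article
        fields = ('id', 'title')

# ArticleSerializer(article).data['id'] is str(article.id), e.g. '42',
# because the SerializerMethodField above stringifies the primary key.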
|
mnazim/django-rest-kickstart
|
helpers/serializers.py
|
Python
|
mit
| 206
| 0.004854
|
from website.addons.base.serializer import CitationsAddonSerializer
class MendeleySerializer(CitationsAddonSerializer):
addon_short_name = 'mendeley'
|
zachjanicki/osf.io
|
website/addons/mendeley/serializer.py
|
Python
|
apache-2.0
| 155
| 0.006452
|
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from collections import OrderedDict
import attr
import py
import six
import pytest
from .compat import _PY2 as PY2
from .pathlib import Path
from .pathlib import resolve_from_str
from .pathlib import rmtree
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
CACHEDIR_TAG_CONTENT = b"""\
Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by pytest.
# For information about cache directory tags, see:
# http://www.bford.info/cachedir/spec.html
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
rmtree(cachedir, force=True)
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_warning_captured
from _pytest.warning_types import PytestWarning
_issue_warning_captured(
PytestWarning(fmt.format(**args) if args else fmt),
self._config.hook,
stacklevel=3,
)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files to e.g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
if path.parent.is_dir():
cache_dir_exists_already = True
else:
cache_dir_exists_already = self._cachedir.exists()
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
if not cache_dir_exists_already:
self._ensure_supporting_files()
def _ensure_supporting_files(self):
"""Create supporting files in the cache dir that are not really part of the cache."""
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
gitignore_path = self._cachedir.joinpath(".gitignore")
if not gitignore_path.is_file():
msg = u"# Created by pytest automatically.\n*"
gitignore_path.write_text(msg, encoding="UTF-8")
cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
if not cachedir_tag_path.is_file():
cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
cache_dir_default = ".pytest_cache"
if "TOX_ENV_DIR" in os.environ:
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
"""Display cachedir with --cache-show and if non-default."""
if config.option.verbose or config.getini("cache_dir") != ".pytest_cache":
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
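A small usage sketch of the `cache` fixture defined above (illustrative; the test module and key are hypothetical). Values must be JSON-serializable, and keys are '/'-separated with the first part naming the plugin or application.
# test_cache_demo.py -- hypothetical test module
def test_remember_expensive_result(cache):
    result = cache.get("demo/expensive", None)
    if result is None:
        result = sum(range(10))            # stand-in for an expensive computation
        cache.set("demo/expensive", result)
    assert result == 45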
|
hackebrot/pytest
|
src/_pytest/cacheprovider.py
|
Python
|
mit
| 13,931
| 0.00079
|
import os, sys
import myfun
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import interpolate
import lagrangian_stats
import scipy.fftpack
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = 'm_25_2_512'
label = 'm_25_1_particles'
dayi = 481 #10*24*2
dayf = 581 #10*24*4
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = './Velocity_CG/'
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
# dimensions archives
# ML exp
Xlist = np.linspace(0,2000,161)
Ylist = np.linspace(0,2000,161)
Zlist = np.linspace(0,-50,51)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = np.cumsum(dl)
xn = len(Xlist)
yn = len(Ylist)
zn = len(Zlist)
dx = np.diff(Xlist)
dy = np.diff(Ylist)
z = 1
for time in range(dayi,dayf,days):
print 'time:', time
tlabel = str(time)
while len(tlabel) < 3: tlabel = '0'+tlabel
#Velocity_CG_m_50_6e_9.csv
fileU = path+'Velocity_CG_0_'+label+'_'+str(time)+'.csv'
fileV = path+'Velocity_CG_1_'+label+'_'+str(time)+'.csv'
fileT = '../RST/Temperature_CG/Temperature_CG_'+label+'_'+str(time)+'.csv'
file1 = 'Divergence_'+label+'_'+str(time)
#
U = lagrangian_stats.read_Scalar(fileU,xn,yn,zn)
V = lagrangian_stats.read_Scalar(fileV,xn,yn,zn)
T = lagrangian_stats.read_Scalar(fileT,xn,yn,zn)
for k in range(0,len(Zlist),5):
dU = np.asarray(np.gradient(U[:,:,k]))
dV = np.asarray(np.gradient(V[:,:,k]))
Div = dU[0,:,:]/dx[0] + dV[1,:,:]/dy[0] # grid spacing is uniform, so a scalar step is used
#
FT = np.zeros((xn/1,yn))
#
for j in range(len(Ylist)):
tempfft = scipy.fftpack.fft(Div[:,j]**2,xn)
FT[:,j] = abs(tempfft)**2
w = scipy.fftpack.fftfreq(xn, dx[1])
# w = scipy.fftpack.fftshift(w)
FTp = np.mean(FT,1)/xn
fig = plt.figure(figsize=(10,8))
p25, = plt.loglog(w[w>0], FTp[w>0],'r',linewidth=2)
plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+5/3.)],'k',linewidth=1.5)
plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+3.)],'k',linewidth=1.5)
plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+1.)],'k',linewidth=1.5)
plt.text(6*10**-2, 5*10**-( -3+5/3.), '-5/3',fontsize=18)
plt.text(6*10**-2, 5*10**-( -3+3.), '-3',fontsize=18)
plt.text(6*10**-2, 5*10**-( -3+1.), '-1',fontsize=18)
plt.text(10**-3, 10**2,str(time*360./3600)+'hr',fontsize=18)
plt.xlabel(r'k $[m^{-1}]$',fontsize=20)
plt.ylabel(r'PSD',fontsize=20)
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlim([1/2000.,1/10.])
plt.savefig('./plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps',bbox_inches='tight')
print './plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps'
plt.close()
#
v = np.linspace(0, 10, 10, endpoint=True)
vl = np.linspace(0, 10, 5, endpoint=True)
fig = plt.figure(figsize=(6,6))
fig.add_subplot(111,aspect='equal')
plt.contourf(Xlist/1000,Ylist/1000,T,v,extend='both',cmap=plt.cm.PiYG)
plt.colorbar(ticks=vl)
plt.title(str(np.round(10*(time*360./3600))/10.0)+'h')
plt.ylabel('Y [km]',fontsize=16)
plt.xlabel('X [km]',fontsize=16)
plt.savefig('./plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps',bbox_inches='tight')
print './plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps'
plt.close()
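A self-contained sketch of the FFT/`fftfreq` pattern used above, run on synthetic 1-D data (illustrative only; it does not depend on the Velocity_CG model output the script reads).
import numpy as np
import scipy.fftpack

x = np.linspace(0, 2000, 161)              # same kind of uniform grid as Xlist
dx = x[1] - x[0]
signal = np.sin(2 * np.pi * x / 200.0)     # wavelength 200 m

spectrum = abs(scipy.fftpack.fft(signal, len(x)))**2   # power spectrum
wavenumber = scipy.fftpack.fftfreq(len(x), dx)         # cycles per metre

peak = wavenumber[wavenumber > 0][np.argmax(spectrum[wavenumber > 0])]
# peak is close to 1/200 = 0.005 m^-1, the wavenumber of the input sine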
|
jungla/ICOM-fluidity-toolbox
|
2D/U/plot_Div_spec.py
|
Python
|
gpl-2.0
| 3,442
| 0.049099
|
# -*- coding: utf-8 -*-
__version__ = '0.7.0'
|
MathieuDuponchelle/django-sortedm2m
|
sortedm2m/__init__.py
|
Python
|
bsd-3-clause
| 48
| 0
|
# Copyright (C) 2011-2017 2ndQuadrant Limited
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains functions to retrieve information about xlog
files
"""
import collections
import os
import re
from tempfile import NamedTemporaryFile
from barman.exceptions import BadHistoryFileContents, BadXlogSegmentName
# xlog file segment name parser (regular expression)
_xlog_re = re.compile(r'''
^
([\dA-Fa-f]{8}) # everything has a timeline
(?:
([\dA-Fa-f]{8})([\dA-Fa-f]{8}) # segment name, if a wal file
(?: # and optional
\.[\dA-Fa-f]{8}\.backup # offset, if a backup label
|
\.partial # partial, if a partial file
)?
|
\.history # or only .history, if a history file
)
$
''', re.VERBOSE)
# xlog location parser for concurrent backup (regular expression)
_location_re = re.compile(r'^([\dA-F]+)/([\dA-F]+)$')
# Taken from xlog_internal.h from PostgreSQL sources
#: XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2
#: and larger than XLOG_BLCKSZ (preferably, a great deal larger than
#: XLOG_BLCKSZ).
DEFAULT_XLOG_SEG_SIZE = 1 << 24
#: This namedtuple is a container for the information
#: contained inside history files
HistoryFileData = collections.namedtuple(
'HistoryFileData',
'tli parent_tli switchpoint reason')
def is_any_xlog_file(path):
"""
Return True if the xlog is either a WAL segment, a .backup file
or a .history file, False otherwise.
It supports either a full file path or a simple file name.
:param str path: the file name to test
:rtype: bool
"""
match = _xlog_re.match(os.path.basename(path))
if match:
return True
return False
def is_history_file(path):
"""
Return True if the xlog is a .history file, False otherwise
It supports either a full file path or a simple file name.
:param str path: the file name to test
:rtype: bool
"""
match = _xlog_re.search(os.path.basename(path))
if match and match.group(0).endswith('.history'):
return True
return False
def is_backup_file(path):
"""
Return True if the xlog is a .backup file, False otherwise
It supports either a full file path or a simple file name.
:param str path: the file name to test
:rtype: bool
"""
match = _xlog_re.search(os.path.basename(path))
if match and match.group(0).endswith('.backup'):
return True
return False
def is_partial_file(path):
"""
Return True if the xlog is a .partial file, False otherwise
It supports either a full file path or a simple file name.
:param str path: the file name to test
:rtype: bool
"""
match = _xlog_re.search(os.path.basename(path))
if match and match.group(0).endswith('.partial'):
return True
return False
def is_wal_file(path):
"""
Return True if the xlog is a regular xlog file, False otherwise
It supports either a full file path or a simple file name.
:param str path: the file name to test
:rtype: bool
"""
match = _xlog_re.search(os.path.basename(path))
if (match and
not match.group(0).endswith('.backup') and
not match.group(0).endswith('.history') and
not match.group(0).endswith('.partial')):
return True
return False
def decode_segment_name(path):
"""
Retrieve the timeline, log ID and segment ID
from the name of a xlog segment
It can handle either a full file path or a simple file name.
:param str path: the file name to decode
:rtype: list[int]
"""
name = os.path.basename(path)
match = _xlog_re.match(name)
if not match:
raise BadXlogSegmentName(name)
return [int(x, 16) if x else None for x in match.groups()]
def encode_segment_name(tli, log, seg):
"""
Build the xlog segment name based on timeline, log ID and segment ID
:param int tli: timeline number
:param int log: log number
:param int seg: segment number
:return str: segment file name
"""
return "%08X%08X%08X" % (tli, log, seg)
def encode_history_file_name(tli):
"""
Build the history file name based on timeline
:return str: history file name
"""
return "%08X.history" % (tli,)
def xlog_segments_per_file(xlog_segment_size):
"""
Given that WAL files are named using the following pattern:
<timeline_number><xlog_file_number><xlog_segment_number>
this is the number of XLOG segments in an XLOG file. By XLOG file
we don't mean an actual file on the filesystem, but the definition
used in the PostgreSQL sources: meaning a set of files containing the
same file number.
:param int xlog_segment_size: The XLOG segment size in bytes
:return int: The number of segments in an XLOG file
"""
return 0xffffffff // xlog_segment_size
def xlog_file_size(xlog_segment_size):
"""
Given that WAL files are named using the following pattern:
<timeline_number><xlog_file_number><xlog_segment_number>
this is the size in bytes of an XLOG file, which is composed on many
segments. See the documentation of `xlog_segments_per_file` for a
commentary on the definition of `XLOG` file.
:param int xlog_segment_size: The XLOG segment size in bytes
:return int: The size of an XLOG file
"""
return xlog_segment_size * xlog_segments_per_file(xlog_segment_size)
def generate_segment_names(begin, end=None, version=None,
xlog_segment_size=None):
"""
Generate a sequence of XLOG segments starting from ``begin``
If an ``end`` segment is provided the sequence will terminate after
returning it, otherwise the sequence will never terminate.
If the XLOG segment size is known, this generator is precise,
switching to the next file when required.
If the XLOG segment size is unknown, this generator will generate
all the possible XLOG file names.
The size of an XLOG segment can be every power of 2 between
the XLOG block size (8Kib) and the size of a log segment (4Gib)
:param str begin: begin segment name
:param str|None end: optional end segment name
:param int|None version: optional postgres version as an integer
(e.g. 90301 for 9.3.1)
:param int xlog_segment_size: the size of a XLOG segment
:rtype: collections.Iterable[str]
:raise: BadXlogSegmentName
"""
begin_tli, begin_log, begin_seg = decode_segment_name(begin)
end_tli, end_log, end_seg = None, None, None
if end:
end_tli, end_log, end_seg = decode_segment_name(end)
# this method doesn't support timeline changes
assert begin_tli == end_tli, (
"Begin segment (%s) and end segment (%s) "
"must have the same timeline part" % (begin, end))
# If version is less than 9.3 the last segment must be skipped
skip_last_segment = version is not None and version < 90300
# This is the number of XLOG segments in an XLOG file. By XLOG file
# we don't mean an actual file on the filesystem, but the definition
# used in the PostgreSQL sources: a set of files containing the
# same file number.
if xlog_segment_size:
# The generator is operating in precise and correct mode:
# knowing exactly when a switch to the next file is required
xlog_seg_per_file = xlog_segments_per_file(xlog_segment_size)
else:
# The generator is operating in imprecise mode: generating every
# possible XLOG file name.
xlog_seg_per_file = 0x7ffff
# Start from the first xlog and generate the segments sequentially
# If ``end`` has been provided, the while condition ensure the termination
# otherwise this generator will never stop
cur_log, cur_seg = begin_log, begin_seg
while end is None or \
cur_log < end_log or \
(cur_log == end_log and cur_seg <= end_seg):
yield encode_segment_name(begin_tli, cur_log, cur_seg)
cur_seg += 1
if cur_seg > xlog_seg_per_file or (
skip_last_segment and cur_seg == xlog_seg_per_file):
cur_seg = 0
cur_log += 1
def hash_dir(path):
"""
Get the directory where the xlog segment will be stored
It can handle either a full file path or a simple file name.
:param str|unicode path: xlog file name
:return str: directory name
"""
tli, log, _ = decode_segment_name(path)
# tli is always not None
if log is not None:
return "%08X%08X" % (tli, log)
else:
return ''
def parse_lsn(lsn_string):
"""
Transform a string XLOG location, formatted as %X/%X, in the corresponding
numeric representation
:param str lsn_string: the string XLOG location, i.e. '2/82000168'
:rtype: int
"""
lsn_list = lsn_string.split('/')
if len(lsn_list) != 2:
raise ValueError('Invalid LSN: %s', lsn_string)
return (int(lsn_list[0], 16) << 32) + int(lsn_list[1], 16)
def diff_lsn(lsn_string1, lsn_string2):
"""
Calculate the difference in bytes between two string XLOG location,
formatted as %X/%X
This function is a Python implementation of
the ``pg_xlog_location_diff(str, str)`` PostgreSQL function.
:param str lsn_string1: the string XLOG location, i.e. '2/82000168'
:param str lsn_string2: the string XLOG location, i.e. '2/82000168'
:rtype: int
"""
# If one the input is None returns None
if lsn_string1 is None or lsn_string2 is None:
return None
return parse_lsn(lsn_string1) - parse_lsn(lsn_string2)
def format_lsn(lsn):
"""
Transform a numeric XLOG location, in the corresponding %X/%X string
representation
:param int lsn: numeric XLOG location
:rtype: str
"""
return "%X/%X" % (lsn >> 32, lsn & 0xFFFFFFFF)
def location_to_xlogfile_name_offset(location, timeline, xlog_segment_size):
"""
Convert transaction log location string to file_name and file_offset
This is a reimplementation of pg_xlogfile_name_offset PostgreSQL function
This method returns a dictionary containing the following data:
* file_name
* file_offset
:param str location: XLOG location
:param int timeline: timeline
:param int xlog_segment_size: the size of a XLOG segment
:rtype: dict
"""
lsn = parse_lsn(location)
log = lsn >> 32
seg = (lsn & xlog_file_size(xlog_segment_size)) >> 24
offset = lsn & 0xFFFFFF
return {
'file_name': encode_segment_name(timeline, log, seg),
'file_offset': offset,
}
def location_from_xlogfile_name_offset(file_name, file_offset):
"""
Convert file_name and file_offset to a transaction log location.
This is the inverted function of PostgreSQL's pg_xlogfile_name_offset
function.
:param str file_name: a WAL file name
:param int file_offset: a numeric offset
:rtype: str
"""
decoded_segment = decode_segment_name(file_name)
location = ((decoded_segment[1] << 32) +
(decoded_segment[2] << 24) +
file_offset)
return format_lsn(location)
def decode_history_file(wal_info, comp_manager):
"""
Read an history file and parse its contents.
Each line in the file represents a timeline switch, each field is
separated by tab, empty lines are ignored and lines starting with '#'
are comments.
Each line is composed by three fields: parentTLI, switchpoint and reason.
"parentTLI" is the ID of the parent timeline.
"switchpoint" is the WAL position where the switch happened
"reason" is an human-readable explanation of why the timeline was changed
The method requires a CompressionManager object to handle the eventual
compression of the history file.
:param barman.infofile.WalFileInfo wal_info: history file obj
:param comp_manager: compression manager used in case
of history file compression
:return List[HistoryFileData]: information from the history file
"""
path = wal_info.orig_filename
# Decompress the file if needed
if wal_info.compression:
# Use a NamedTemporaryFile to avoid explicit cleanup
uncompressed_file = NamedTemporaryFile(
dir=os.path.dirname(path),
prefix='.%s.' % wal_info.name,
suffix='.uncompressed')
path = uncompressed_file.name
comp_manager.get_compressor(wal_info.compression).decompress(
wal_info.orig_filename, path)
# Extract the timeline from history file name
tli, _, _ = decode_segment_name(wal_info.name)
lines = []
with open(path) as fp:
for line in fp:
line = line.strip()
# Skip comments and empty lines
if line.startswith("#"):
continue
# Skip empty lines
if len(line) == 0:
continue
# Use tab as separator
contents = line.split('\t')
if len(contents) != 3:
# Invalid content of the line
raise BadHistoryFileContents(path)
history = HistoryFileData(
tli=tli,
parent_tli=int(contents[0]),
switchpoint=parse_lsn(contents[1]),
reason=contents[2])
lines.append(history)
# Empty history file or containing invalid content
if len(lines) == 0:
raise BadHistoryFileContents(path)
else:
return lines
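An illustrative round trip through the pure helpers above (editor-added sketch; it assumes `barman.xlog` is importable and uses WAL names and LSNs made up for the example).
from barman import xlog

# Segment name <-> (timeline, log, segment) round trip
name = xlog.encode_segment_name(1, 2, 3)
assert name == "000000010000000200000003"
assert xlog.decode_segment_name(name) == [1, 2, 3]
assert xlog.hash_dir(name) == "0000000100000002"
assert xlog.is_wal_file(name) and not xlog.is_history_file(name)

# LSN string <-> integer round trip
lsn = xlog.parse_lsn("2/82000168")
assert xlog.format_lsn(lsn) == "2/82000168"
assert xlog.diff_lsn("2/82000168", "2/82000168") == 0

# Bounded sequence of segment names
names = list(xlog.generate_segment_names("000000010000000000000001",
                                         "000000010000000000000003"))
assert names == ["000000010000000000000001",
                 "000000010000000000000002",
                 "000000010000000000000003"]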
|
infoxchange/barman
|
barman/xlog.py
|
Python
|
gpl-3.0
| 14,318
| 0
|
#
#
# Copyright (C) 2007, 2008, 2010, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""HTTP module.
"""
import logging
import mimetools
import OpenSSL
import select
import socket
import errno
from cStringIO import StringIO
from ganeti import constants
from ganeti import utils
HTTP_GANETI_VERSION = "Ganeti %s" % constants.RELEASE_VERSION
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_NOT_MODIFIED = 304
HTTP_0_9 = "HTTP/0.9"
HTTP_1_0 = "HTTP/1.0"
HTTP_1_1 = "HTTP/1.1"
HTTP_GET = "GET"
HTTP_HEAD = "HEAD"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_DELETE = "DELETE"
HTTP_ETAG = "ETag"
HTTP_HOST = "Host"
HTTP_SERVER = "Server"
HTTP_DATE = "Date"
HTTP_USER_AGENT = "User-Agent"
HTTP_CONTENT_TYPE = "Content-Type"
HTTP_CONTENT_LENGTH = "Content-Length"
HTTP_CONNECTION = "Connection"
HTTP_KEEP_ALIVE = "Keep-Alive"
HTTP_WWW_AUTHENTICATE = "WWW-Authenticate"
HTTP_AUTHORIZATION = "Authorization"
HTTP_AUTHENTICATION_INFO = "Authentication-Info"
HTTP_ALLOW = "Allow"
HTTP_APP_OCTET_STREAM = "application/octet-stream"
HTTP_APP_JSON = "application/json"
_SSL_UNEXPECTED_EOF = "Unexpected EOF"
# Socket operations
(SOCKOP_SEND,
SOCKOP_RECV,
SOCKOP_SHUTDOWN,
SOCKOP_HANDSHAKE) = range(4)
# send/receive quantum
SOCK_BUF_SIZE = 32768
class HttpError(Exception):
"""Internal exception for HTTP errors.
This should only be used for internal error reporting.
"""
class HttpConnectionClosed(Exception):
"""Internal exception for a closed connection.
This should only be used for internal error reporting. Only use
it if there's no other way to report this condition.
"""
class HttpSessionHandshakeUnexpectedEOF(HttpError):
"""Internal exception for errors during SSL handshake.
This should only be used for internal error reporting.
"""
class HttpSocketTimeout(Exception):
"""Internal exception for socket timeouts.
This should only be used for internal error reporting.
"""
class HttpException(Exception):
code = None
message = None
def __init__(self, message=None, headers=None):
Exception.__init__(self)
self.message = message
self.headers = headers
class HttpBadRequest(HttpException):
"""400 Bad Request
RFC2616, 10.4.1: The request could not be understood by the server
due to malformed syntax. The client SHOULD NOT repeat the request
without modifications.
"""
code = 400
class HttpUnauthorized(HttpException):
"""401 Unauthorized
RFC2616, section 10.4.2: The request requires user
authentication. The response MUST include a WWW-Authenticate header
field (section 14.47) containing a challenge applicable to the
requested resource.
"""
code = 401
class HttpForbidden(HttpException):
"""403 Forbidden
RFC2616, 10.4.4: The server understood the request, but is refusing
to fulfill it. Authorization will not help and the request SHOULD
NOT be repeated.
"""
code = 403
class HttpNotFound(HttpException):
"""404 Not Found
RFC2616, 10.4.5: The server has not found anything matching the
Request-URI. No indication is given of whether the condition is
temporary or permanent.
"""
code = 404
class HttpMethodNotAllowed(HttpException):
"""405 Method Not Allowed
RFC2616, 10.4.6: The method specified in the Request-Line is not
allowed for the resource identified by the Request-URI. The response
MUST include an Allow header containing a list of valid methods for
the requested resource.
"""
code = 405
class HttpNotAcceptable(HttpException):
"""406 Not Acceptable
RFC2616, 10.4.7: The resource identified by the request is only capable of
generating response entities which have content characteristics not
acceptable according to the accept headers sent in the request.
"""
code = 406
class HttpRequestTimeout(HttpException):
"""408 Request Timeout
RFC2616, 10.4.9: The client did not produce a request within the
time that the server was prepared to wait. The client MAY repeat the
request without modifications at any later time.
"""
code = 408
class HttpConflict(HttpException):
"""409 Conflict
RFC2616, 10.4.10: The request could not be completed due to a
conflict with the current state of the resource. This code is only
allowed in situations where it is expected that the user might be
able to resolve the conflict and resubmit the request.
"""
code = 409
class HttpGone(HttpException):
"""410 Gone
RFC2616, 10.4.11: The requested resource is no longer available at
the server and no forwarding address is known. This condition is
expected to be considered permanent.
"""
code = 410
class HttpLengthRequired(HttpException):
"""411 Length Required
RFC2616, 10.4.12: The server refuses to accept the request without a
defined Content-Length. The client MAY repeat the request if it adds
a valid Content-Length header field containing the length of the
message-body in the request message.
"""
code = 411
class HttpPreconditionFailed(HttpException):
"""412 Precondition Failed
RFC2616, 10.4.13: The precondition given in one or more of the
request-header fields evaluated to false when it was tested on the
server.
"""
code = 412
class HttpUnsupportedMediaType(HttpException):
"""415 Unsupported Media Type
RFC2616, 10.4.16: The server is refusing to service the request because the
entity of the request is in a format not supported by the requested resource
for the requested method.
"""
code = 415
class HttpInternalServerError(HttpException):
"""500 Internal Server Error
RFC2616, 10.5.1: The server encountered an unexpected condition
which prevented it from fulfilling the request.
"""
code = 500
class HttpNotImplemented(HttpException):
"""501 Not Implemented
RFC2616, 10.5.2: The server does not support the functionality
required to fulfill the request.
"""
code = 501
class HttpBadGateway(HttpException):
"""502 Bad Gateway
RFC2616, 10.5.3: The server, while acting as a gateway or proxy,
received an invalid response from the upstream server it accessed in
attempting to fulfill the request.
"""
code = 502
class HttpServiceUnavailable(HttpException):
"""503 Service Unavailable
RFC2616, 10.5.4: The server is currently unable to handle the
request due to a temporary overloading or maintenance of the server.
"""
code = 503
class HttpGatewayTimeout(HttpException):
"""504 Gateway Timeout
RFC2616, 10.5.5: The server, while acting as a gateway or proxy, did
not receive a timely response from the upstream server specified by
the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server
(e.g. DNS) it needed to access in attempting to complete the
request.
"""
code = 504
class HttpVersionNotSupported(HttpException):
"""505 HTTP Version Not Supported
RFC2616, 10.5.6: The server does not support, or refuses to support,
the HTTP protocol version that was used in the request message.
"""
code = 505
def ParseHeaders(buf):
"""Parses HTTP headers.
@note: This is just a trivial wrapper around C{mimetools.Message}
"""
return mimetools.Message(buf, 0)
def SocketOperation(sock, op, arg1, timeout):
"""Wrapper around socket functions.
This function abstracts error handling for socket operations, especially
for the complicated interaction with OpenSSL.
@type sock: socket
@param sock: Socket for the operation
@type op: int
@param op: Operation to execute (SOCKOP_* constants)
@type arg1: any
@param arg1: Parameter for function (if needed)
@type timeout: None or float
@param timeout: Timeout in seconds or None
@return: Return value of socket function
"""
# TODO: event_poll/event_check/override
if op in (SOCKOP_SEND, SOCKOP_HANDSHAKE):
event_poll = select.POLLOUT
elif op == SOCKOP_RECV:
event_poll = select.POLLIN
elif op == SOCKOP_SHUTDOWN:
event_poll = None
# The timeout is only used when OpenSSL requests polling for a condition.
# It is not advisable to have no timeout for shutdown.
assert timeout
else:
raise AssertionError("Invalid socket operation")
# Handshake is only supported by SSL sockets
if (op == SOCKOP_HANDSHAKE and
not isinstance(sock, OpenSSL.SSL.ConnectionType)):
return
# No override by default
event_override = 0
while True:
# Poll only for certain operations and when asked for by an override
if event_override or op in (SOCKOP_SEND, SOCKOP_RECV, SOCKOP_HANDSHAKE):
if event_override:
wait_for_event = event_override
else:
wait_for_event = event_poll
event = utils.WaitForFdCondition(sock, wait_for_event, timeout)
if event is None:
raise HttpSocketTimeout()
if event & (select.POLLNVAL | select.POLLHUP | select.POLLERR):
# Let the socket functions handle these
break
if not event & wait_for_event:
continue
# Reset override
event_override = 0
try:
try:
if op == SOCKOP_SEND:
return sock.send(arg1)
elif op == SOCKOP_RECV:
return sock.recv(arg1)
elif op == SOCKOP_SHUTDOWN:
if isinstance(sock, OpenSSL.SSL.ConnectionType):
# PyOpenSSL's shutdown() doesn't take arguments
return sock.shutdown()
else:
return sock.shutdown(arg1)
elif op == SOCKOP_HANDSHAKE:
return sock.do_handshake()
except OpenSSL.SSL.WantWriteError:
# OpenSSL wants to write, poll for POLLOUT
event_override = select.POLLOUT
continue
except OpenSSL.SSL.WantReadError:
# OpenSSL wants to read, poll for POLLIN
event_override = select.POLLIN | select.POLLPRI
continue
except OpenSSL.SSL.WantX509LookupError:
continue
except OpenSSL.SSL.ZeroReturnError, err:
# SSL Connection has been closed. In SSL 3.0 and TLS 1.0, this only
# occurs if a closure alert has occurred in the protocol, i.e. the
# connection has been closed cleanly. Note that this does not
# necessarily mean that the transport layer (e.g. a socket) has been
# closed.
if op == SOCKOP_SEND:
# Can happen during a renegotiation
raise HttpConnectionClosed(err.args)
elif op == SOCKOP_RECV:
return ""
# SSL_shutdown shouldn't return SSL_ERROR_ZERO_RETURN
raise socket.error(err.args)
except OpenSSL.SSL.SysCallError, err:
if op == SOCKOP_SEND:
# arg1 is the data when writing
if err.args and err.args[0] == -1 and arg1 == "":
# errors when writing empty strings are expected
# and can be ignored
return 0
if err.args == (-1, _SSL_UNEXPECTED_EOF):
if op == SOCKOP_RECV:
return ""
elif op == SOCKOP_HANDSHAKE:
# Can happen if peer disconnects directly after the connection is
# opened.
raise HttpSessionHandshakeUnexpectedEOF(err.args)
raise socket.error(err.args)
except OpenSSL.SSL.Error, err:
raise socket.error(err.args)
except socket.error, err:
if err.args and err.args[0] == errno.EAGAIN:
# Ignore EAGAIN
continue
raise
def ShutdownConnection(sock, close_timeout, write_timeout, msgreader, force):
"""Closes the connection.
@type sock: socket
@param sock: Socket to be shut down
@type close_timeout: float
@param close_timeout: How long to wait for the peer to close
the connection
@type write_timeout: float
@param write_timeout: Write timeout for shutdown
@type msgreader: http.HttpMessageReader
@param msgreader: Request message reader, used to determine whether
peer should close connection
@type force: bool
@param force: Whether to forcibly close the connection without
waiting for peer
"""
#print msgreader.peer_will_close, force
if msgreader and msgreader.peer_will_close and not force:
# Wait for peer to close
try:
# Check whether it's actually closed
if not SocketOperation(sock, SOCKOP_RECV, 1, close_timeout):
return
except (socket.error, HttpError, HttpSocketTimeout):
# Ignore errors at this stage
pass
# Close the connection from our side
try:
# We don't care about the return value, see NOTES in SSL_shutdown(3).
SocketOperation(sock, SOCKOP_SHUTDOWN, socket.SHUT_RDWR,
write_timeout)
except HttpSocketTimeout:
raise HttpError("Timeout while shutting down connection")
except socket.error, err:
# Ignore ENOTCONN
if not (err.args and err.args[0] == errno.ENOTCONN):
raise HttpError("Error while shutting down connection: %s" % err)
def Handshake(sock, write_timeout):
"""Shakes peer's hands.
@type sock: socket
@param sock: Socket to be shut down
@type write_timeout: float
@param write_timeout: Write timeout for handshake
"""
try:
return SocketOperation(sock, SOCKOP_HANDSHAKE, None, write_timeout)
except HttpSocketTimeout:
raise HttpError("Timeout during SSL handshake")
except socket.error, err:
raise HttpError("Error in SSL handshake: %s" % err)
class HttpSslParams(object):
"""Data class for SSL key and certificate.
"""
def __init__(self, ssl_key_path, ssl_cert_path):
"""Initializes this class.
@type ssl_key_path: string
@param ssl_key_path: Path to file containing SSL key in PEM format
@type ssl_cert_path: string
@param ssl_cert_path: Path to file containing SSL certificate
in PEM format
"""
self.ssl_key_pem = utils.ReadFile(ssl_key_path)
self.ssl_cert_pem = utils.ReadFile(ssl_cert_path)
self.ssl_cert_path = ssl_cert_path
def GetKey(self):
return OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
self.ssl_key_pem)
def GetCertificate(self):
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
self.ssl_cert_pem)
class HttpBase(object):
"""Base class for HTTP server and client.
"""
def __init__(self):
self.using_ssl = None
self._ssl_params = None
self._ssl_key = None
self._ssl_cert = None
def _CreateSocket(self, ssl_params, ssl_verify_peer, family,
ssl_verify_callback):
"""Creates a TCP socket and initializes SSL if needed.
@type ssl_params: HttpSslParams
@param ssl_params: SSL key and certificate
@type ssl_verify_peer: bool
@param ssl_verify_peer: Whether to require client certificate
and compare it with our certificate
@type family: int
@param family: socket.AF_INET | socket.AF_INET6
"""
assert family in (socket.AF_INET, socket.AF_INET6)
if ssl_verify_peer:
assert ssl_verify_callback is not None
self._ssl_params = ssl_params
sock = socket.socket(family, socket.SOCK_STREAM)
# Should we enable SSL?
self.using_ssl = ssl_params is not None
if not self.using_ssl:
return sock
self._ssl_key = ssl_params.GetKey()
self._ssl_cert = ssl_params.GetCertificate()
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2)
ciphers = self.GetSslCiphers()
logging.debug("Setting SSL cipher string %s", ciphers)
ctx.set_cipher_list(ciphers)
ctx.use_privatekey(self._ssl_key)
ctx.use_certificate(self._ssl_cert)
ctx.check_privatekey()
if ssl_verify_peer:
ctx.set_verify(OpenSSL.SSL.VERIFY_PEER |
OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
ssl_verify_callback)
# Also add our certificate as a trusted CA to be sent to the client.
# This is required at least for GnuTLS clients to work.
try:
# This will fail for PyOpenssl versions before 0.10
ctx.add_client_ca(self._ssl_cert)
except AttributeError:
# Fall back to letting OpenSSL read the certificate file directly.
ctx.load_client_ca(ssl_params.ssl_cert_path)
return OpenSSL.SSL.Connection(ctx, sock)
def GetSslCiphers(self): # pylint: disable=R0201
"""Returns the ciphers string for SSL.
"""
return constants.OPENSSL_CIPHERS
def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):
"""Verify the certificate provided by the peer
We only compare fingerprints. The client must use the same certificate as
we do on our side.
"""
# some parameters are unused, but this is the API
# pylint: disable=W0613
assert self._ssl_params, "SSL not initialized"
return (self._ssl_cert.digest("sha1") == cert.digest("sha1") and
self._ssl_cert.digest("md5") == cert.digest("md5"))
class HttpMessage(object):
"""Data structure for HTTP message.
"""
def __init__(self):
self.start_line = None
self.headers = None
self.body = None
class HttpClientToServerStartLine(object):
"""Data structure for HTTP request start line.
"""
def __init__(self, method, path, version):
self.method = method
self.path = path
self.version = version
def __str__(self):
return "%s %s %s" % (self.method, self.path, self.version)
class HttpServerToClientStartLine(object):
"""Data structure for HTTP response start line.
"""
def __init__(self, version, code, reason):
self.version = version
self.code = code
self.reason = reason
def __str__(self):
return "%s %s %s" % (self.version, self.code, self.reason)
class HttpMessageWriter(object):
"""Writes an HTTP message to a socket.
"""
def __init__(self, sock, msg, write_timeout):
"""Initializes this class and writes an HTTP message to a socket.
@type sock: socket
@param sock: Socket to be written to
@type msg: http.HttpMessage
@param msg: HTTP message to be written
@type write_timeout: float
@param write_timeout: Write timeout for socket
"""
self._msg = msg
self._PrepareMessage()
buf = self._FormatMessage()
pos = 0
end = len(buf)
while pos < end:
# Send only SOCK_BUF_SIZE bytes at a time
data = buf[pos:(pos + SOCK_BUF_SIZE)]
sent = SocketOperation(sock, SOCKOP_SEND, data, write_timeout)
# Remove sent bytes
pos += sent
assert pos == end, "Message wasn't sent completely"
def _PrepareMessage(self):
"""Prepares the HTTP message by setting mandatory headers.
"""
# RFC2616, section 4.3: "The presence of a message-body in a request is
# signaled by the inclusion of a Content-Length or Transfer-Encoding header
# field in the request's message-headers."
if self._msg.body:
self._msg.headers[HTTP_CONTENT_LENGTH] = len(self._msg.body)
def _FormatMessage(self):
"""Serializes the HTTP message into a string.
"""
buf = StringIO()
# Add start line
buf.write(str(self._msg.start_line))
buf.write("\r\n")
# Add headers
if self._msg.start_line.version != HTTP_0_9:
for name, value in self._msg.headers.iteritems():
buf.write("%s: %s\r\n" % (name, value))
buf.write("\r\n")
# Add message body if needed
if self.HasMessageBody():
buf.write(self._msg.body)
elif self._msg.body:
logging.warning("Ignoring message body")
return buf.getvalue()
def HasMessageBody(self):
"""Checks whether the HTTP message contains a body.
Can be overridden by subclasses.
"""
return bool(self._msg.body)
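# Illustrative sketch (not part of the original module): serializing a request
# with HttpMessageWriter. The connected socket, header values and timeout
# below are hypothetical.
#
#   msg = HttpMessage()
#   msg.start_line = HttpClientToServerStartLine("GET", "/version", HTTP_1_1)
#   msg.headers = {"Host": "localhost"}
#   msg.body = None
#   HttpMessageWriter(connected_sock, msg, write_timeout=30.0)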
class HttpMessageReader(object):
"""Reads HTTP message from socket.
"""
# Length limits
START_LINE_LENGTH_MAX = None
HEADER_LENGTH_MAX = None
# Parser state machine
PS_START_LINE = "start-line"
PS_HEADERS = "headers"
PS_BODY = "entity-body"
PS_COMPLETE = "complete"
def __init__(self, sock, msg, read_timeout):
"""Reads an HTTP message from a socket.
@type sock: socket
@param sock: Socket to be read from
@type msg: http.HttpMessage
@param msg: Object for the read message
@type read_timeout: float
@param read_timeout: Read timeout for socket
"""
self.sock = sock
self.msg = msg
self.start_line_buffer = None
self.header_buffer = StringIO()
self.body_buffer = StringIO()
self.parser_status = self.PS_START_LINE
self.content_length = None
self.peer_will_close = None
buf = ""
eof = False
while self.parser_status != self.PS_COMPLETE:
# TODO: Don't read more than necessary (Content-Length), otherwise
# data might be lost and/or an error could occur
data = SocketOperation(sock, SOCKOP_RECV, SOCK_BUF_SIZE, read_timeout)
if data:
buf += data
else:
eof = True
# Do some parsing and error checking while more data arrives
buf = self._ContinueParsing(buf, eof)
# Must be done only after the buffer has been evaluated
# TODO: Content-Length < len(data read) and connection closed
if (eof and
self.parser_status in (self.PS_START_LINE,
self.PS_HEADERS)):
raise HttpError("Connection closed prematurely")
# Parse rest
buf = self._ContinueParsing(buf, True)
assert self.parser_status == self.PS_COMPLETE
assert not buf, "Parser didn't read full response"
# Body is complete
msg.body = self.body_buffer.getvalue()
def _ContinueParsing(self, buf, eof):
"""Main function for HTTP message state machine.
@type buf: string
@param buf: Receive buffer
@type eof: bool
@param eof: Whether we've reached EOF on the socket
@rtype: string
@return: Updated receive buffer
"""
# TODO: Use offset instead of slicing when possible
if self.parser_status == self.PS_START_LINE:
# Expect start line
while True:
idx = buf.find("\r\n")
# RFC2616, section 4.1: "In the interest of robustness, servers SHOULD
# ignore any empty line(s) received where a Request-Line is expected.
# In other words, if the server is reading the protocol stream at the
# beginning of a message and receives a CRLF first, it should ignore
# the CRLF."
if idx == 0:
# TODO: Limit number of CRLFs/empty lines for safety?
buf = buf[2:]
continue
if idx > 0:
self.start_line_buffer = buf[:idx]
self._CheckStartLineLength(len(self.start_line_buffer))
# Remove status line, including CRLF
buf = buf[idx + 2:]
self.msg.start_line = self.ParseStartLine(self.start_line_buffer)
self.parser_status = self.PS_HEADERS
else:
# Check whether incoming data is getting too large, otherwise we just
# fill our read buffer.
self._CheckStartLineLength(len(buf))
break
# TODO: Handle messages without headers
if self.parser_status == self.PS_HEADERS:
# Wait for header end
idx = buf.find("\r\n\r\n")
if idx >= 0:
self.header_buffer.write(buf[:idx + 2])
self._CheckHeaderLength(self.header_buffer.tell())
# Remove headers, including CRLF
buf = buf[idx + 4:]
self._ParseHeaders()
self.parser_status = self.PS_BODY
else:
# Check whether incoming data is getting too large, otherwise we just
# fill our read buffer.
self._CheckHeaderLength(len(buf))
if self.parser_status == self.PS_BODY:
# TODO: Implement max size for body_buffer
self.body_buffer.write(buf)
buf = ""
# Check whether we've read everything
#
# RFC2616, section 4.4: "When a message-body is included with a message,
# the transfer-length of that body is determined by one of the following
# [...] 5. By the server closing the connection. (Closing the connection
# cannot be used to indicate the end of a request body, since that would
# leave no possibility for the server to send back a response.)"
#
# TODO: Error when buffer length > Content-Length header
if (eof or
self.content_length is None or
(self.content_length is not None and
self.body_buffer.tell() >= self.content_length)):
self.parser_status = self.PS_COMPLETE
return buf
def _CheckStartLineLength(self, length):
"""Limits the start line buffer size.
@type length: int
@param length: Buffer size
"""
if (self.START_LINE_LENGTH_MAX is not None and
length > self.START_LINE_LENGTH_MAX):
raise HttpError("Start line longer than %d chars" %
self.START_LINE_LENGTH_MAX)
def _CheckHeaderLength(self, length):
"""Limits the header buffer size.
@type length: int
@param length: Buffer size
"""
if (self.HEADER_LENGTH_MAX is not None and
length > self.HEADER_LENGTH_MAX):
raise HttpError("Headers longer than %d chars" % self.HEADER_LENGTH_MAX)
def ParseStartLine(self, start_line):
"""Parses the start line of a message.
Must be overridden by subclass.
@type start_line: string
@param start_line: Start line string
"""
raise NotImplementedError()
def _WillPeerCloseConnection(self):
"""Evaluate whether peer will close the connection.
@rtype: bool
@return: Whether peer will close the connection
"""
# RFC2616, section 14.10: "HTTP/1.1 defines the "close" connection option
# for the sender to signal that the connection will be closed after
# completion of the response. For example,
#
# Connection: close
#
# in either the request or the response header fields indicates that the
# connection SHOULD NOT be considered `persistent' (section 8.1) after the
# current request/response is complete."
hdr_connection = self.msg.headers.get(HTTP_CONNECTION, None)
if hdr_connection:
hdr_connection = hdr_connection.lower()
# An HTTP/1.1 server is assumed to stay open unless explicitly closed.
if self.msg.start_line.version == HTTP_1_1:
return (hdr_connection and "close" in hdr_connection)
# Some HTTP/1.0 implementations have support for persistent connections,
# using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.headers.get(HTTP_KEEP_ALIVE):
return False
# At least Akamai returns a "Connection: Keep-Alive" header, which was
# supposed to be sent by the client.
if hdr_connection and "keep-alive" in hdr_connection:
return False
return True
def _ParseHeaders(self):
"""Parses the headers.
This function also adjusts internal variables based on header values.
RFC2616, section 4.3: The presence of a message-body in a request is
signaled by the inclusion of a Content-Length or Transfer-Encoding header
field in the request's message-headers.
"""
# Parse headers
self.header_buffer.seek(0, 0)
self.msg.headers = ParseHeaders(self.header_buffer)
self.peer_will_close = self._WillPeerCloseConnection()
# Do we have a Content-Length header?
hdr_content_length = self.msg.headers.get(HTTP_CONTENT_LENGTH, None)
if hdr_content_length:
try:
self.content_length = int(hdr_content_length)
except (TypeError, ValueError):
self.content_length = None
if self.content_length is not None and self.content_length < 0:
self.content_length = None
# if the connection remains open and a content-length was not provided,
# then assume that the connection WILL close.
if self.content_length is None:
self.peer_will_close = True
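# Illustrative sketch (not part of the original module): HttpMessageReader
# requires ParseStartLine to be overridden, so a minimal request reader could
# look like the following. The connected socket and timeout are hypothetical.
#
#   class _RequestReader(HttpMessageReader):
#     def ParseStartLine(self, start_line):
#       method, path, version = start_line.split(" ", 2)
#       return HttpClientToServerStartLine(method, path, version)
#
#   msg = HttpMessage()
#   _RequestReader(connected_sock, msg, read_timeout=30.0)
#   # msg.start_line, msg.headers and msg.body are populated on return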
| apyrgio/ganeti | lib/http/__init__.py | Python | bsd-2-clause | 28,992 | 0.007243 |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, class kit (GUICG22)
import GemRB
from GUIDefines import *
from ie_stats import *
import CharGenCommon
import GUICommon
import CommonTables
KitWindow = 0
TextAreaControl = 0
DoneButton = 0
SchoolList = 0
ClassID = 0
def OnLoad():
global KitWindow, TextAreaControl, DoneButton
global SchoolList, ClassID
if GUICommon.CloseOtherWindow(OnLoad):
if(KitWindow):
KitWindow.Unload()
KitWindow = None
return
GemRB.LoadWindowPack("GUICG", 640, 480)
RaceName = CommonTables.Races.GetRowName(GemRB.GetVar("Race")-1 )
Class = GemRB.GetVar("Class")-1
ClassName = CommonTables.Classes.GetRowName(Class)
ClassID = CommonTables.Classes.GetValue(Class, 5)
KitTable = GemRB.LoadTable("kittable")
KitTableName = KitTable.GetValue(ClassName, RaceName)
KitTable = GemRB.LoadTable(KitTableName,1)
SchoolList = GemRB.LoadTable("magesch")
#there is only a specialist mage window for bg1
KitWindow = GemRB.LoadWindow(12)
for i in range(8):
Button = KitWindow.GetControl(i+2)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_OR)
if not KitTable:
RowCount = 1
else:
RowCount = KitTable.GetRowCount()
for i in range(RowCount):
Button = KitWindow.GetControl(i+2)
if not KitTable:
if ClassID == 1:
Kit=GemRB.GetVar("MAGESCHOOL")
KitName = SchoolList.GetValue(i, 0)
else:
Kit = 0
KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 0)
else:
Kit = KitTable.GetValue(i,0)
if ClassID == 1:
if Kit:
Kit = Kit - 21
KitName = SchoolList.GetValue(Kit, 0)
else:
if Kit:
KitName = CommonTables.KitList.GetValue(Kit, 1)
else:
KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 0)
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.SetText(KitName)
Button.SetVarAssoc("Class Kit",Kit)
if i==0:
GemRB.SetVar("Class Kit",Kit)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, KitPress)
BackButton = KitWindow.GetControl(12)
BackButton.SetText(15416)
DoneButton = KitWindow.GetControl(0)
DoneButton.SetText(11973)
DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
TextAreaControl = KitWindow.GetControl(11)
TextAreaControl.SetText(17247)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CharGenCommon.BackPress)
#KitPress()
KitWindow.ShowModal(MODAL_SHADOW_NONE)
return
def KitPress():
Kit = GemRB.GetVar("Class Kit")
if Kit == 0:
KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 1)
else:
if ClassID==1:
KitName = SchoolList.GetValue(Kit, 1)
else:
KitName = CommonTables.KitList.GetValue(Kit, 3)
TextAreaControl.SetText(KitName)
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def NextPress():
#class
ClassIndex = GemRB.GetVar ("Class")-1
Class = CommonTables.Classes.GetValue (ClassIndex, 5)
MyChar = GemRB.GetVar ("Slot")
GemRB.SetPlayerStat (MyChar, IE_CLASS, Class)
KitIndex = GemRB.GetVar ("Class Kit")
if Class == 1:
GemRB.SetVar("MAGESCHOOL", KitIndex)
#the same as the unusable field
Kit = CommonTables.KitList.GetValue(KitIndex, 6)
GemRB.SetPlayerStat (MyChar, IE_KIT, Kit)
CharGenCommon.next()
| tomprince/gemrb | gemrb/GUIScripts/bg1/GUICG22.py | Python | gpl-2.0 | 3,963 | 0.028261 |
""" Class to handle date-parsing and formatting """
# Workaround for http://bugs.python.org/issue8098
import _strptime # pylint: disable=unused-import
from datetime import datetime
import time
class DateUtils(object):
""" Class to handle date-parsing and formatting """
date_format = '%Y-%m-%dT%H:%M:%SZ'
json_date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
kodi_date_format = '%Y-%m-%d %H:%M'
def get_str_date(self, date):
"""
Formats datetime to str of format %Y-%m-%dT%H:%M:%SZ
Arguments
date: datetime
"""
return datetime.strftime(date, self.date_format)
def parse_str_date(self, str_date):
"""
Parse a date of format %Y-%m-%dT%H:%M:%SZ to date
Arguments
str_date: str, %Y-%m-%dT%H:%M:%SZ
"""
return self._parse_str_date(str_date, self.date_format)
def _parse_str_date(self, str_date, date_format):
try:
return datetime.strptime(str_date, date_format)
except TypeError:
return datetime(*(time.strptime(str_date, date_format)[0:6]))
def parse_kodi_date(self, str_date):
if not str_date:
return None
return self._parse_str_date(str_date, '%Y-%m-%d %H:%M:%S')
def get_kodi_date_format(self, str_date):
"""
        Parses a date of format %Y-%m-%dT%H:%M:%S.%fZ and returns it as %Y-%m-%d %H:%M:%S
"""
parsed_date = self._parse_str_date(str_date, self.json_date_format)
return datetime.strftime(parsed_date, '%Y-%m-%d %H:%M:%S')
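# Illustrative sketch (not part of the original module): a typical round trip
# through DateUtils; the timestamps below are hypothetical sample values.
#
#   du = DateUtils()
#   parsed = du.parse_str_date('2017-01-31T20:15:00Z')
#   du.get_str_date(parsed)                              # '2017-01-31T20:15:00Z'
#   du.get_kodi_date_format('2017-01-31T20:15:00.000Z')  # '2017-01-31 20:15:00'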
| sebastian-steinmann/kodi-repo | src/service.library.video/resources/lib/date_utils.py | Python | mit | 1,538 | 0.001951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
urlpatterns = patterns('netadmin.networks.views',
url(r'^host/(?P<object_id>\d+)/$',
'host_detail', name='host_detail'),
url(r'^host/list/$',
'host_list', name='host_list'),
url(r'^host/list/page/(?P<page>\d+)/$',
'host_list', name='host_list_page'),
url(r'^host/new/$',
'host_create', name="host_new"),
url(r'^host/edit/(?P<object_id>\d+)/$',
'host_update', name="host_update"),
url(r'^host/delete/(?P<object_id>\d+)/$',
'host_delete', name="host_delete"),
url(r'^network/(?P<object_id>\d+)/$',
'network_detail', name='network_detail'),
url(r'^network/list/$',
'network_list', name='network_list'),
url(r'^network/list/page/(?P<page>\d+)/$',
'network_list', name='network_list_page'),
url(r'^network/new/$',
'network_create', name="network_new"),
url(r'^network/edit/(?P<object_id>\d+)/$',
'network_update', name="network_update"),
url(r'^network/delete/(?P<object_id>\d+)/$',
'network_delete', name="network_delete"),
url(r'^network/events/(?P<object_id>\d+)/$',
'network_events', name='network_events'),
url(r'^network/netmask-create/$',
'subnet_network', name='subnet_network'),
url(r'/update/(?P<object_id>\d+)/$',
'network_select', name='network_select'),
url(r'share/list/(?P<object_type>host|network)/(?P<object_id>\d+)/',
'share_list', name="share_list"),
url(r'share/(?P<object_type>host|network)/(?P<object_id>\d+)/',
'share', name="share"),
url(r'share/not/(?P<object_type>host|network)/(?P<object_id>\d+)/(?P<user_id>\d+)/',
'share_not', name="share_not"),
url(r'share/edit/(?P<object_type>host|network)/(?P<object_id>\d+)/(?P<user_id>\d+)/',
'share_edit', name="share_edit"),
)
| umitproject/network-admin | netadmin/networks/urls.py | Python | agpl-3.0 | 2,718 | 0.002575 |
#-*- coding: utf-8 -*-
'''
Created on 16 Dec. 2013
@author: yoann Moreau
All control operations:
return True if the control passes
'''
import os
import errno
from datetime import date,datetime,timedelta
import ogr,osr
import re
import gdal
import osr
import numpy as np
import numpy.ma as ma
import subprocess
import shutil
import math
from pyspatialite._spatialite import Row
import scipy.ndimage as ndimage
import pyproj as pp
def checkForFile(pathToFile):
if os.path.isfile(pathToFile):
return True
else:
return False
def createParamFile(pathFile,user,key):
f = open(pathFile, 'w+')
f.write("{\n")
f.write(' "url" : "https://api.ecmwf.int/v1",\n')
f.write('"key" : "'+key+'",\n')
f.write('"email" : "'+user+'"\n')
f.write("}")
f.close()
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def checkForFolder(pathToFolder):
try:
os.makedirs(pathToFolder)
except OSError as exception:
if exception.errno != errno.EEXIST:
            exit('Path for downloaded Era Interim data could not be created. Check your rights on the parent folder...')
def checkForDate(dateC):
#convert string to date from YYYY-MM-DD
if len(dateC)==10:
YYYY=dateC[0:4]
MM=dateC[5:7]
DD=dateC[8:10]
if (YYYY.isdigit() and MM.isdigit() and DD.isdigit()):
try:
date(int(YYYY),int(MM),int(DD))
except ValueError:
exit('Error on Date Format... please give a date in YYYY-MM-DD format')
return date(int(YYYY),int(MM),int(DD))
else:
exit('Error on Date Format... please give a date in YYYY-MM-DD format')
else:
exit('Error on Date Format... please give a date in YYYY-MM-DD format')
def convertShpToExtend(pathToShp):
"""
    reprojects the shapefile to WGS84 and returns its extent
"""
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(pathToShp)
if dataset is not None:
# from Layer
layer = dataset.GetLayer()
spatialRef = layer.GetSpatialRef()
# from Geometry
feature = layer.GetNextFeature()
geom = feature.GetGeometryRef()
spatialRef = geom.GetSpatialReference()
#WGS84
outSpatialRef = osr.SpatialReference()
outSpatialRef.ImportFromEPSG(4326)
coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef)
env = geom.GetEnvelope()
pointMAX = ogr.Geometry(ogr.wkbPoint)
pointMAX.AddPoint(env[1], env[3])
pointMAX.Transform(coordTrans)
pointMIN = ogr.Geometry(ogr.wkbPoint)
pointMIN.AddPoint(env[0], env[2])
pointMIN.Transform(coordTrans)
return [pointMAX.GetPoint()[1],pointMIN.GetPoint()[0],pointMIN.GetPoint()[1],pointMAX.GetPoint()[0]]
else:
exit(" shapefile not found. Please verify your path to the shapefile")
def is_float_re(element):
_float_regexp = re.compile(r"^[-+]?(?:\b[0-9]+(?:\.[0-9]*)?|\.[0-9]+\b)(?:[eE][-+]?[0-9]+\b)?$").match
return True if _float_regexp(element) else False
def checkForExtendValidity(extendList):
if len(extendList)==4 and all([is_float_re(str(x)) for x in extendList]) and extendList[0]>extendList[2] and extendList[1]<extendList[3]:
if float(extendList[0]) > -180 and float(extendList[2]) <180 and float(extendList[1]) <90 and float(extendList[3]) > -90:
extendArea=[str(x) for x in extendList]
return extendArea
else:
exit('Projection given is not in WGS84. Please verify your -t parameter')
else:
        exit('Area specified does not conform to a ymax xmin ymin xmax extent. Please verify your declaration')
def checkForTimeValidity(listTime):
validParameters=('00','06','12','18')
if len(listTime)>0 and isinstance(listTime, list) and all([x in validParameters for x in listTime]):
return listTime
else:
        exit('time parameters do not conform to eraInterim possibilities: '+ ",".join(validParameters))
def checkForStepValidity(listStep,typeData):
validParameters=(0,3,6,9,12)
if typeData=="forcast":
if len(listStep)>0 and isinstance(listStep, list) and all([int(x) in validParameters for x in listStep]):
listStep=[int(x) for x in listStep]
return listStep
else:
            exit('step parameters do not conform to eraInterim possibilities: '+ ",".join([str(x) for x in validParameters]))
else:
if len(listStep)>0:
            exit('step parameters do not conform to eraInterim possibilities: '+ ",".join([str(x) for x in validParameters])+ ' for analysis')
else:
return listStep
def checkForGridValidity(grid):
if (is_float_re(grid)):
grid=float(grid)
validParameters=(0.125,0.25,0.5,0.75,1.125,1.5,2,2.5,3)
if grid in validParameters:
return grid
else:
            exit('grid parameters do not conform to eraInterim possibilities: '+ ",".join([str(x) for x in validParameters]))
else:
        exit('grid parameters do not conform to eraInterim possibilities: '+ ",".join([str(x) for x in validParameters]))
def create_request_sfc(dateStart,dateEnd, timeList,stepList,grid,extent,paramList,output,typeData=None):
"""
    Builds the request structure sent to the ECMWF servers
    INPUTS:\n
    -date : in year-month-day format\n
    -time : in hour:minute:second format\n
    -coord : a list of coordinates in [N,W,S,E] order\n
    -grid size : size of the grid in degrees \n
    -output : name & path of the result file
"""
if typeData=='analyse':
typeD='an'
else:
typeD='fc'
struct = {
'dataset' : "interim",
'date' : dateStart.strftime("%Y-%m-%d")+"/to/"+dateEnd.strftime("%Y-%m-%d"),
'time' : "/".join(map(str, timeList)),
'stream' : "oper",
'step' : "/".join(map(str, stepList)),
'levtype' : "sfc", #pl -> pressure level ,sfc -> surface
        'type'       : typeD, #fc -> forecast, an -> analysis
'class' : "ei",
'param' : ".128/".join(map(str, paramList))+'.128',
'area' : "/".join(extent),
'grid' : str(grid)+"/"+str(grid),
'target' : output,
'format' : 'netcdf'
}
return struct
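# Illustrative sketch (not part of the original module): building a surface
# request for a few days of an analysis dataset. The dates, extent, grid and
# parameter code below are hypothetical example values.
#
#   req = create_request_sfc(date(2013, 1, 1), date(2013, 1, 5),
#                            ['00', '06', '12', '18'], [], 0.75,
#                            ['45', '-5', '40', '5'], [167],
#                            '/tmp/era_interim.nc', typeData='analyse')
#   # req can then be submitted through the ECMWF API client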
def moveFile(inputImg,outputImg):
"move a file to a directory"
    #TODO: add a check that the input file exists
    #move the file to the target directory
shutil.move(inputImg, outputImg)
def reprojRaster(pathToImg,output,shape,pathToShape):
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(pathToShape, 0)
layer = dataSource.GetLayer()
srs = layer.GetSpatialRef()
Xres=shape[1]
Yres=shape[0]
subprocess.call(["gdalwarp","-q","-s_srs","EPSG:4326","-t_srs",srs.ExportToWkt(),pathToImg,output,'-ts',str(Xres),str(Yres),'-overwrite','-dstnodata',"0"])
return output
def convertNETCDFtoTIF(inputFile,outputFile,format='float'):
#--convert netCDF to tif
ds_in=gdal.Open('NETCDF:"'+inputFile+'"')
metadata = ds_in.GetMetadata()
scale=metadata['tp#scale_factor']
offset=metadata['tp#add_offset']
nodata=metadata['tp#_FillValue']
cols = ds_in.RasterXSize
rows = ds_in.RasterYSize
geotransform = ds_in.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
nbBand= ds_in.RasterCount
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(outputFile, cols, rows, nbBand, gdal.GDT_Float32)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
for b in range(1,nbBand+1):
band = ds_in.GetRasterBand(b)
arrayB = np.array(band.ReadAsArray(), dtype=format)
np.putmask(arrayB,(arrayB==float(nodata)),0)
#arrayB=numpy.multiply(arrayB, scale)+float(offset)
trans_arrayB=arrayB*float(scale)+float(offset)
np.putmask(trans_arrayB,(arrayB==float(nodata)+1),0)
outband = outRaster.GetRasterBand(b)
outband.WriteArray(trans_arrayB)
outband.FlushCache()
def convertNETCDFtoDicArray(inputFile,format='float'):
#--convert netCDF to numpy dico Array
ds_in=gdal.Open('NETCDF:"'+inputFile+'"')
metadata = ds_in.GetMetadata()
for i in metadata.keys():
if i.find('scale_factor')>0:
scale=metadata[i]
elif i.find('add_offset')>0:
offset=metadata[i]
elif i.find('_FillValue')>0:
nodata=metadata[i]
nbBand= ds_in.RasterCount
dicoAray={}
for b in range(1,nbBand+1):
band = ds_in.GetRasterBand(b)
arrayB = np.array(band.ReadAsArray(), dtype=format)
np.putmask(arrayB,(arrayB==float(nodata)),0)
#arrayB=numpy.multiply(arrayB, scale)+float(offset)
trans_arrayB=arrayB*float(scale)+float(offset)
np.putmask(trans_arrayB,(arrayB==float(nodata)+1),0)
dicoAray[b]=trans_arrayB
return dicoAray
def convertKToD(dicoT):
Degree={}
for i in dicoT:
mask=np.logical_not(dicoT[i] > 0).astype(int)
DegreeArray=dicoT[i]-273.15
np.putmask(DegreeArray,mask,np.nan)
Degree[i]=DegreeArray
return Degree
def convertToHectoPascal(dicoP):
pressure= {}
for i in dicoP:
mask=np.logical_not(dicoP[i] > 0).astype(int)
PArray=dicoP[i]/100
np.putmask(PArray,mask,np.nan)
pressure[i]=PArray
return pressure
def convertPaToKgPa(dicoP):
pressure= {}
for i in dicoP:
mask=np.logical_not(dicoP[i] > 0).astype(int)
PArray=dicoP[i]/1000
np.putmask(PArray,mask,np.nan)
pressure[i]=PArray
return pressure
def convertMToMm(dicoEvap):
evap={}
for i in dicoEvap:
RArray=(dicoEvap[i]*(10**3))
evap[i]=RArray
return evap
def convertWToMJ(dicoRay):
rayonnement={}
for i in dicoRay:
mask=np.logical_not(dicoRay[i] > 0).astype(int)
RArray=(dicoRay[i]/(10**6))
np.putmask(RArray,mask,np.nan)
rayonnement[i]=RArray
return rayonnement
def convertGeoToAlt(dicoGeo):
def mean(values):
return np.nanmean(values)
Altitude={}
cstGravit=9.80665
footprint = np.array([[0,1,0],
[1,0,1],
[0,1,0]])
for i in dicoGeo:
mask=np.logical_not(dicoGeo[i] > 0).astype(int)
GeoArray=np.divide(dicoGeo[i],cstGravit)
np.putmask(GeoArray,mask,np.nan)
indices = np.where(np.isnan(GeoArray))
results = ndimage.generic_filter(GeoArray, mean, footprint=footprint)
for row, col in zip(*indices):
GeoArray[row,col] = results[row,col]
Altitude[i]=GeoArray
return Altitude
def computeDailyAccumulation(dicoBand,nbBandByDay,typeData):
accumulation={}
for i in range(0,len(dicoBand.keys())/nbBandByDay):
maxRange=nbBandByDay+i*nbBandByDay
        #do not take the last band... it corresponds to 00-->3h
for j in range (i*nbBandByDay,maxRange):
if "array" in locals():
array=array+dicoBand.items()[j][1]
else:
array=dicoBand.items()[j][1]
accumulation[i]=array
del array
return accumulation
def computeDailyMean(dicoBand,nbBandByDay,typeData):
def meanCalc(values):
return np.nanmean(values)
mean={}
footprint = np.array([[0,1,0],
[1,0,1],
[0,1,0]])
for i in range(0,len(dicoBand.keys())/nbBandByDay):
maxRange=nbBandByDay+i*nbBandByDay
        #do not take the last band... it corresponds to 00-->3h
for j in range (i*nbBandByDay,maxRange):
if "array" in locals():
array=array+dicoBand.items()[j][1]
np.putmask(dicoBand.items()[j][1], dicoBand.items()[j][1]==0, 0)
mask=mask+(dicoBand.items()[j][1] > 0).astype(int)
else:
array=dicoBand.items()[j][1]
np.putmask(dicoBand.items()[j][1], dicoBand.items()[j][1]==0, 0)
mask=(dicoBand.items()[j][1] > 0).astype(int)
mean[i]=array
del array
        #using the nanmean function would be much simpler
mean[i]=mean[i]/mask
indices = np.where(np.isnan(mean[i]))
results = ndimage.generic_filter(mean[i], meanCalc, footprint=footprint)
for row, col in zip(*indices):
mean[i][row,col] = results[row,col]
return mean
def computeDailyMax(dicoBand,nbBandByDay,typeData=None):
maxB={}
for i in range(0,len(dicoBand.keys())/nbBandByDay):
maxRange=nbBandByDay+i*nbBandByDay
        #do not take the last band... it corresponds to 00-->3h
for j in range (i*nbBandByDay,maxRange):
if "array" in locals():
array=np.fmax(array,dicoBand.items()[j][1])
else:
array=dicoBand.items()[j][1]
maxB[i]=array
del array
return maxB
def computeDailyMin(dicoBand,nbBandByDay,typeData=None):
minB={}
for i in range(0,len(dicoBand.keys())/nbBandByDay):
maxRange=nbBandByDay+i*nbBandByDay
        #do not take the last band... it corresponds to 00-->3h
for j in range (i*nbBandByDay,maxRange):
np.putmask(dicoBand.items()[j][1],dicoBand.items()[j][1]==0,np.nan)
if "array" in locals():
array=np.fmin(array,dicoBand.items()[j][1])
else:
array=dicoBand.items()[j][1]
minB[i]=array
del array
return minB
def fusVentFromDict(dicToFus,nbBandByDay,zmesure=10):
""" Wind profile relationship [m.s-1]
    Estimates the wind speed at 2 m from uz, the wind speed measured at
    height zmesure above the ground surface; the result is the norm of the
    scaled U and V components.
"""
wind={}
keys=dicToFus.keys()
if (len(dicToFus)==2):
for i in dicToFus[keys[0]]:
#Math.log = ln
u=dicToFus[keys[0]][i]*4.87/math.log(67.8*zmesure-5.42);
v=dicToFus[keys[1]][i]*4.87/math.log(67.8*zmesure-5.42);
wind[i]=np.sqrt(pow(u,2)+pow(v,2))
return wind
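# Illustrative sketch (not part of the original module): with U and V
# components of 3.0 and 4.0 m.s-1 at zmesure=10 m, each component is scaled by
# 4.87/ln(67.8*10-5.42) ~= 0.748, giving a 2 m wind speed of about 3.74 m.s-1.
# The dictionary keys below are arbitrary labels.
#
#   wind2m = fusVentFromDict({'u10': {1: np.array([[3.0]])},
#                             'v10': {1: np.array([[4.0]])}}, nbBandByDay=4)
#   # wind2m[1] ~= array([[ 3.74]])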
def ComputeHumidityFromPT(pressureDico,TDico,DewDico):
""" Compute Humidity for each Band and each day based on pressure,Temperature and Dew Point"""
Humidity={}
for i in pressureDico:
Humidity[i]=esat(pressureDico[i],DewDico[i])/esat(pressureDico[i],TDico[i])*100
np.putmask(Humidity[i], pressureDico[i]==0, 0)
np.putmask(Humidity[i], DewDico[i]==0, 0)
np.putmask(Humidity[i], TDico[i]==0, 0)
return Humidity
def esat(pressure,T):
""" Compute partial presure depending on P and T
P(T)=0.61121*exp(17.502*T/(T+240.97))*(1.00072+pression*(3.2+0.00059*temperature²)/100000.0)
From Wexler and al. 1976
Pressure en hpa --> convert to kPa
T en °C
"""
pressure=pressure/10
d_es = 0.61121*np.exp(np.multiply(T,17.502)/(T+240.97))
d_f = 1.00072+pressure*(3.2+0.00059*pow(T,2))/100000.0
return d_es*d_f
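# Illustrative sketch (not part of the original module): at T = 20 °C and a
# surface pressure of 1013 hPa, esat(1013.0, 20.0) evaluates to roughly
# 2.35 kPa. ComputeHumidityFromPT compares this quantity at the dew point
# temperature against the same quantity at the air temperature to obtain
# relative humidity.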
def eocalc(T):
""" Saturation vapor pressure at the air temperature [KPa]
T en °C
"""
eo_calc=0.6108*np.exp(17.27*T/(T+237.3))
return eo_calc
def delta_calc(T):
# Slope of saturation vapour pressure curve at air temperature [kPa.°C-1]
# T air temperature in °C
# Equation 13 FAO
delta=4098*(0.6108*np.exp(17.27*T/(T+237.3)))/(T+237.3)**2;
return delta
def doy(datetoConvert,deltaDays):
deltaJ=timedelta(days=deltaDays)
datetoConvert=datetoConvert+deltaJ
J = datetoConvert.timetuple().tm_yday
return J
def getGeoTransform(pathToImg):
srcImage = gdal.Open(pathToImg)
geoTrans = srcImage.GetGeoTransform()
xOrigin = geoTrans[0]
yOrigin = geoTrans[3]
pixelWidth = geoTrans[1]
pixelHeight = geoTrans[5]
return (xOrigin,yOrigin,pixelWidth,pixelHeight)
def getProj(pathToShape):
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(pathToShape, 0)
layer = dataSource.GetLayer()
srs = layer.GetSpatialRef()
return srs.ExportToProj4()
def getShape(pathToImg):
raster = gdal.Open(pathToImg)
transform = raster.GetGeoTransform()
pixelWidth = transform[1]
pixelHeight = transform[5]
return (pixelWidth,pixelHeight)
def getCentroidLatFromArray(shape,geotransform,grid):
lat = np.zeros(shape)
lon = np.zeros(shape)
originX = geotransform[0]
originY = geotransform[1]
for index in np.ndenumerate(lat):
lat.itemset(index[0], float(originX)+float(index[0][1])*float(grid)+(float(grid)/2))
lon.itemset(index[0], float(originY)-float(index[0][0])*float(grid)-(float(grid)/2))
dicoLatLong={}
dicoLatLong[0]=lat
dicoLatLong[1]=lon
return dicoLatLong
def writeTiffFromDicoArray(DicoArray,outputImg,shape,geoparam,proj=None,format=gdal.GDT_Float32):
gdalFormat = 'GTiff'
driver = gdal.GetDriverByName(gdalFormat)
dst_ds = driver.Create(outputImg, shape[1], shape[0], len(DicoArray), format)
j=1
for i in DicoArray.values():
dst_ds.GetRasterBand(j).WriteArray(i, 0)
band = dst_ds.GetRasterBand(j)
band.SetNoDataValue(0)
j+=1
originX = geoparam[0]
originY = geoparam[1]
pixelWidth = geoparam[2]
pixelHeight = geoparam[3]
dst_ds.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
def WriteTxtFileForEachPixel(outputFolder,et0_0,et0_1,et0_2,DateList,DoyList,Ray,RayShort,RayLong,Tmean,Tmax,Tmin,Hmean,Hmax,Hmin,vent,precipitation,pressure,Geo,latlon,projShape):
""" Write a Txtfile """
for i in range(0,et0_0[0].shape[0]):
for j in range(0,et0_0[0].shape[1]):
lat=latlon[0][i][j]
lon=latlon[1][i][j]
p1 = pp.Proj(projShape)
latP,lonP = p1(lat,lon)
numero = str(round(lat,2)).replace('.','')+str(round(lon,2)).replace('.','')
pathTodateFolder=outputFolder+'/POINT_'+numero+'.txt'
f = open(pathTodateFolder,'w+')
f.write('numero;altitude;lat/lon(WGS84);lat/lon(initial)\n')
f.write(str(numero)+'\t ; '+str(Geo[0][i][j])+'\t ; '+str(lat)+'/'+str(lon)+';'+str(latP)+'/'+str(lonP)+'\n')
f.write('ANNEE\tMOIS\tJOUR\tDOY\tRGSURF\tRGLONG\tRGSHORT\tTAMEAN\tTAMAX\tTAMIN\tRHMEAN\tRHMAX\tRHMIN\tVUMEAN\tPRECIP\tPRESSURE\tET0FAO56\tET0SolarEra\tEvapEraInterim\n')
f.write('[YYYY]\t[MM]\t[DD]\t[1-365]\t[MJ.m-2.jour-1]\t[MJ.m-2.jour-1]\t[MJ.m-2.jour-1]\t[Kelvin]\t[Kelvin]\t[Kelvin]\t[%]\t[%]\t[%]\t[m.s-1]\t[m.d-1]\t[kPa]\t[mm.d-1]\t[mm.d-1]\t[mm.d-1]\n')
for d in range(0,len(DateList)):
year=DateList[d].year
month=DateList[d].month
day=DateList[d].day
f.write(str(year)+'\t'+str(month)+'\t'+str(day)+'\t'+ str(DoyList[d])+'\t'+str(Ray[d][i][j])+'\t'+str(RayShort[d][i][j])+'\t'+str(RayLong[d][i][j])+'\t'+str(Tmean[d][i][j])+'\t'+str(Tmax[d][i][j])+'\t'+str(Tmin[d][i][j])+'\t'+str(Hmean[d][i][j])+'\t'+str(Hmax[d][i][j])+'\t'+str(Hmin[d][i][j])+'\t'+ str(vent[d][i][j])+'\t'+str(precipitation[d][i][j])+'\t'+str(pressure[d][i][j])+'\t'+str(et0_0[d][i][j])+'\t'+str(et0_1[d][i][j])+'\t'+str(et0_2[d][i][j])+'\n')
f.close()
return pathTodateFolder
def WritePointList(outputFolder,latlon,projShape):
pathTodateFolder=outputFolder+'/ListeStations.txt'
f = open(pathTodateFolder,'w+')
f.write('numero;lat(WGS84);lon(WGS84);lat(initial);lon(initial)\n')
for i in range(0,latlon[0].shape[0]):
for j in range(0,latlon[0].shape[1]):
lat=latlon[0][i][j]
lon=latlon[1][i][j]
p1 = pp.Proj(projShape)
latP,lonP = p1(lat,lon)
numero = str(round(lat,2)).replace('.','')+str(round(lon,2)).replace('.','')
f.write(str(numero)+';'+str(lat)+';'+str(lon)+';'+str(latP)+';'+str(lonP)+'\n')
f.close()
| yoannMoreau/Evapo_EraInterim | python/utils.py | Python | cc0-1.0 | 20,707 | 0.026002 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import client as tc
from troveclient.openstack.common.apiclient import exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.engine.clients import client_plugin
from heat.engine import constraints
class TroveClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [DATABASE] = ['database']
def _create(self):
con = self.context
endpoint_type = self._get_client_option('trove', 'endpoint_type')
args = {
'service_type': self.DATABASE,
'auth_url': con.auth_url or '',
'proxy_token': con.auth_token,
'username': None,
'password': None,
'cacert': self._get_client_option('trove', 'ca_file'),
'insecure': self._get_client_option('trove', 'insecure'),
'endpoint_type': endpoint_type
}
client = tc.Client('1.0', **args)
management_url = self.url_for(service_type=self.DATABASE,
endpoint_type=endpoint_type)
client.client.auth_token = con.auth_token
client.client.management_url = management_url
return client
def validate_datastore(self, datastore_type, datastore_version,
ds_type_key, ds_version_key):
if datastore_type:
# get current active versions
allowed_versions = self.client().datastore_versions.list(
datastore_type)
allowed_version_names = [v.name for v in allowed_versions]
if datastore_version:
if datastore_version not in allowed_version_names:
msg = _("Datastore version %(dsversion)s "
"for datastore type %(dstype)s is not valid. "
"Allowed versions are %(allowed)s.") % {
'dstype': datastore_type,
'dsversion': datastore_version,
'allowed': ', '.join(allowed_version_names)}
raise exception.StackValidationFailed(message=msg)
else:
if len(allowed_versions) > 1:
msg = _("Multiple active datastore versions exist for "
"datastore type %(dstype)s. "
"Explicit datastore version must be provided. "
"Allowed versions are %(allowed)s.") % {
'dstype': datastore_type,
'allowed': ', '.join(allowed_version_names)}
raise exception.StackValidationFailed(message=msg)
else:
if datastore_version:
msg = _("Not allowed - %(dsver)s without %(dstype)s.") % {
'dsver': ds_version_key,
'dstype': ds_type_key}
raise exception.StackValidationFailed(message=msg)
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, exceptions.RequestEntityTooLarge)
def is_conflict(self, ex):
return isinstance(ex, exceptions.Conflict)
def get_flavor_id(self, flavor):
'''
Get the id for the specified flavor name.
If the specified value is flavor id, just return it.
:param flavor: the name of the flavor to find
:returns: the id of :flavor:
:raises: exception.FlavorMissing
'''
flavor_id = None
flavor_list = self.client().flavors.list()
for o in flavor_list:
if o.name == flavor:
flavor_id = o.id
break
if o.id == flavor:
flavor_id = o.id
break
if flavor_id is None:
raise exception.FlavorMissing(flavor_id=flavor)
return flavor_id
class FlavorConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.FlavorMissing,)
def validate_with_client(self, client, flavor):
client.client_plugin('trove').get_flavor_id(flavor)
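# Illustrative sketch (not part of the original module): inside a Heat resource
# the plugin could be used roughly as follows; the datastore name, version and
# property keys below are hypothetical.
#
#   plugin = self.client_plugin('trove')
#   plugin.validate_datastore('mysql', '5.6',
#                             'datastore_type', 'datastore_version')
#   flavor_id = plugin.get_flavor_id('m1.small')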
| miguelgrinberg/heat | heat/engine/clients/os/trove.py | Python | apache-2.0 | 4,752 | 0 |
# import python modules
import os
import time
import logging
import multiprocessing
# import django modules
# import third party modules
# import project specific model classes
from config.models import Origin
# import app specific utility classes
# import app specific utility functions
from .utils import packet_chunk
from .utils import run_capture
from .utils import read_pcap
def discovery_task(origin_uuid="",
offline=False,
interface="",
duration=0,
filepath="",
origin_description=""
):
logging.basicConfig(filename="/tmp/pythos_debug.log", level=logging.DEBUG)
m = multiprocessing.Manager()
packets = m.Queue()
multiprocessing.log_to_stderr(logging.INFO)
num_processes = os.cpu_count()
if not num_processes:
num_processes = 2
pool = multiprocessing.Pool(processes=num_processes, maxtasksperchild=1)
if offline:
current_origin = Origin.objects.create(name="PCAP " + filepath,
description=origin_description,
sensor_flag=True,
plant_flag=False
)
discovery_process = multiprocessing.Process(target=read_pcap,
args=(filepath,
packets
)
)
logging.info("Starting to read pcap file: " + filepath)
else:
try:
current_origin = Origin.objects.get(uuid=origin_uuid)
except:
logging.error("Could not find specified origin: " + origin_uuid +
" Aborting."
)
return
discovery_process = multiprocessing.Process(target=run_capture,
args=(interface,
duration,
packets
)
)
logging.info("Starting live capture on: " + interface)
discovery_process.start()
logging.info("Starting " + str(num_processes) + " worker processes.")
while discovery_process.is_alive() or not packets.empty():
num_packets = packets.qsize()
chunk_size = max(num_packets//num_processes, 10000)
logging.debug(str(num_packets) + " packets in queue.")
if num_packets > chunk_size:
chunk = m.Queue()
for i in range(chunk_size):
chunk.put(packets.get())
logging.debug("Processing chunk with size: " + str(chunk_size))
pool.apply_async(packet_chunk, args=(chunk,
current_origin,
packets
)
)
elif not discovery_process.is_alive():
logging.debug("Processing last chunk.")
pool.apply(packet_chunk, args=(packets, current_origin, packets))
time.sleep(10)
pool.close()
pool.join()
if offline:
logging.info("Pcap " + filepath + " has been processed successfully.")
else:
logging.info("Live capture on " + interface + " has been completed.")
| hephestos/pythos | discovery/tasks.py | Python | gpl-3.0 | 3,656 | 0.000274 |
import unittest
import time
import _thread as thread
from threading import Barrier
from threading import Lock
from threading import Event
from t2db_buffer.buffer import GlobalBuffer
from t2db_buffer.buffer import BufferServer
from t2db_objects.objects import Tweet
from t2db_objects.objects import User
from t2db_objects.objects import TweetStreaming
from t2db_objects.objects import TweetSearch
from t2db_objects.objects import Streaming
from t2db_objects.objects import Search
from t2db_objects.objects import ObjectList
from t2db_objects.tests.common import randomInteger
from t2db_objects.tests.common import randomTweetStreaming
from t2db_objects.tests.common import randomTweetSearch
from t2db_objects.tests.common import randomStreaming
from t2db_objects.tests.common import randomSearch
from t2db_worker.parser import ParserStatus
from t2db_worker.buffer_communicator import BufferCommunicator
from t2db_worker.buffer_communicator import LocalBuffer
from t2db_worker.tests.test_parser import getOneStatus
getOneStatusLock = Lock()
def getOneStatusTS():
getOneStatusLock.acquire()
try:
status = getOneStatus()
finally:
getOneStatusLock.release()
return status
def addOneElement(sharedList):
status = getOneStatusTS()
ps = ParserStatus(status)
tweet = Tweet(ps.getTweet())
sharedList.addElement(tweet)
def addManyElements(sharedList, randomElements):
status = getOneStatusTS()
localList = []
for i in range(0, randomElements):
ps = ParserStatus(status)
tweet = Tweet(ps.getTweet())
user = User(ps.getUser())
localList.append(tweet)
localList.append(user)
sharedList.addManyElements(localList)
def oneThread(barrier, fun, *args):
fun(*args)
barrier.wait()
def oneThreadUpSync(barrier, fun, *args):
barrier.wait()
fun(*args)
def oneThreadDoubleSync(barrier1, barrier2, fun, *args):
barrier1.wait()
fun(*args)
barrier2.wait()
def createData(base):
status = getOneStatusTS()
randomTweets = base + randomInteger(99) + 1
tweetList = ObjectList()
userList = ObjectList()
streamingList = ObjectList()
searchList = ObjectList()
for i in range(base, randomTweets):
status["id"] = i
status["user"]["id"] = i
ps = ParserStatus(status)
tweet = Tweet(ps.getTweet())
user = User(ps.getUser())
tweetList.append(tweet)
userList.append(user)
streamingList.append(TweetStreaming(randomTweetStreaming(i, 1)))
searchList.append(TweetSearch(randomTweetSearch(i, 1)))
return tweetList, userList, streamingList, searchList
sharedListDataLock = Lock()
sharedListData = []
idNumber = 0
def fakeClient(host, port):
global idNumber
global sharedListDataLock
global sharedListData
sharedListDataLock.acquire()
try:
[tweetList, userList, streamingList, searchList] = createData(idNumber)
idNumber += len(tweetList.list)
finally:
sharedListDataLock.release()
bc = BufferCommunicator(host, port)
bc.sendData(tweetList, userList, streamingList, searchList)
sharedListDataLock.acquire()
try:
sharedListData.append(tweetList)
sharedListData.append(userList)
sharedListData.append(streamingList)
sharedListData.append(searchList)
finally:
sharedListDataLock.release()
"""
class TestSharedElementList(unittest.TestCase):
def setUp(self):
self.sharedList = SharedElementList()
def test_addElement(self):
addOneElement(self.sharedList)
self.assertEqual(len(self.sharedList.elementList), 1)
def test_addManyElements(self):
randomElements = randomInteger(100)
addManyElements(self.sharedList, randomElements)
self.assertEqual(len(self.sharedList.elementList), randomElements*2)
def test_addTwoThreads(self):
barrier = Barrier(2)
thread.start_new_thread(oneThread, (barrier, addOneElement, self.sharedList,))
addOneElement(self.sharedList)
barrier.wait()
self.assertEqual(len(self.sharedList.elementList), 2)
def test_addTwoThreadsManyElements(self):
barrier = Barrier(2)
randomElements = randomInteger(100)
thread.start_new_thread(oneThread, (barrier, addManyElements, self.sharedList,randomElements,))
addManyElements(self.sharedList, randomElements)
barrier.wait()
totalElements = randomElements*2*2
self.assertEqual(len(self.sharedList.elementList), totalElements)
def test_addManyThreadsManyElements(self):
        randomThreads = randomInteger(8) + 2 #Always greater than or equal to 2
barrier = Barrier(randomThreads + 1)# Include main thread
randomElements = randomInteger(100)
for i in range(0, randomThreads):
thread.start_new_thread(oneThread, (barrier, addManyElements, self.sharedList, randomElements,))
barrier.wait()
totalElements = randomElements*randomThreads*2
self.assertEqual(len(self.sharedList.elementList), totalElements)
def test_addGetAllElementsAndClean(self):
randomElements = randomInteger(100)
addManyElements(self.sharedList, randomElements)
copyElementList = self.sharedList.getAllElementsAndClean()
self.assertEqual(len(self.sharedList.elementList), 0)
self.assertEqual(len(copyElementList), randomElements*2)
def test_addGetAllElementsAndCleanWhileAdding(self):
barrier1 = Barrier(2)
barrier2 = Barrier(2)
randomElements = randomInteger(100)
thread.start_new_thread(oneThreadDoubleSync, (barrier1, barrier2, addManyElements, self.sharedList,randomElements,))
barrier1.wait()
copyElementList = self.sharedList.getAllElementsAndClean()
barrier2.wait()
totalElements = len(copyElementList) + len(self.sharedList.elementList)
self.assertEqual(randomElements*2, totalElements)
"""
def countData(lb, tweetList, userList, tweetStreamingList, tweetSearchList):
# count originals
for tweet in tweetList.list:
try:
lb.addTweet(tweet)
except:
continue
for user in userList.list:
try:
lb.addUser(user)
except:
continue
for tweetStreaming in tweetStreamingList.list:
try:
lb.addTweetStreaming(tweetStreaming)
except:
continue
for tweetSearch in tweetSearchList.list:
try:
lb.addTweetSearch(tweetSearch)
except:
continue
return lb
class TestServer(unittest.TestCase):
def setUp(self):
global sharedListData
sharedListData = []
def test_serverOneClient(self):
global sharedListData
# Create event
stopEvent = Event()
# Create server barrier
sBarrier = Barrier(2)
# Create server
bs = BufferServer(13001, 5, stopEvent, sBarrier, 5, 5, "http://localhost:8000", "quiltro", "perroCallejero")
streamingList = ObjectList()
streamingList.append(Streaming(randomStreaming(1)))
bs.communicator.service.postStreamings(streamingList)
searchList = ObjectList()
searchList.append(Search(randomSearch(1)))
bs.communicator.service.postSearches(searchList)
bs.start()
# Create barrier for client
cBarrier = Barrier(2)
# Create client
thread.start_new_thread(oneThread, (cBarrier, fakeClient, bs.getHostName(), 13001,))
cBarrier.wait()
time.sleep(5)
# Stop server
stopEvent.set()
# Wait for server
sBarrier.wait()
time.sleep(5)
# Get data and compare
numberTweets = len(bs.globalBuffer.localBuffer.tweetList.list)
numberUsers = len(bs.globalBuffer.localBuffer.userList.list)
numberTweetStreaming = len(bs.globalBuffer.localBuffer.tweetStreamingList.list)
numberTweetSearch = len(bs.globalBuffer.localBuffer.tweetSearchList.list)
self.assertEqual(numberTweets, 0)
self.assertEqual(numberUsers, 0)
self.assertEqual(numberTweetStreaming, 0)
self.assertEqual(numberTweetSearch, 0)
# count originals
lb = LocalBuffer()
lb = countData(lb, sharedListData[0], sharedListData[1], sharedListData[2]
, sharedListData[3])
originalNumberTweets = len(lb.tweetList.list)
originalNumberUsers = len(lb.userList.list)
originalNumberTweetStreaming = len(lb.tweetStreamingList.list)
originalNumberTweetSearch = len(lb.tweetSearchList.list)
self.assertEqual(originalNumberTweets, bs.communicator.sentTweets)
self.assertEqual(originalNumberUsers, bs.communicator.sentUsers)
self.assertEqual(originalNumberTweetStreaming, bs.communicator.sentTweetStreamings)
self.assertEqual(originalNumberTweetSearch, bs.communicator.sentTweetSearches)
def test_serverFiveOrLessClient(self):
global sharedListData
# Create stop event
stopEvent = Event()
# Create server barrier
sBarrier = Barrier(2)
# Create server
bs = BufferServer(13001, 5, stopEvent, sBarrier, 5, 5, "http://localhost:8000", "quiltro", "perroCallejero")
streamingList = ObjectList()
streamingList.append(Streaming(randomStreaming(1)))
bs.communicator.service.postStreamings(streamingList)
searchList = ObjectList()
searchList.append(Search(randomSearch(1)))
bs.communicator.service.postSearches(searchList)
bs.start()
# Create barrier for N clients
randomClients = randomInteger(3) + 2
cBarrier = Barrier(randomClients + 1)
# Create N clients
for i in range(0, randomClients):
thread.start_new_thread(oneThread, (cBarrier, fakeClient,
bs.getHostName(), 13001, ))
cBarrier.wait()
time.sleep(5)
# stop server
stopEvent.set()
# Wait for server
sBarrier.wait()
time.sleep(5)
# Get data and compare
numberTweets = len(bs.globalBuffer.localBuffer.tweetList.list)
numberUsers = len(bs.globalBuffer.localBuffer.userList.list)
numberTweetStreaming = len(bs.globalBuffer.localBuffer.tweetStreamingList.list)
numberTweetSearch = len(bs.globalBuffer.localBuffer.tweetSearchList.list)
self.assertEqual(numberTweets, 0)
self.assertEqual(numberUsers, 0)
self.assertEqual(numberTweetStreaming, 0)
self.assertEqual(numberTweetSearch, 0)
# count originals
originalNumberTweets = 0
originalNumberUsers = 0
originalNumberTweetStreaming = 0
originalNumberTweetSearch = 0
lb = LocalBuffer()
for i in range(0, randomClients):
lb = countData(lb, sharedListData[i*4 + 0], sharedListData[i*4 + 1],
sharedListData[i*4 + 2], sharedListData[i*4 + 3])
originalNumberTweets += len(lb.tweetList.list)
originalNumberUsers += len(lb.userList.list)
originalNumberTweetStreaming += len(lb.tweetStreamingList.list)
originalNumberTweetSearch += len(lb.tweetSearchList.list)
self.assertEqual(originalNumberTweets, bs.communicator.sentTweets)
self.assertEqual(originalNumberUsers, bs.communicator.sentUsers)
self.assertEqual(originalNumberTweetStreaming, bs.communicator.sentTweetStreamings)
self.assertEqual(originalNumberTweetSearch, bs.communicator.sentTweetSearches)
| ptorrestr/t2db_buffer | t2db_buffer/tests/test_buffer.py | Python | gpl-2.0 | 11,583 | 0.003367 |
import sys
from django.core.management.base import BaseCommand
from ietf.community.constants import SIGNIFICANT_STATES
from ietf.community.models import DocumentChangeDates
from ietf.doc.models import Document
class Command(BaseCommand):
help = (u"Update drafts in community lists by reviewing their rules")
def handle(self, *args, **options):
documents = Document.objects.filter(type='draft')
index = 1
total = documents.count()
for doc in documents.iterator():
(changes, created) = DocumentChangeDates.objects.get_or_create(document=doc)
new_version = doc.latest_event(type='new_revision')
normal_change = doc.latest_event()
significant_change = None
for event in doc.docevent_set.filter(type='changed_document'):
for state in SIGNIFICANT_STATES:
if ('<b>%s</b>' % state) in event.desc:
significant_change = event
break
changes.new_version_date = new_version and new_version.time.date()
changes.normal_change_date = normal_change and normal_change.time.date()
changes.significant_change_date = significant_change and significant_change.time.date()
changes.save()
sys.stdout.write('Document %s/%s\r' % (index, total))
sys.stdout.flush()
index += 1
print
| wpjesus/codematch | ietf/community/management/commands/update_doc_change_dates.py | Python | bsd-3-clause | 1,440 | 0.002083 |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2009 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <dominic.lowe@stfc.ac.uk>
#
# Contact email: dominic.lowe@stfc.ac.uk
# =============================================================================
"""
Web Feature Server (WFS) methods and metadata. Factory function.
"""
from feature import wfs100, wfs200
def WebFeatureService(url, version='1.0.0', xml=None):
''' wfs factory function, returns a version specific WebFeatureService object '''
if version in ['1.0', '1.0.0']:
return wfs100.WebFeatureService_1_0_0.__new__(wfs100.WebFeatureService_1_0_0, url, version, xml)
elif version in ['2.0', '2.0.0']:
return wfs200.WebFeatureService_2_0_0.__new__(wfs200.WebFeatureService_2_0_0, url, version, xml)
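# Illustrative sketch (not part of the original module): the factory hides the
# version-specific classes from the caller; the endpoint URL below is
# hypothetical.
#
#   wfs = WebFeatureService('http://example.com/geoserver/wfs', version='2.0.0')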
| okfn/owslib | owslib/wfs.py | Python | bsd-3-clause | 930 | 0.008602 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
settings_name = "settings.local" if os.name == 'nt' else "settings.remote"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_name)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| alexeiramone/django-default-template | manage.py | Python | mit | 348 | 0 |
import pymysql.cursors
from pymysql.tests import base
from pymysql import util
from pymysql.err import ProgrammingError
import time
import datetime
__all__ = ["TestConversion", "TestCursor", "TestBulkInserts"]
class TestConversion(base.PyMySQLTestCase):
def test_datatypes(self):
""" test every data type """
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)")
try:
# insert values
v = (True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset), datetime.date(1988,2,2), datetime.datetime.now(), datetime.timedelta(5,6), datetime.time(16,32), time.localtime())
c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
r = c.fetchone()
self.assertEqual(util.int2byte(1), r[0])
self.assertEqual(v[1:8], r[1:8])
# mysql throws away microseconds so we need to check datetimes
# specially. additionally times are turned into timedeltas.
self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
self.assertEqual(v[9], r[9]) # just timedeltas
self.assertEqual(datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10])
self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
c.execute("delete from test_datatypes")
# check nulls
c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
r = c.fetchone()
self.assertEqual(tuple([None] * 12), r)
c.execute("delete from test_datatypes")
# check sequence type
c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)")
c.execute("select l from test_datatypes where i in %s order by i", ((2,6),))
r = c.fetchall()
self.assertEqual(((4,),(8,)), r)
finally:
c.execute("drop table test_datatypes")
def test_dict(self):
""" test dict escaping """
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_dict (a integer, b integer, c integer)")
try:
c.execute("insert into test_dict (a,b,c) values (%(a)s, %(b)s, %(c)s)", {"a":1,"b":2,"c":3})
c.execute("select a,b,c from test_dict")
self.assertEqual((1,2,3), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_string(self):
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_dict (a text)")
test_value = "I am a test string"
try:
c.execute("insert into test_dict (a) values (%s)", test_value)
c.execute("select a from test_dict")
self.assertEqual((test_value,), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_integer(self):
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_dict (a integer)")
test_value = 12345
try:
c.execute("insert into test_dict (a) values (%s)", test_value)
c.execute("select a from test_dict")
self.assertEqual((test_value,), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_big_blob(self):
""" test tons of data """
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_big_blob (b blob)")
try:
data = "pymysql" * 1024
c.execute("insert into test_big_blob (b) values (%s)", (data,))
c.execute("select b from test_big_blob")
self.assertEqual(data.encode(conn.charset), c.fetchone()[0])
finally:
c.execute("drop table test_big_blob")
def test_untyped(self):
""" test conversion of null, empty string """
conn = self.connections[0]
c = conn.cursor()
c.execute("select null,''")
self.assertEqual((None,u''), c.fetchone())
c.execute("select '',null")
self.assertEqual((u'',None), c.fetchone())
def test_timedelta(self):
""" test timedelta conversion """
conn = self.connections[0]
c = conn.cursor()
c.execute("select time('12:30'), time('23:12:59'), time('23:12:59.05100')")
self.assertEqual((datetime.timedelta(0, 45000),
datetime.timedelta(0, 83579),
datetime.timedelta(0, 83579, 51000)),
c.fetchone())
def test_datetime(self):
""" test datetime conversion """
conn = self.connections[0]
c = conn.cursor()
dt = datetime.datetime(2013,11,12,9,9,9,123450)
try:
c.execute("create table test_datetime (id int, ts datetime(6))")
c.execute("insert into test_datetime values (1,'2013-11-12 09:09:09.12345')")
c.execute("select ts from test_datetime")
self.assertEqual((dt,),c.fetchone())
except ProgrammingError:
            # Server doesn't support fractional seconds in DATETIME columns
            # (datetime(6)); skip the check on such versions.
pass
finally:
c.execute("drop table if exists test_datetime")
class TestCursor(base.PyMySQLTestCase):
    # This test case does not work quite right yet; we substitute None for the
    # erroneous field, which is compatible with the DB-API 2.0 spec and has not
    # broken any unit tests for anything we have tried.
#def test_description(self):
# """ test description attribute """
# # result is from MySQLdb module
# r = (('Host', 254, 11, 60, 60, 0, 0),
# ('User', 254, 16, 16, 16, 0, 0),
# ('Password', 254, 41, 41, 41, 0, 0),
# ('Select_priv', 254, 1, 1, 1, 0, 0),
# ('Insert_priv', 254, 1, 1, 1, 0, 0),
# ('Update_priv', 254, 1, 1, 1, 0, 0),
# ('Delete_priv', 254, 1, 1, 1, 0, 0),
# ('Create_priv', 254, 1, 1, 1, 0, 0),
# ('Drop_priv', 254, 1, 1, 1, 0, 0),
# ('Reload_priv', 254, 1, 1, 1, 0, 0),
# ('Shutdown_priv', 254, 1, 1, 1, 0, 0),
# ('Process_priv', 254, 1, 1, 1, 0, 0),
# ('File_priv', 254, 1, 1, 1, 0, 0),
# ('Grant_priv', 254, 1, 1, 1, 0, 0),
# ('References_priv', 254, 1, 1, 1, 0, 0),
# ('Index_priv', 254, 1, 1, 1, 0, 0),
# ('Alter_priv', 254, 1, 1, 1, 0, 0),
# ('Show_db_priv', 254, 1, 1, 1, 0, 0),
# ('Super_priv', 254, 1, 1, 1, 0, 0),
# ('Create_tmp_table_priv', 254, 1, 1, 1, 0, 0),
# ('Lock_tables_priv', 254, 1, 1, 1, 0, 0),
# ('Execute_priv', 254, 1, 1, 1, 0, 0),
# ('Repl_slave_priv', 254, 1, 1, 1, 0, 0),
# ('Repl_client_priv', 254, 1, 1, 1, 0, 0),
# ('Create_view_priv', 254, 1, 1, 1, 0, 0),
# ('Show_view_priv', 254, 1, 1, 1, 0, 0),
# ('Create_routine_priv', 254, 1, 1, 1, 0, 0),
# ('Alter_routine_priv', 254, 1, 1, 1, 0, 0),
# ('Create_user_priv', 254, 1, 1, 1, 0, 0),
# ('Event_priv', 254, 1, 1, 1, 0, 0),
# ('Trigger_priv', 254, 1, 1, 1, 0, 0),
# ('ssl_type', 254, 0, 9, 9, 0, 0),
# ('ssl_cipher', 252, 0, 65535, 65535, 0, 0),
# ('x509_issuer', 252, 0, 65535, 65535, 0, 0),
# ('x509_subject', 252, 0, 65535, 65535, 0, 0),
# ('max_questions', 3, 1, 11, 11, 0, 0),
# ('max_updates', 3, 1, 11, 11, 0, 0),
# ('max_connections', 3, 1, 11, 11, 0, 0),
# ('max_user_connections', 3, 1, 11, 11, 0, 0))
# conn = self.connections[0]
# c = conn.cursor()
# c.execute("select * from mysql.user")
#
# self.assertEqual(r, c.description)
def test_fetch_no_result(self):
""" test a fetchone() with no rows """
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_nr (b varchar(32))")
try:
data = "pymysql"
c.execute("insert into test_nr (b) values (%s)", (data,))
self.assertEqual(None, c.fetchone())
finally:
c.execute("drop table test_nr")
def test_aggregates(self):
""" test aggregate functions """
conn = self.connections[0]
c = conn.cursor()
try:
c.execute('create table test_aggregates (i integer)')
for i in range(0, 10):
c.execute('insert into test_aggregates (i) values (%s)', (i,))
c.execute('select sum(i) from test_aggregates')
r, = c.fetchone()
self.assertEqual(sum(range(0,10)), r)
finally:
c.execute('drop table test_aggregates')
def test_single_tuple(self):
""" test a single tuple """
conn = self.connections[0]
c = conn.cursor()
try:
c.execute("create table mystuff (id integer primary key)")
c.execute("insert into mystuff (id) values (1)")
c.execute("insert into mystuff (id) values (2)")
c.execute("select id from mystuff where id in %s", ((1,),))
self.assertEqual([(1,)], list(c.fetchall()))
finally:
c.execute("drop table mystuff")
class TestBulkInserts(base.PyMySQLTestCase):
cursor_type = pymysql.cursors.DictCursor
def setUp(self):
super(TestBulkInserts, self).setUp()
self.conn = conn = self.connections[0]
c = conn.cursor(self.cursor_type)
        # create a table and some data to query
c.execute("drop table if exists bulkinsert")
c.execute(
"""CREATE TABLE bulkinsert
(
id int(11),
name char(20),
age int,
height int,
PRIMARY KEY (id)
)
""")
def _verify_records(self, data):
conn = self.connections[0]
cursor = conn.cursor()
cursor.execute("SELECT id, name, age, height from bulkinsert")
result = cursor.fetchall()
self.assertEqual(sorted(data), sorted(result))
def test_bulk_insert(self):
conn = self.connections[0]
cursor = conn.cursor()
data = [(0, "bob", 21, 123), (1, "jim", 56, 45), (2, "fred", 100, 180)]
cursor.executemany("insert into bulkinsert (id, name, age, height) "
"values (%s,%s,%s,%s)", data)
self.assertEqual(
cursor._last_executed, bytearray(
b"insert into bulkinsert (id, name, age, height) values "
b"(0,'bob',21,123),(1,'jim',56,45),(2,'fred',100,180)"))
cursor.execute('commit')
self._verify_records(data)
def test_bulk_insert_multiline_statement(self):
conn = self.connections[0]
cursor = conn.cursor()
data = [(0, "bob", 21, 123), (1, "jim", 56, 45), (2, "fred", 100, 180)]
cursor.executemany("""insert
into bulkinsert (id, name,
age, height)
values (%s,
%s , %s,
%s )
""", data)
self.assertEqual(cursor._last_executed, bytearray(b"""insert
into bulkinsert (id, name,
age, height)
values (0,
'bob' , 21,
123 ),(1,
'jim' , 56,
45 ),(2,
'fred' , 100,
180 )"""))
cursor.execute('commit')
self._verify_records(data)
def test_bulk_insert_single_record(self):
conn = self.connections[0]
cursor = conn.cursor()
data = [(0, "bob", 21, 123)]
cursor.executemany("insert into bulkinsert (id, name, age, height) "
"values (%s,%s,%s,%s)", data)
cursor.execute('commit')
self._verify_records(data)
if __name__ == "__main__":
import unittest
unittest.main()
| MonicaHsu/truvaluation | venv/lib/python2.7/site-packages/pymysql/tests/test_basic.py | Python | mit | 12,154 | 0.003291 |