Dataset schema (one record per source file):

  column     type            range / values
  repo_name  stringlengths   5 .. 100
  path       stringlengths   4 .. 231
  language   stringclasses   1 value
  license    stringclasses   15 values
  size       int64           6 .. 947k
  score      float64         0 .. 0.34
  prefix     stringlengths   0 .. 8.16k
  middle     stringlengths   3 .. 512
  suffix     stringlengths   0 .. 8.17k
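The prefix, middle, and suffix columns appear to hold three contiguous spans of each file's text; the records below read continuously across them, so the original file is their concatenation. A minimal sketch of a single record under this schema, with hypothetical placeholder values (none of them taken from the rows below), and how the full file text would be reassembled in Python:

    # Hypothetical record; field names follow the schema above, all values are placeholders.
    record = {
        "repo_name": "example-org/example-repo",
        "path": "src/example.py",
        "language": "Python",
        "license": "mit",
        "size": 68,    # assumed to be len(prefix) + len(middle) + len(suffix) in bytes
        "score": 0.0,
        "prefix": "def greet(name):\n    ",
        "middle": "return 'Hello, ' + name",
        "suffix": "\n\nprint(greet('world'))\n",
    }

    # The original file text is the concatenation of the three text spans.
    full_text = record["prefix"] + record["middle"] + record["suffix"]
    print(full_text)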
dianafprieto/SS_2017
scripts/07_NB_StreamTubes.py
Python
mit
4,407
0.00295
import vtk

reader = vtk.vtkRectilinearGridReader()
reader.SetFileName("D:/Notebooks_Bogota2017/SS_2017/data/jet4_0.500.vtk")
reader.Update()

output = reader.GetOutput()
xmi, xma, ymi, yma, zmi, zma = output.GetBounds()

# Color Transfer Function and LookUpTable
# Create transfer mapping scalar value to color
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(0.15, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(0.3, 0.0, 1.0, 0.0)

tableSize = 30
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(tableSize)
lut.Build()
for i in range(0, tableSize):
    rgb = list(colorTransferFunction.GetColor(float(i) / tableSize)) + [0.2]
    lut.SetTableValue(i, rgb)

# A plane for the seeds
plane = vtk.vtkPlaneSource()
plane.SetOrigin(0, 0, 0)
plane.SetPoint1(xma, 0, 0)
plane.SetPoint2(0, 0, zma)
plane.SetXResolution(20)
plane.SetYResolution(20)

# Add the outline of the plane
outline = vtk.vtkOutlineFilter()
outline.SetInputData(plane.GetOutput())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(1, 1, 1)

# Compute streamlines
streamline = vtk.vtkStreamTracer()
streamline.SetSourceConnection(plane.GetOutputPort())
streamline.SetInputConnection(reader.GetOutputPort())
streamline.SetIntegrationDirectionToForward()
#streamline.SetIntegrationDirectionToBackward()
#streamline.SetIntegrationDirectionToBoth()
streamline.SetMaximumPropagation(1)
streamline.SetComputeVorticity(True)

# Visualize stream as ribbons (= Stream ribbons); i.e. we need to pass the streamlines through the ribbon filter
streamRibbons = vtk.vtkRibbonFilter()
streamRibbons.SetInputConnection(streamline.GetOutputPort())
streamRibbons.SetWidth(0.01)
streamRibbons.Update()
streamRibbonsMapper = vtk.vtkPolyDataMapper()
streamRibbonsMapper.SetScalarModeToUsePointFieldData()
streamRibbonsMapper.SetInputConnection(streamRibbons.GetOutputPort())
# ***TODO: apply a transfer function to the stream ribbons
streamRibbonsActor = vtk.vtkActor()
streamRibbonsActor.SetMapper(streamRibbonsMapper)

# Visualize stream as tubes (= Stream tubes)
streamTubes = vtk.vtkTubeFilter()
streamTubes.SetInputConnection(streamline.GetOutputPort())
streamTubes.SetRadius(0.01)
streamTubes.Update()
streamTubeMapper = vtk.vtkPolyDataMapper()
streamTubeMapper.SetLookupTable(lut)
streamTubeMapper.SetInputConnection(streamTubes.GetOutputPort())
streamTubeMapper.SetScalarVisibility(True)
streamTubeMapper.SetScalarModeToUsePointFieldData()
streamTubeMapper.SelectColorArray('vectors')
streamTubeMapper.SetScalarRange((reader.GetOutput().GetPointData().GetVectors().GetRange(-1)))
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(streamTubeMapper)

# Visualize stream as lines (= Stream lines)
# Pass the streamlines to the mapper
streamlineMapper = vtk.vtkPolyDataMapper()
streamlineMapper.SetLookupTable(lut)
streamlineMapper.SetInputConnection(streamline.GetOutputPort())
streamlineMapper.SetScalarVisibility(True)
streamlineMapper.SetScalarModeToUsePointFieldData()
streamlineMapper.SelectColorArray('vectors')
streamlineMapper.SetScalarRange((reader.GetOutput().GetPointData().GetVectors().GetRange(-1)))

# Pass the mapper to the actor
streamlineActor = vtk.vtkActor()
streamlineActor.SetMapper(streamlineMapper)
streamlineActor.GetProperty().SetLineWidth(2.0)

# Add the outline of the data set
gOutline = vtk.vtkRectilinearGridOutlineFilter()
gOutline.SetInputData(output)
gOutlineMapper = vtk.vtkPolyDataMapper()
gOutlineMapper.SetInputConnection(gOutline.GetOutputPort())
gOutlineActor = vtk.vtkActor()
gOutlineActor.SetMapper(gOutlineMapper)
gOutlineActor.GetProperty().SetColor(0.5, 0.5, 0.5)

# Rendering / Window
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.0, 0.0, 0.0)
#renderer.AddActor(streamlineActor)
# renderer.AddActor(streamRibbonsActor)
renderer.AddActor(streamTubeActor)
renderer.AddActor(outlineActor)
renderer.AddActor(gOutlineActor)

renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(500, 500)
renderWindow.Render()

interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
interactor.SetRenderWindow(renderWindow)
interactor.Initialize()
interactor.Start()
GirlsCodePy/girlscode-coursebuilder
modules/review/review_tests.py
Python
gpl-3.0
68,643
0.000131
# coding: utf-8 # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functional tests for modules/review/review.py.""" __author__ = [ 'johncox@google.com (John Cox)', ] import datetime import types import urllib from common import crypto from common import schema_transforms from common import utils as common_utils from controllers import sites from models import data_sources from models import models from models import student_work from models import transforms from modules.review import domain from modules.review import peer from modules.review import review as review_module from modules.upload import upload from tests.functional import actions from google.appengine.ext import db class ManagerTest(actions.TestBase): """Tests for review.Manager.""" def setUp(self): super(ManagerTest, self).setUp() self.reviewee = models.Student(key_name='reviewee@example.com') self.reviewee_key = self.reviewee.put() self.reviewer = models.Student(key_name='reviewer@example.com') self.reviewer_key = self.reviewer.put() self.unit_id = '1' self.submission_key = db.Key.from_path( student_work.Submission.kind(), student_work.Submission.key_name( reviewee_key=self.reviewee_key, unit_id=self.unit_id)) def test_add_reviewer_adds_new_step_and_summary(self): step_key = review_module.Manager.add_reviewer( self.unit_id, self.submission_key, self.reviewee_key, self.reviewer_key) step = db.get(step_key) summary = db.get(step.review_summary_key) self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind) self.assertEqual(self.reviewee_key, step.reviewee_key) self.assertEqual(self.reviewer_key, step.reviewer_key) self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state) self.assertEqual(self.submission_key, step.submission_key) self.assertEqual(self.unit_id, step.unit_id) self.assertEqual(1, summary.assigned_count) self.assertEqual(0, summary.completed_count) self.assertEqual(0, summary.expired_count) self.assertEqual(self.reviewee_key, summary.reviewee_key) self.assertEqual(self.submission_key, summary.submission_key) self.assertEqual(self.unit_id, summary.unit_id) def test_add_reviewer_existing_raises_assertion_when_summary_missing(self): missing_key = db.Key.from_path( peer.ReviewSummary.kind(), 'no_summary_found_for_key') peer.ReviewStep( assigner_kind=domain.ASSIGNER_KIND_AUTO, review_key=db.Key.from_path(student_work.Review.kind(), 'review'), review_summary_key=missing_key, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id ).put() self.assertRaises( AssertionError, review_module.Manager.add_reviewer, self.unit_id, self.submission_key, self.reviewee_key, self.reviewer_key) def test_add_reviewer_existing_raises_transition_error_when_assigned(self): summary_key = peer.ReviewSummary( assigned_count=1, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, unit_id=self.unit_id ).put() peer.ReviewStep( 
assigner_kind=domain.ASSIGNER_KIND_AUTO, review_key=db.Key.from_path(student_work.Review.kind(), 'review'), review_summary_key=summary_key, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id ).put() self.assertRaises( domain.TransitionError, review_module.Manager.add_reviewer, self.unit_id, self.submission_key, self.reviewee_key, self.reviewer_key) def test_add_reviewer_existing_raises_transition_error_when_completed(self): summary_key = peer.ReviewSummary( completed_count=1, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, unit_id=self.unit_id ).put() peer.ReviewStep( assigner_kind=domain.ASSIGNER_KIND_AUTO, review_key=db.Key.from_path(student_work.Review.kind(), 'review'), review_summary_key=summary_key, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id ).put() self.assertRaises( domain.TransitionError, review_module.Manager.add_reviewer, self.unit_id, self.submission_key, self.reviewee_key, self.reviewer_key) def test_add_reviewer_unremoved_existing_changes_expired_to_assigned(self): summary_key = peer.ReviewSummary( expired_count=1, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, unit_id=self.unit_id ).put() step_key = peer.ReviewStep( assigner_kind=domain.ASSIGNER_KIND_AUTO, review_key=db.Key.from_path(student_work.Review.kind(), 'review'), review_summary_key=summary_key, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id ).put() review_module.Manager.add_reviewer( self.unit_id, self.submission_key, self.reviewee_key, self.reviewer_key) step, summary = db.get([step_key, summary_key]) self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind) self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state) self.assertFalse(step.removed) self.assertEqual(1, summary.assigned_count) self.assertEqual(0, summary.expired_count) def test_add_reviewer_removed_unremoves_assigned_step(self): summary_key = peer.ReviewSummary( reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, unit_id=self.unit_id ).put() step_key = peer.ReviewStep( assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True, review_key=db.Key.from_path(student_work.Review.kind(), 'review'), review_summary_key=summary_key, reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id ).put() review_module.Manager.add_reviewer( self.unit_id, self.submission_key, self.reviewee_key, self.reviewer_key) step, summary = db.get([step_key, summary_key]) self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind) self.assertEqual(domain.REVIEW_STATE_ASSIGNED
, step.state) self.assertFalse(step.removed) self.assertEqual(1, summary.assigned_count) def test_add_reviewer_removed_unremoves_completed_step(self): summary_key = peer.ReviewSummary( reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key, submission_key=self.submission_key, unit_id=self.unit_id
).put() step_key = peer.ReviewStep( assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
chunweiyuan/xarray
xarray/backends/rasterio_.py
Python
apache-2.0
12,619
0
import os import warnings from collections import OrderedDict from distutils.version import LooseVersion import numpy as np from .. import DataArray from ..core import indexing from ..core.utils import is_scalar from .common import BackendArray from .file_manager import CachingFileManager from .locks import SerializableLock # TODO: should this be GDAL_LOCK instead? RASTERIO_LOCK = SerializableLock() _ERROR_MSG = ('The kind of indexing operation you are trying to do is not ' 'valid on rasterio files. Try to load your data with ds.load()' 'first.') class RasterioArrayWrapper(BackendArray): """A wrapper around rasterio dataset objects""" def __init__(self, manager, lock, vrt_params=None): from rasterio.vrt import WarpedVRT self.manager = manager self.lock = lock # cannot save riods as an attribute: this would break pickleability riods = manager.acquire() if vrt_params is not None: riods = WarpedVRT(riods, **vrt_params) self.vrt_params = vrt_params self._shape = (riods.count, riods.height, riods.width) dtypes = riods.dtypes if not np.all(np.asarray(dtypes) == dtypes[0]): raise ValueError('All bands should have the same dtype') self._dtype = np.dtype(dtypes[0]) @property def dtype(self): return self._dtype @property def shape(self): return self._shape def _get_indexer(self, key): """ Get indexer for ra
sterio array. Parameter --------- key: tuple of int
Returns ------- band_key: an indexer for the 1st dimension window: two tuples. Each consists of (start, stop). squeeze_axis: axes to be squeezed np_ind: indexer for loaded numpy array See also -------- indexing.decompose_indexer """ assert len(key) == 3, 'rasterio datasets should always be 3D' # bands cannot be windowed but they can be listed band_key = key[0] np_inds = [] # bands (axis=0) cannot be windowed but they can be listed if isinstance(band_key, slice): start, stop, step = band_key.indices(self.shape[0]) band_key = np.arange(start, stop, step) # be sure we give out a list band_key = (np.asarray(band_key) + 1).tolist() if isinstance(band_key, list): # if band_key is not a scalar np_inds.append(slice(None)) # but other dims can only be windowed window = [] squeeze_axis = [] for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])): if isinstance(k, slice): # step is always positive. see indexing.decompose_indexer start, stop, step = k.indices(n) np_inds.append(slice(None, None, step)) elif is_scalar(k): # windowed operations will always return an array # we will have to squeeze it later squeeze_axis.append(- (2 - i)) start = k stop = k + 1 else: start, stop = np.min(k), np.max(k) + 1 np_inds.append(k - start) window.append((start, stop)) if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray): # do outer-style indexing np_inds[-2:] = np.ix_(*np_inds[-2:]) return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds) def _getitem(self, key): from rasterio.vrt import WarpedVRT band_key, window, squeeze_axis, np_inds = self._get_indexer(key) if not band_key or any(start == stop for (start, stop) in window): # no need to do IO shape = (len(band_key),) + tuple( stop - start for (start, stop) in window) out = np.zeros(shape, dtype=self.dtype) else: with self.lock: riods = self.manager.acquire(needs_lock=False) if self.vrt_params is not None: riods = WarpedVRT(riods, **self.vrt_params) out = riods.read(band_key, window=window) if squeeze_axis: out = np.squeeze(out, axis=squeeze_axis) return out[np_inds] def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER, self._getitem) def _parse_envi(meta): """Parse ENVI metadata into Python data structures. See the link for information on the ENVI header file format: http://www.harrisgeospatial.com/docs/enviheaderfiles.html Parameters ---------- meta : dict Dictionary of keys and str values to parse, as returned by the rasterio tags(ns='ENVI') call. Returns ------- parsed_meta : dict Dictionary containing the original keys and the parsed values """ def parsevec(s): return np.fromstring(s.strip('{}'), dtype='float', sep=',') def default(s): return s.strip('{}') parse = {'wavelength': parsevec, 'fwhm': parsevec} parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()} return parsed_meta def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None): """Open a file with rasterio (experimental). This should work with any file that rasterio can open (most often: geoTIFF). The x and y coordinates are generated automatically from the file's geoinformation, shifted to the center of each pixel (see `"PixelIsArea" Raster Space <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_ for more information). 
You can generate 2D coordinates from the file's attributes with:: from affine import Affine da = xr.open_rasterio('path_to_file.tif') transform = Affine.from_gdal(*da.attrs['transform']) nx, ny = da.sizes['x'], da.sizes['y'] x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform Parameters ---------- filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT Path to the file to open. Or already open rasterio dataset. parse_coordinates : bool, optional Whether to parse the x and y coordinates out of the file's ``transform`` attribute or not. The default is to automatically parse the coordinates only if they are rectilinear (1D). It can be useful to set ``parse_coordinates=False`` if your files are very large or if you don't need the coordinates. chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new DataArray into a dask array. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. lock : False, True or threading.Lock, optional If chunks is provided, this argument is passed on to :py:func:`dask.array.from_array`. By default, a global lock is used to avoid issues with concurrent access to the same file when using dask's multithreaded backend. Returns ------- data : DataArray The newly created DataArray. """ import rasterio from rasterio.vrt import WarpedVRT vrt_params = None if isinstance(filename, rasterio.io.DatasetReader): filename = filename.name elif isinstance(filename, rasterio.vrt.WarpedVRT): vrt = filename filename = vrt.src_dataset.name vrt_params = dict(crs=vrt.crs.to_string(), resampling=vrt.resampling, src_nodata=vrt.src_nodata, dst_nodata=vrt.dst_nodata,
GENI-NSF/gram
src/vmoc/register_controller.py
Python
mit
2,320
0.003879
#!/usr/bin/python
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------

import json
import sys
from vmoc.VMOCConfig import VMOCSliceConfiguration, VMOCVLANConfiguration

# Usage register_controller.sh slice [vlan controller ....] [unregister]

if len(sys.argv) < 2:
    print "Usage: register_controller.py slice [vlan controller ...] [unregister]"
    sys.exit()

print sys.argv[1]
print sys.argv[2]

slice_id = sys.argv[1]
vlan_controllers = json.loads(sys.argv[2])

vlan_configs = []
for i in range(len(vlan_controllers)):
    if i == 2*(i/2):
        vlan_tag = vlan_controllers[i]
        controller_url = vlan_controllers[i+1]
        vlan_config = \
            VMOCVLANConfiguration(vlan_tag=vlan_tag, \
                                  controller_url=controller_url)
        vlan_configs.append(vlan_config)

slice_config = \
    VMOCSliceConfiguration(slice_id=slice_id, vlan_configs=vlan_configs)

unregister = False
if len(sys.argv) > 3:
    unregister = bool(sys.argv[3])

print str(slice_config)

command = 'register'
if unregister:
    command = 'unregister'
command = command + " " + json.dumps(slice_config.__attr__())
print command
michaelmontano/snowflakepy
src/snowflakeserver.py
Python
bsd-3-clause
1,494
0.014726
import sys
sys.path = ['..'] + sys.path

import zope

from twisted.internet import reactor

from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol

from lib.genpy.snowflake import Snowflake
from lib.genpy.snowflake.ttypes import *

import idworker

class SnowflakeServer(object):
    zope.interface.implements(Snowflake.Iface)

    def __init__(self, worker_id, datacenter_id):
        self.worker = idworker.IdWorker(worker_id, datacenter_id)

    def get_worker_id(self):
        return self.worker.get_worker_id()

    def get_datacenter_id(self):
        return self.worker.get_datacenter_id()

    def get_timestamp(self):
        return self.worker.get_timestamp()

    def get_id(self):
        return self.worker.get_id()

def print_usage():
    print 'python snowflakeserver.py <port> <worker_id> <datacenter_id>'
    print 'e.g. python snowflakeserver.py 1111 1 1'

def main():
    if len(sys.argv) != 4:
        return print_usage()

    port = int(sys.argv[1])
    worker_id = int(sys.argv[2])
    datacenter_id = int(sys.argv[3])

    reactor.listenTCP(port, TTwisted.ThriftServerFactory(
        processor=Snowflake.Processor(SnowflakeServer(worker_id, datacenter_id)),
        iprot_factory=TBinaryProtocol.TBinaryProtocolFactory()
    ))
    reactor.run()

if __name__ == '__main__':
    sys.exit(main())
goodcrypto/goodcrypto-libs
reinhardt/templatetags/data_img.py
Python
gpl-3.0
828
0.014493
'''
    Convert an image file to a data uri.

    Copyright 2012 GoodCrypto
    Last modified: 2013-11-13

    This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''

import os.path
from django import template

import reinhardt.data_image

register = template.Library()

@register.filter
def data_img(filename, browser=None):
    ''' Encode an image file in base 64 as a data uri.

        The filename is relative to settings.STATIC_URL/settings.STATIC_ROOT.
        If the datauri is too large or anything goes wrong, returns the url to the filename.

        Example:
            <img alt="embedded image" src="{{ 'images/myimage.png'|data_img:browser }}">
    '''

    return reinhardt.data_image.data_image(filename, browser=browser)
endlessm/chromium-browser
third_party/catapult/common/py_vulcanize/py_vulcanize/resource_loader.py
Python
bsd-3-clause
7,961
0.006657
# Copyright (c) 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ResourceFinder is a helper class for finding resources given their name.""" import codecs import os from py_vulcanize import module from py_vulcanize import style_sheet as style_sheet_module from py_vulcanize import resource as resource_module from py_vulcanize import html_module from py_vulcanize import strip_js_comments class ResourceLoader(object): """Manges loading modules and their dependencies from files. Modules handle parsing and the construction of their individual dependency pointers. The loader deals with bookkeeping of what has been loaded, and mapping names
to file resources. """ def __init__(self, project): self.project = project self.stripped_js_by_filename = {} self.loaded_modules = {} self.loaded_raw_scripts = {} self.loaded_style_sheets = {} self.loaded_images = {} @property def source_paths(self): """A list of base directories to search for modules under."""
return self.project.source_paths def FindResource(self, some_path, binary=False): """Finds a Resource for the given path. Args: some_path: A relative or absolute path to a file. Returns: A Resource or None. """ if os.path.isabs(some_path): return self.FindResourceGivenAbsolutePath(some_path, binary) else: return self.FindResourceGivenRelativePath(some_path, binary) def FindResourceGivenAbsolutePath(self, absolute_path, binary=False): """Returns a Resource for the given absolute path.""" candidate_paths = [] for source_path in self.source_paths: if absolute_path.startswith(source_path): candidate_paths.append(source_path) if len(candidate_paths) == 0: return None # Sort by length. Longest match wins. candidate_paths.sort(lambda x, y: len(x) - len(y)) longest_candidate = candidate_paths[-1] return resource_module.Resource(longest_candidate, absolute_path, binary) def FindResourceGivenRelativePath(self, relative_path, binary=False): """Returns a Resource for the given relative path.""" absolute_path = None for script_path in self.source_paths: absolute_path = os.path.join(script_path, relative_path) if os.path.exists(absolute_path): return resource_module.Resource(script_path, absolute_path, binary) return None def _FindResourceGivenNameAndSuffix( self, requested_name, extension, return_resource=False): """Searches for a file and reads its contents. Args: requested_name: The name of the resource that was requested. extension: The extension for this requested resource. Returns: A (path, contents) pair. """ pathy_name = requested_name.replace('.', os.sep) filename = pathy_name + extension resource = self.FindResourceGivenRelativePath(filename) if return_resource: return resource if not resource: return None, None return _read_file(resource.absolute_path) def FindModuleResource(self, requested_module_name): """Finds a module javascript file and returns a Resource, or none.""" js_resource = self._FindResourceGivenNameAndSuffix( requested_module_name, '.js', return_resource=True) html_resource = self._FindResourceGivenNameAndSuffix( requested_module_name, '.html', return_resource=True) if js_resource and html_resource: if html_module.IsHTMLResourceTheModuleGivenConflictingResourceNames( js_resource, html_resource): return html_resource return js_resource elif js_resource: return js_resource return html_resource def LoadModule(self, module_name=None, module_filename=None, excluded_scripts=None): assert bool(module_name) ^ bool(module_filename), ( 'Must provide either module_name or module_filename.') if module_filename: resource = self.FindResource(module_filename) if not resource: raise Exception('Could not find %s in %s' % ( module_filename, repr(self.source_paths))) module_name = resource.name else: resource = None # Will be set if we end up needing to load. if module_name in self.loaded_modules: assert self.loaded_modules[module_name].contents return self.loaded_modules[module_name] if not resource: # happens when module_name was given resource = self.FindModuleResource(module_name) if not resource: raise module.DepsException('No resource for module "%s"' % module_name) m = html_module.HTMLModule(self, module_name, resource) self.loaded_modules[module_name] = m # Fake it, this is probably either polymer.min.js or platform.js which are # actually .js files.... 
if resource.absolute_path.endswith('.js'): return m m.Parse(excluded_scripts) m.Load(excluded_scripts) return m def LoadRawScript(self, relative_raw_script_path): resource = None for source_path in self.source_paths: possible_absolute_path = os.path.join( source_path, os.path.normpath(relative_raw_script_path)) if os.path.exists(possible_absolute_path): resource = resource_module.Resource( source_path, possible_absolute_path) break if not resource: raise module.DepsException( 'Could not find a file for raw script %s in %s' % (relative_raw_script_path, self.source_paths)) assert relative_raw_script_path == resource.unix_style_relative_path, ( 'Expected %s == %s' % (relative_raw_script_path, resource.unix_style_relative_path)) if resource.absolute_path in self.loaded_raw_scripts: return self.loaded_raw_scripts[resource.absolute_path] raw_script = module.RawScript(resource) self.loaded_raw_scripts[resource.absolute_path] = raw_script return raw_script def LoadStyleSheet(self, name): if name in self.loaded_style_sheets: return self.loaded_style_sheets[name] resource = self._FindResourceGivenNameAndSuffix( name, '.css', return_resource=True) if not resource: raise module.DepsException( 'Could not find a file for stylesheet %s' % name) style_sheet = style_sheet_module.StyleSheet(self, name, resource) style_sheet.load() self.loaded_style_sheets[name] = style_sheet return style_sheet def LoadImage(self, abs_path): if abs_path in self.loaded_images: return self.loaded_images[abs_path] if not os.path.exists(abs_path): raise module.DepsException("url('%s') did not exist" % abs_path) res = self.FindResourceGivenAbsolutePath(abs_path, binary=True) if res is None: raise module.DepsException("url('%s') was not in search path" % abs_path) image = style_sheet_module.Image(res) self.loaded_images[abs_path] = image return image def GetStrippedJSForFilename(self, filename, early_out_if_no_py_vulcanize): if filename in self.stripped_js_by_filename: return self.stripped_js_by_filename[filename] with open(filename, 'r') as f: contents = f.read(4096) if early_out_if_no_py_vulcanize and ('py_vulcanize' not in contents): return None s = strip_js_comments.StripJSComments(contents) self.stripped_js_by_filename[filename] = s return s def _read_file(absolute_path): """Reads a file and returns a (path, contents) pair. Args: absolute_path: Absolute path to a file. Raises: Exception: The given file doesn't exist. IOError: There was a problem opening or reading the file. """ if not os.path.exists(absolute_path): raise Exception('%s not found.' % absolute_path) f = codecs.open(absolute_path, mode='r', encoding='utf-8') contents = f.read() f.close() return absolute_path, contents
google-research/google-research
tf3d/utils/voxel_utils.py
Python
apache-2.0
21,191
0.004672
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility function for voxels.""" import gin import gin.tf import tensorflow as tf from tf3d.layers import sparse_voxel_net_utils from tf3d.utils import shape_utils compute_pooled_voxel_indices = sparse_voxel_net_utils.compute_pooled_voxel_indices pool_features_given_indices = sparse_voxel_net_utils.pool_features_given_indices def crop_and_pad_voxels(voxels, start_coordinates, end_coordinates): """Crops a voxel region and pads past the boundaries with zeros. This accepts start and end coordinates past the limits of the voxel grid, and uses it to calculate how much top/left/right/bottom padding to add. Args: voxels: A tf.float32 tensor of shape [x, y, z, f] to crop start_coordinates: A list of len 4 with the [x, y, z, f] starting location of our crop. This can be negative, which indicates left/top padding. end_coordinates: A list of len 4 with the [x, y, z, f] ending location of our crop. This can be beyond the size of the voxel tensor, which indicates padding. Returns: cropped_and_padded_voxels: A voxel grid with shape [end_coordinates[0] - start_coordinates[0], end_coordinates[1] - start_coordinates[1], end_coordinates[2] - start_coordinates[2], end_coordinates[3] - start_coordinates[3]] Raises: ValueError: If requested crop and pad is outside the bounds of what the function supports. """ if len(start_coordinates) != 4: raise ValueError('start_coordinates should be of length 4') if len(end_coordinates) != 4: raise ValueError('end_coordinates should be of length 4') if any([coord <= 0 for coord in end_coordinates]): raise ValueError('Requested end coordinates should be > 0') start_coordinates = tf.convert_to_tensor(start_coordinates, tf.int32) end_coordinates = tf.convert_to_tensor(end_coordinates, tf.int32) # Clip the coordinates to within the voxel grid clipped_start_coordinates = tf.maximum(0, start_coordinates) clipped_end_coordinates = tf.minimum(voxels.shape, end_coordinates) cropped_voxels = tf.slice(voxels, begin=cli
pped_start_coordinates, size=(clipped_end_coordinates - clipped_start_coordinates)) top_and_left_padding = tf.m
aximum(0, -start_coordinates) bottom_and_right_padding = tf.maximum(0, end_coordinates - voxels.shape) padding = tf.stack([top_and_left_padding, bottom_and_right_padding], axis=1) return tf.pad(cropped_voxels, padding) def pointcloud_to_voxel_grid(points, features, grid_cell_size, start_location, end_location, segment_func=tf.math.unsorted_segment_mean): """Converts a pointcloud into a voxel grid. Args: points: A tf.float32 tensor of size [N, 3]. features: A tf.float32 tensor of size [N, F]. grid_cell_size: A tf.float32 tensor of size [3]. start_location: A tf.float32 tensor of size [3]. end_location: A tf.float32 tensor of size [3]. segment_func: A tensorflow function that operates on segments. Expect one of tf.math.unsorted_segment_{min/max/mean/prod/sum}. Defaults to tf.math.unsorted_segment_mean Returns: voxel_features: A tf.float32 tensor of size [grid_x_len, grid_y_len, grid_z_len, F]. segment_ids: A tf.int32 tensor of IDs for each point indicating which (flattened) voxel cell its data was mapped to. point_indices: A tf.int32 tensor of size [num_points, 3] containing the location of each point in the 3d voxel grid. """ grid_cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32) start_location = tf.convert_to_tensor(start_location, dtype=tf.float32) end_location = tf.convert_to_tensor(end_location, dtype=tf.float32) point_indices = tf.cast( (points - tf.expand_dims(start_location, axis=0)) / tf.expand_dims(grid_cell_size, axis=0), dtype=tf.int32) grid_size = tf.cast( tf.math.ceil((end_location - start_location) / grid_cell_size), dtype=tf.int32) # Note: all points outside the grid are added to the edges # Cap index at grid_size - 1 (so a 10x10x10 grid's max cell is (9,9,9)) point_indices = tf.minimum(point_indices, tf.expand_dims(grid_size - 1, axis=0)) # Don't allow any points below index (0, 0, 0) point_indices = tf.maximum(point_indices, 0) segment_ids = tf.reduce_sum( point_indices * tf.stack( [grid_size[1] * grid_size[2], grid_size[2], 1], axis=0), axis=1) voxel_features = segment_func( data=features, segment_ids=segment_ids, num_segments=(grid_size[0] * grid_size[1] * grid_size[2])) return (tf.reshape(voxel_features, [grid_size[0], grid_size[1], grid_size[2], features.get_shape().as_list()[1]]), segment_ids, point_indices) def voxels_to_points(voxels, segment_ids): """Convert voxels back to points given their segment id. Args: voxels: A tf.float32 tensor representing a voxel grid. Expect shape [x, y, z, f]. segment_ids: A tf.int32 tensor representing the segment id of each point in the original pointcloud we want to project voxel features back to. Returns: point_features: A tf.float32 tensor of shape [N, f] where each point now has the features in the associated voxel cell. """ flattened_voxels = tf.reshape(voxels, shape=(-1, voxels.shape[-1])) return tf.gather(flattened_voxels, segment_ids) def _points_offset_in_voxels_unbatched(points, grid_cell_size): """Converts points into offsets in voxel grid for a single batch. The values range from -0.5 to 0.5 Args: points: A tf.float32 tensor of size [N, 3]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. Returns: voxel_xyz_offsets: A tf.float32 tensor of size [N, 3]. 
""" min_points = tf.reduce_min(points, axis=0) points_index = tf.math.floordiv(points - min_points, grid_cell_size) points_offset = points - min_points - (points_index * grid_cell_size) return (points_offset / grid_cell_size) - 0.5 def points_offset_in_voxels(points, grid_cell_size): """Converts points into offsets in voxel grid. Args: points: A tf.float32 tensor of size [batch_size, N, 3]. grid_cell_size: The size of the grid cells in x, y, z dimensions in the voxel grid. It should be either a tf.float32 tensor, a numpy array or a list of size [3]. Returns: voxel_xyz_offsets: A tf.float32 tensor of size [batch_size, N, 3]. """ batch_size = points.get_shape().as_list()[0] def fn(i): return _points_offset_in_voxels_unbatched( points=points[i, :, :], grid_cell_size=grid_cell_size) return tf.map_fn(fn=fn, elems=tf.range(batch_size), dtype=tf.float32) def _points_to_voxel_indices(points, grid_cell_size): """Converts points into corresponding voxel indices. Maps each point into a voxel grid with cell size given by grid_cell_size. For each voxel, it computes a x, y, z index. Also converts the x, y, z index to a single number index where there is a one-on-one mapping between each x, y, z index value and its corresponding single number index value. Args: points: A tf.float32 tensor of size [N, 3]. g
kho/mr-cdec
python/pkg/cdec/sa/compile.py
Python
apache-2.0
5,575
0.003587
#!/usr/bin/env python
import argparse
import os
import logging
import cdec.configobj
import cdec.sa
from cdec.sa._sa import monitor_cpu
import sys

MAX_PHRASE_LENGTH = 4

def precompute(f_sa, max_len, max_nt, max_size, min_gap, rank1, rank2, tight_phrases):
    lcp = cdec.sa.LCP(f_sa)
    stats = sorted(lcp.compute_stats(MAX_PHRASE_LENGTH), reverse=True)
    precomp = cdec.sa.Precomputation(from_stats=stats,
            fsarray=f_sa,
            precompute_rank=rank1,
            precompute_secondary_rank=rank2,
            max_length=max_len,
            max_nonterminals=max_nt,
            train_max_initial_size=max_size,
            train_min_gap_size=min_gap)
    return precomp

def main():
    preprocess_start_time = monitor_cpu()
    sys.setrecursionlimit(sys.getrecursionlimit() * 100)
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('cdec.sa.compile')
    parser = argparse.ArgumentParser(description='Compile a corpus into a suffix array.')
    parser.add_argument('--maxnt', '-n', type=int, default=2,
                        help='Maximum number of non-terminal symbols')
    parser.add_argument('--maxlen', '-l', type=int, default=5,
                        help='Maximum number of terminals')
    parser.add_argument('--maxsize', '-s', type=int, default=15,
                        help='Maximum rule span')
    parser.add_argument('--mingap', '-g', type=int, default=1,
                        help='Minimum gap size')
    parser.add_argument('--rank1', '-r1', type=int, default=100,
                        help='Number of pre-computed frequent patterns')
    parser.add_argument('--rank2', '-r2', type=int, default=10,
                        help='Number of pre-computed super-frequent patterns)')
    parser.add_argument('--loose', action='store_true',
                        help='Enable loose phrase extraction (default: tight)')
    parser.add_argument('-c', '--config', default='/dev/stdout',
                        help='Output configuration')
    parser.add_argument('-f', '--source',
                        help='Source language corpus')
    parser.add_argument('-e', '--target',
                        help='Target language corpus')
    parser.add_argument('-b', '--bitext',
                        help='Parallel text (source ||| target)')
    parser.add_argument('-a', '--alignment', required=True,
                        help='Bitext word alignment')
    parser.add_argument('-o', '--output', required=True,
                        help='Output path')
    args = parser.parse_args()

    if not ((args.source and args.target) or args.bitext):
        parser.error('a parallel corpus is required\n'
                     '\tuse -f (source) with -e (target) or -b (bitext)')

    param_names = ('max_len', 'max_nt', 'max_size', 'min_gap',
                   'rank1', 'rank2', 'tight_phrases')
    params = (args.maxlen, args.maxnt, args.maxsize, args.mingap,
              args.rank1, args.rank2, not args.loose)

    if not os.path.exists(args.output):
        os.mkdir(args.output)

    f_sa_bin = os.path.join(args.output, 'f.sa.bin')
    e_bin = os.path.join(args.output, 'e.bin')
    precomp_file = 'precomp.{0}.{1}.{2}.{3}.{4}.{5}.bin'.format(*params)
    precomp_bin = os.path.join(args.output, precomp_file)
    a_bin = os.path.join(args.output, 'a.bin')
    lex_bin = os.path.join(args.output, 'lex.bin')

    start_time = monitor_cpu()
    logger.info('Compiling source suffix array')
    if args.bitext:
        f_sa = cdec.sa.SuffixArray(from_text=args.bitext, side='source')
    else:
        f_sa = cdec.sa.SuffixArray(from_text=args.source)
    f_sa.write_binary(f_sa_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling source suffix array took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Compiling target data array')
    if args.bitext:
        e = cdec.sa.DataArray(from_text=args.bitext, side='target')
    else:
        e = cdec.sa.DataArray(from_text=args.target)
    e.write_binary(e_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling target data array took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Precomputing frequent phrases')
    precompute(f_sa, *params).write_binary(precomp_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling precomputations took %f seconds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Compiling alignment')
    a = cdec.sa.Alignment(from_text=args.alignment)
    a.write_binary(a_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling alignment took %f seonds', stop_time - start_time)

    start_time = monitor_cpu()
    logger.info('Compiling bilexical dictionary')
    lex = cdec.sa.BiLex(from_data=True, alignment=a, earray=e, fsarray=f_sa)
    lex.write_binary(lex_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling bilexical dictionary took %f seconds', stop_time - start_time)

    # Write configuration
    config = cdec.configobj.ConfigObj(args.config, unrepr=True)
    config['f_sa_file'] = os.path.abspath(f_sa_bin)
    config['e_file'] = os.path.abspath(e_bin)
    config['a_file'] = os.path.abspath(a_bin)
    config['lex_file'] = os.path.abspath(lex_bin)
    config['precompute_file'] = os.path.abspath(precomp_bin)
    for name, value in zip(param_names, params):
        config[name] = value
    config.write()

    preprocess_stop_time = monitor_cpu()
    logger.info('Overall preprocessing step took %f seconds', preprocess_stop_time - preprocess_start_time)

if __name__ == '__main__':
    main()
zenodo/invenio
invenio/base/views.py
Python
gpl-2.0
947
0
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""Define base Blueprint."""

from flask import Blueprint

blueprint = Blueprint('base', __name__, template_folder='templates',
                      static_folder='static')
veltri/DLV2
tests/parser/bkunstrat3.bk.test.py
Python
apache-2.0
69
0
input = """ x | -x. y |
-y. """ output = """ x | -x. y | -y. "
""
detly/webcavate
webcavate/app.py
Python
gpl-3.0
3,132
0.000958
""" Copyright 2014 Jason Heeris, jason.heeris@gmail.com This file is part of the dungeon excavator web interface ("webcavate"). Webcavate is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Webcavate is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with webcavate. If not, see <http://www.gnu.org/licenses/>. """ import argparse import uuid from flask import Flask, render_template, request, make_response, redirect, url_for, flash
from dungeon.excavate import render_room HELP_TEXT = """\ Web interface to the dungeon excavator.""" app = Flask('d
ungeon.web') app.secret_key = str(uuid.uuid4()) @app.route("/") def root(): """ Web interface landing page. """ return render_template('index.html') @app.route("/error") def error(): """ Display errors. """ return render_template('error.html') def make_map(request, format): tile_size = int(request.form['size']) wall_file = request.files['walls'] floor_file = request.files['floor'] floorplan_file = request.files['floorplan'] try: room_data, content_type = render_room( floor_file.read(), wall_file.read(), floorplan_file.read(), tile_size, format ) except ValueError as ve: flash(str(ve)) return redirect(url_for('error')) # Create response response = make_response(room_data) response.headers['Content-Type'] = content_type return response @app.route("/map.svg", methods=['POST']) def map_svg(): return make_map(request, format='svg') @app.route("/map.png", methods=['POST']) def map_png(): return make_map(request, format='png') @app.route("/map.jpg", methods=['POST']) def map_jpg(): return make_map(request, format='jpg') @app.route("/map", methods=['POST']) def process(): """ Process submitted form data. """ format = request.form['format'] try: node = { 'png': 'map_png', 'svg': 'map_svg', 'jpg': 'map_jpg', }[format] except KeyError: flash("The output format you selected is not supported.") return redirect(url_for('error')) else: return redirect(url_for(node, _method='POST'), code=307) def main(): """ Parse arguments and get things going for the web interface """ parser = argparse.ArgumentParser(description=HELP_TEXT) parser.add_argument( '-p', '--port', help="Port to serve the interface on.", type=int, default=5050 ) parser.add_argument( '-a', '--host', help="Host to server the interface on.", ) args = parser.parse_args() app.run(port=args.port, host=args.host, debug=False)
galaxy-iuc/parsec
parsec/commands/libraries/create_library.py
Python
apache-2.0
859
0.001164
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output


@click.command('create_library')
@click.argument("name", type=str)
@click.option(
    "--description",
    help="Optional data library description",
    type=str
)
@click.option(
    "--synopsis",
    help="Optional data library synopsis",
    type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, name, description="", synopsis=""):
    """Create a data library with the properties defined in the arguments.

    Output:

        Details of the created library.
        For example::

            {'id': 'f740ab636b360a70',
             'name': 'Library from bioblend',
             'url': '/api/libraries/f740ab636b360a70'}
    """
    return ctx.gi.libraries.create_library(name, description=description, synopsis=synopsis)
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/tools/parameters/dataset_matcher.py
Python
gpl-3.0
6,314
0.01758
import galaxy.model from logging import getLogger log = getLogger( __name__ ) ROLES_UNSET = object() INVALID_STATES = [ galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ] class DatasetMatcher( object ): """ Utility class to aid DataToolParameter and similar classes in reasoning about what HDAs could match or are selected for a parameter and value. Goal here is to both encapsulate and reuse logic related to filtering, datatype matching, hiding errored dataset, finding implicit conversions, and permission handling. """ def __init__( self, trans, param, value, other_values ): self.trans = trans self.param = param self.tool = param.tool self.value = value self.current_user_roles = ROLES_UNSET filter_value = None if param.options: try: filter_value = param.options.get_options( trans, other_values )[0][0] except IndexError: pass # no valid options self.filter_value = filter_value def hda_accessible( self, hda, check_security=True ): """ Does HDA correspond to dataset that is an a valid state and is accessible to user. """ dataset = hda.dataset state_valid =
not dataset.state in INVALID_STATES return state_valid and ( not check_security or self.__can_access_dataset( dataset ) ) def valid_hda_match( self, hda, check_implicit_conversions=True, check_security=False ): """ Return False of this parameter can not be matched to
a the supplied HDA, otherwise return a description of the match (either a HdaDirectMatch describing a direct match or a HdaImplicitMatch describing an implicit conversion.) """ if self.filter( hda ): return False formats = self.param.formats if hda.datatype.matches_any( formats ): return HdaDirectMatch( hda ) if not check_implicit_conversions: return False target_ext, converted_dataset = hda.find_conversion_destination( formats ) if target_ext: if converted_dataset: hda = converted_dataset if check_security and not self.__can_access_dataset( hda.dataset ): return False return HdaImplicitMatch( hda, target_ext ) return False def hda_match( self, hda, check_implicit_conversions=True, ensure_visible=True ): """ If HDA is accessible, return information about whether it could match this parameter and if so how. See valid_hda_match for more information. """ accessible = self.hda_accessible( hda ) if accessible and ( not ensure_visible or hda.visible or ( self.selected( hda ) and not hda.implicitly_converted_parent_datasets ) ): # If we are sending data to an external application, then we need to make sure there are no roles # associated with the dataset that restrict its access from "public". require_public = self.tool and self.tool.tool_type == 'data_destination' if require_public and not self.trans.app.security_agent.dataset_is_public( hda.dataset ): return False if self.filter( hda ): return False return self.valid_hda_match( hda, check_implicit_conversions=check_implicit_conversions ) def selected( self, hda ): """ Given value for DataToolParameter, is this HDA "selected". """ value = self.value if value and str( value[ 0 ] ).isdigit(): return hda.id in map(int, value) else: return value and hda in value def filter( self, hda ): """ Filter out this value based on other values for job (if applicable). """ param = self.param return param.options and param._options_filter_attribute( hda ) != self.filter_value def __can_access_dataset( self, dataset ): # Lazily cache current_user_roles. if self.current_user_roles is ROLES_UNSET: self.current_user_roles = self.trans.get_current_user_roles() return self.trans.app.security_agent.can_access_dataset( self.current_user_roles, dataset ) class HdaDirectMatch( object ): """ Supplied HDA was a valid option directly (did not need to find implicit conversion). """ def __init__( self, hda ): self.hda = hda @property def implicit_conversion( self ): return False class HdaImplicitMatch( object ): """ Supplied HDA was a valid option directly (did not need to find implicit conversion). """ def __init__( self, hda, target_ext ): self.hda = hda self.target_ext = target_ext @property def implicit_conversion( self ): return True class DatasetCollectionMatcher( object ): def __init__( self, dataset_matcher ): self.dataset_matcher = dataset_matcher def __valid_element( self, element ): # Simplify things for now and assume these are hdas and not implicit # converts. One could imagine handling both of those cases down the # road. 
if element.ldda: return False child_collection = element.child_collection if child_collection: return self.dataset_collection_match( child_collection ) hda = element.hda if not hda: return False hda_match = self.dataset_matcher.hda_match( hda, ensure_visible=False ) return hda_match and not hda_match.implicit_conversion def hdca_match( self, history_dataset_collection_association, reduction=False ): dataset_collection = history_dataset_collection_association.collection if reduction and dataset_collection.collection_type.find( ":" ) > 0: return False else: return self.dataset_collection_match( dataset_collection ) def dataset_collection_match( self, dataset_collection ): valid = True for element in dataset_collection.elements: if not self.__valid_element( element ): valid = False break return valid __all__ = [ DatasetMatcher, DatasetCollectionMatcher ]
lechat/jenkinsflow
test/prefix_test.py
Python
bsd-3-clause
1,965
0.005598
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.

from jenkinsflow.flow import serial
from .framework import api_select

prefixed_jobs = """
serial flow: [
   job: 'top_quick1'
   serial flow: [
      job: 'top_x_quick2-1'
   ]
   serial flow: [
      job: 'top_x_quick2-2'
   ]
   serial flow: [
      job: 'top_x_quick2-3'
   ]
   job: 'top_quick3'
   parallel flow: (
      serial flow: [
         job: 'top_y_z_quick4a'
      ]
      serial flow: [
         job: 'quick4b'
      ]
      job: 'top_y_quick5'
   )
]
"""


def test_prefix(api_type, capsys):
    with api_select.api(__file__, api_type) as api:
        def job(name):
            api.job(name, exec_time=0.5, max_fails=0, expect_invocations=0, expect_order=None, params=None)

        api.flow_job()
        job('quick1')
        index = 0
        for index in 1, 2, 3:
            job('x_quick2-' + str(index))
        job('quick3')
        job('y_z_quick4')
        job('y_quick5')

        with serial(api, timeout=70, report_interval=3, job_name_prefix='top_', just_dump=True) as ctrl1:
            ctrl1.invoke('quick1')

            for index in 1, 2, 3:
                with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2:
                    ctrl2.invoke('quick2-' + str(index))

            ctrl1.invoke('quick3')

            with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2:
                with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3a:
                    ctrl3a.invoke('quick4a')

                # Reset prefix
                with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix=None) as ctrl3b:
                    ctrl3b.invoke('quick4b')

                ctrl2.invoke('quick5')

        sout, _ = capsys.readouterr()
        assert prefixed_jobs.strip() in sout
jolyonb/edx-platform
common/test/acceptance/fixtures/course.py
Python
agpl-3.0
15,531
0.002447
""" Fixture to create a course and course components (XBlocks). """ import datetime import json import mimetypes from collections import namedtuple from textwrap import dedent from opaque_keys.edx.keys import CourseKey from path import Path from common.test.acceptance.fixtures import STUDIO_BASE_URL from common.test.acceptance.fixtures.base import FixtureError, XBlockContainerFixture class XBlockFixtureDesc(object): """ Description of an XBlock, used to configure a course fixture. """ def __init__(se
lf, category, display_name, data=None, metadata=None, grader_type=None, publish='make_public', **kwargs): """ Configure the XBlock to be created by the fixture. These arguments have the same meaning as in the Studio REST API: * `category` * `display_name` * `data` * `metadata` * `grader_type` * `publish` """ self.category =
category self.display_name = display_name self.data = data self.metadata = metadata self.grader_type = grader_type self.publish = publish self.children = [] self.locator = None self.fields = kwargs def add_children(self, *args): """ Add child XBlocks to this XBlock. Each item in `args` is an `XBlockFixtureDesc` object. Returns the `xblock_desc` instance to allow chaining. """ self.children.extend(args) return self def serialize(self): """ Return a JSON representation of the XBlock, suitable for sending as POST data to /xblock XBlocks are always set to public visibility. """ returned_data = { 'display_name': self.display_name, 'data': self.data, 'metadata': self.metadata, 'graderType': self.grader_type, 'publish': self.publish, 'fields': self.fields, } return json.dumps(returned_data) def __str__(self): """ Return a string representation of the description. Useful for error messages. """ return dedent(u""" <XBlockFixtureDescriptor: category={0}, data={1}, metadata={2}, grader_type={3}, publish={4}, children={5}, locator={6}, > """).strip().format( self.category, self.data, self.metadata, self.grader_type, self.publish, self.children, self.locator ) # Description of course updates to add to the course # `date` is a str (e.g. "January 29, 2014) # `content` is also a str (e.g. "Test course") CourseUpdateDesc = namedtuple("CourseUpdateDesc", ['date', 'content']) class CourseFixture(XBlockContainerFixture): """ Fixture for ensuring that a course exists. WARNING: This fixture is NOT idempotent. To avoid conflicts between tests, you should use unique course identifiers for each fixture. """ def __init__(self, org, number, run, display_name, start_date=None, end_date=None, settings=None): """ Configure the course fixture to create a course with `org`, `number`, `run`, and `display_name` (all unicode). `start_date` and `end_date` are datetime objects indicating the course start and end date. The default is for the course to have started in the distant past, which is generally what we want for testing so students can enroll. `settings` can be any additional course settings needs to be enabled. for example to enable entrance exam settings would be a dict like this {"entrance_exam_enabled": "true"} These have the same meaning as in the Studio restful API /course end-point. """ super(CourseFixture, self).__init__() self._course_dict = { 'org': org, 'number': number, 'run': run, 'display_name': display_name } # Set a default start date to the past, but use Studio's # default for the end date (meaning we don't set it here) if start_date is None: start_date = datetime.datetime(1970, 1, 1) self._course_details = { 'start_date': start_date.isoformat(), } if end_date is not None: self._course_details['end_date'] = end_date.isoformat() if settings is not None: self._course_details.update(settings) self._updates = [] self._handouts = [] self._assets = [] self._textbooks = [] self._advanced_settings = {} self._course_key = None def __str__(self): """ String representation of the course fixture, useful for debugging. """ return u"<CourseFixture: org='{org}', number='{number}', run='{run}'>".format(**self._course_dict) def add_course_details(self, course_details): """ Add course details to dict of course details to be updated when configure_course or install is called. Arguments: Dictionary containing key value pairs for course updates, e.g. 
{'start_date': datetime.now() } """ if 'start_date' in course_details: course_details['start_date'] = course_details['start_date'].isoformat() if 'end_date' in course_details: course_details['end_date'] = course_details['end_date'].isoformat() self._course_details.update(course_details) def add_update(self, update): """ Add an update to the course. `update` should be a `CourseUpdateDesc`. """ self._updates.append(update) def add_handout(self, asset_name): """ Add the handout named `asset_name` to the course info page. Note that this does not actually *create* the static asset; it only links to it. """ self._handouts.append(asset_name) def add_asset(self, asset_name): """ Add the asset to the list of assets to be uploaded when the install method is called. """ self._assets.extend(asset_name) def add_textbook(self, book_title, chapters): """ Add textbook to the list of textbooks to be added when the install method is called. """ self._textbooks.append({"chapters": chapters, "tab_title": book_title}) def add_advanced_settings(self, settings): """ Adds advanced settings to be set on the course when the install method is called. """ self._advanced_settings.update(settings) def install(self): """ Create the course and XBlocks within the course. This is NOT an idempotent method; if the course already exists, this will raise a `FixtureError`. You should use unique course identifiers to avoid conflicts between tests. """ self._create_course() self._install_course_updates() self._install_course_handouts() self._install_course_textbooks() self._configure_course() self._upload_assets() self._add_advanced_settings() self._create_xblock_children(self._course_location, self.children) return self def configure_course(self): """ Configure Course Settings, take new course settings from self._course_details dict object """ self._configure_course() @property def studio_course_outline_as_json(self): """ Retrieves Studio course outline in JSON format. """ url = STUDIO_BASE_URL + '/course/' + self._course_key + "?format=json" response = self.session.get(url, headers=self.headers) if not response.ok: raise FixtureError( u"Could not retrieve course outline json. Status was {0}".format( response.status_code)) try: course_outline_json = response.json() except ValueError: raise FixtureError( u"Could not
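The course-fixture record above is cut off mid-method, but the constructor and helper signatures it does show are enough for a rough usage sketch. The import path, course identifiers, and update text below are illustrative assumptions, not taken from the source.

# Hypothetical sketch of driving the fixtures above; values and import path are assumed.
from common.test.acceptance.fixtures.course import (  # assumed module location
    CourseFixture, CourseUpdateDesc, XBlockFixtureDesc)

course = CourseFixture('TestOrg', 'CS101', '2024_T1', 'Fixture Demo Course')
course.add_update(CourseUpdateDesc(date='January 29, 2014', content='Welcome!'))
course.add_children(
    XBlockFixtureDesc('chapter', 'Week 1').add_children(
        XBlockFixtureDesc('sequential', 'Lesson 1')
    )
)
course.install()  # not idempotent: pick a unique org/number/run per test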
prudnikov/python-oauth2
oauth2/_version.py
Python
mit
438
0.004566
# This is the version of this source code.

manual_verstr = "1.5"
auto_build_num = "212"
verstr = manual_verstr + "." + auto_build_num

try:
    from pyutil.version_class import Version as pyutil_Version
    __version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
    # Maybe there is no pyutil installed.
    from distutils.version import LooseVersion as distutils_Version
    __version__ = distutils_Version(verstr)
joadavis/rpi-coding
minecraft/oliverboom.py
Python
mit
907
0.016538
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time

mc = minecraft.Minecraft.create()

#mc.postToChat("Heat Vision!")
pos = mc.player.getTilePos()
#mc.postToChat(pos)
#rot = mc.player.getRotation()
#pitch = mc.player.getPitch()
#direct = mc.player.getDirection()
#mc.postToChat(rot)
#mc.postToChat(pitch)
#mc.postToChat(direct)
# those dont work on Pi

# activate any tnt around
mc.postToChat("Oliver's boom!")

while True:
    x, y, z = mc.player.getPos()
    for xi in range(-4, 4):
        for zi in range(-4, 4):
            for yi in range(-1, 3):
                thisblock = mc.getBlock(x + xi, y + yi, z + zi)
                #print thisblock
                if thisblock == 46:
                    mc.setBlock(x + xi, y + yi, z+zi, 46, 1)
                    print "setting on"
                    #mc.setBlock(x + xi, y + 1, z+zi, 46, 1)
    time.sleep(1)
jroeland/teapot
project/web/app/products/urls.py
Python
mit
489
0.010225
'''
Created on Oct 19, 2016

@author: jaime
'''
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt

from products import views

urlpatterns = [
    url(r'^categories/$', csrf_exempt(views.ProductCategoryView.as_view())),
    url(r'^categories/(?P<uid>\w+)/$', csrf_exempt(views.ProductCategoryView.as_view())),
    url(r'^$', csrf_exempt(views.ProductView.as_view())),
    url(r'^(?P<uid>\w+)/$', csrf_exempt(views.ProductView.as_view())),
]
yac/rdoupdate
rdoupdate/shell.py
Python
apache-2.0
5,901
0
# -*- encoding: utf-8 -*- import argparse import os import sys import yaml from . import VERSION import actions import core import exception from utils import log def error(errtype, msg, code=42): sys.stderr.write("{t.red}[ERROR] {t.yellow}{er}: {msg}" "{t.normal}\n".format(er=errtype, msg=msg, t=log.term)) sys.exit(code) def get_parser(): parser = argparse.ArgumentParser(prog='rdoupdate') subparsers = parser.add_subparsers(help='available actions') parser.add_argument('--version', action='version', version=VERSION) # check check_parser = subparsers.add_parser( 'check', help="validate update file(s)", description="validate one or more update files; use -g to select " "an update file added by last commit to a git repo or " "use -f to select update files directly (default: -g .)") check_parser.add_argument( '-g', '--git', type=str, metavar='DIR', help="check latest update file added to git repo in DIR directory") check_parser.add_argument( '-f', '--files', type=str, metavar='FILE', nargs='+', help="check all specified FILEs; use - for stdin") check_parser.add_argument( '-a', '--available', action='store_true', help="also check if builds are available for download") check_parser.set_defaults(action=do_check) # download dl_parser = subparsers.add_parser( 'download', help="download builds from update file(s)", description=("download builds from one or more update files into a " "directory tree; use -g to select an update file added " "by last commit to a git repo or use -f to select update " "files directly; default: -g .")) dl_parser.add_argument( '-g', '--git', type=str, metavar='DIR', help="download builds from latest update file added to git repo in " "DIR directory") dl_parser.add_argument( '-f', '--files', type=str, metavar='FILE', nargs='+', help="check all specified FILEs; use - for stdin") dl_parser.add_argument( '-o', '--outdir', type=str, metavar='DIR', help="directory to download builds into (default: .)") dl_parser.add_argument( '-u', '--per-update', action='store_true', help="create extra directory for each update") dl_parser.add_argument( '-b', '--build-filter', metavar='ATTR:REGEX', action='append', help="Only download builds with ATTRibute matching python REGEX; can " "be specified multiple times") dl_parser.set_defaults(action=do_download) # move move_parser = subparsers.add_parser( 'move', help="move an update file (create a commit)", description="create a commit that moves selected files to a directory") move_parser.add_argument( 'files', metavar='FILE', type=str, nargs='+', help='updat
e file(s) to move') move_parser.add_argument( '-d', '--dir', type=str, metavar='DIR', help="move update file(s) to this directory instead of using " "update.group") move_parse
r.set_defaults(action=do_move) list_parser = subparsers.add_parser( 'list-bsources', help="show available build sources", description="show available build sources") list_parser.set_defaults(action=do_list_bsources) return parser def _get_update_files(args): if args.files and args.git: error("invalid invocation", "-g and -f are exclusive.", 19) if args.files: files = args.files else: if not args.git: args.git = '.' f = actions.get_last_commit_update(args.git) files = [os.path.join(args.git, f)] return files def do_check(args): files = _get_update_files(args) good, fails = actions.check_files(*files, available=args.available, verbose=True) actions.print_summary(good, fails, 'PASSED', 'FAILED') if fails: return 127 def _parse_build_filter(fargs): bf = [] if not fargs: return bf for f in fargs: try: attr, rex = f.split(':', 1) except Exception as ex: raise exception.InvalidFilter(what=f) bf.append((attr, rex)) return bf def do_download(args): files = _get_update_files(args) build_filter = _parse_build_filter(args.build_filter) good, fails = actions.download_updates_builds( *files, out_dir=args.outdir, per_update=args.per_update, build_filter=build_filter) actions.print_summary(good, fails, 'DOWNLOADED', 'FAILED to download') if fails: return 128 def do_move(args): actions.move_files(args.files, args.dir) def do_list_bsources(args): actions.list_build_sources() def run(*cargs): parser = get_parser() args = parser.parse_args(cargs) action = args.action return action(args) def main(): cargs = sys.argv[1:] try: return run(*cargs) except IOError as e: error("file error", "%s: %s" % (e.strerror, e.filename), 2) except exception.ChdirError as e: error("file error", e, 3) except exception.CommandFailed as e: error("command failed", e.kwargs['cmd'], 5) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e: error("invalid YAML", e, 7) except exception.InvalidUpdateStructure as e: error("invalid structure", e, 11) except exception.InvalidUpdateCommit as e: error("invalid commit", e, 13) except exception.ParsingError as e: error("parsing error", e, 17) except Exception as e: err = type(e).__name__ ex = str(e) if ex: err += ": %s" % ex error("unexpected error", err, 42) if __name__ == '__main__': main()
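Since the record above shows the full argparse wiring, here is a rough sketch of how the run() entry point could be driven programmatically; the file and directory names are made up.

# Sketch only: exercises the sub-commands defined by get_parser() above.
from rdoupdate.shell import run

run('check', '-f', 'updates/my-update.yml')    # validate one update file
run('download', '-g', '.', '-o', 'builds')     # fetch builds from the latest update commit
run('list-bsources')                           # show available build sources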
google/material-design-icons
update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py
Python
apache-2.0
33,326
0.003691
from fontTools.misc.py23 import byteord from fontTools.misc import sstruct from fontTools.misc.fixedTools import floatToFixedToStr from fontTools.misc.textTools import safeEval # from itertools import * from . import DefaultTable from . import grUtils from array import array from functools import reduce import struct, re, sys Silf_hdr_format = ''' > version: 16.16F ''' Silf_hdr_format_3 = ''' > version: 16.16F compilerVersion: L numSilf: H x x ''' Silf_part1_format_v3 = ''' > ruleVersion: 16.16F passOffset: H pseudosOffset: H ''' Silf_part1_format = ''' > maxGlyphID: H extraAscent: h extraDescent: h numPasses: B iSubst: B iPos: B iJust: B iBidi: B flags: B maxPreContext: B maxPostContext: B attrPseudo: B attrBreakWeight: B attrDirectionality: B attrMirroring: B attrSkipPasses: B numJLevels: B ''' Silf_justify_format = ''' > attrStretch: B attrShrink: B attrStep: B attrWeight: B runto: B x x x ''' Silf_part2_format = ''' > numLigComp: H numUserDefn: B maxCompPerLig: B direction: B attCollisions: B x x x numCritFeatures: B ''' Silf_pseudomap_format = ''' > unicode: L nPseudo: H ''' Silf_pseudomap_format_h = ''' > unicode: H nPseudo: H ''' Silf_classmap_format = ''' > numClass: H numLinear: H ''' Silf_lookupclass_format = ''' > numIDs: H searchRange: H entrySelector: H rangeShift: H ''' Silf_lookuppair_format = ''' > glyphId: H index: H ''' Silf_pass_format = ''' > flags: B maxRuleLoop: B maxRuleContext: B maxBackup: B numRules: H fsmOffset: H pcCode: L rcCode: L aCode: L oDebug: L numRows: H numTransitional: H numSuccess: H numColumns: H ''' aCode_info = ( ("NOP", 0), ("PUSH_BYTE", "b"), ("PUSH_BYTE_U", "B"), ("PUSH_SHORT", ">h"), ("PUSH_SHORT_U", ">H"), ("PUSH_LONG", ">L"), ("ADD", 0), ("SUB", 0), ("MUL", 0), ("DIV", 0), ("MIN", 0), ("MAX", 0), ("NEG", 0), ("TRUNC8", 0), ("TRUNC16", 0), ("COND", 0), ("AND", 0), # x10 ("OR", 0), ("NOT", 0), ("EQUAL", 0), ("NOT_EQ", 0), ("LESS", 0), ("GTR", 0), ("LESS_EQ", 0), ("GTR_EQ", 0), ("NEXT", 0), ("NEXT_N", "b"), ("COPY_NEXT", 0), ("PUT_GLYPH_8BIT_OBS", "B"), ("PUT_SUBS_8BIT_OBS", "bBB"), ("PUT_COPY", "b"), ("INSERT", 0), ("DELETE", 0), # x20 ("ASSOC", -1), ("CNTXT_ITEM", "bB"), ("ATTR_SET", "B"), ("ATTR_ADD", "B"), ("ATTR_SUB", "B"), ("ATTR_SET_SLOT", "B"), ("IATTR_SET_SLOT", "BB"), ("PUSH_SLOT_ATTR", "Bb"), ("PUSH_GLYPH_ATTR_OBS", "Bb"), ("PUSH_GLYPH_METRIC", "Bbb"), ("PUSH_FEAT", "Bb"), ("PUSH_ATT_TO_GATTR_OBS", "Bb"), ("PUSH_ATT_TO_GLYPH_
METRIC", "Bbb"), ("PUSH_I
SLOT_ATTR", "Bbb"), ("PUSH_IGLYPH_ATTR", "Bbb"), ("POP_RET", 0), # x30 ("RET_ZERO", 0), ("RET_TRUE", 0), ("IATTR_SET", "BB"), ("IATTR_ADD", "BB"), ("IATTR_SUB", "BB"), ("PUSH_PROC_STATE", "B"), ("PUSH_VERSION", 0), ("PUT_SUBS", ">bHH"), ("PUT_SUBS2", 0), ("PUT_SUBS3", 0), ("PUT_GLYPH", ">H"), ("PUSH_GLYPH_ATTR", ">Hb"), ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), ("BITOR", 0), ("BITAND", 0), ("BITNOT", 0), # x40 ("BITSET", ">HH"), ("SET_FEAT", "Bb") ) aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)]) def disassemble(aCode): codelen = len(aCode) pc = 0 res = [] while pc < codelen: opcode = byteord(aCode[pc:pc+1]) if opcode > len(aCode_info): instr = aCode_info[0] else: instr = aCode_info[opcode] pc += 1 if instr[1] != 0 and pc >= codelen : return res if instr[1] == -1: count = byteord(aCode[pc]) fmt = "%dB" % count pc += 1 elif instr[1] == 0: fmt = "" else : fmt = instr[1] if fmt == "": res.append(instr[0]) continue parms = struct.unpack_from(fmt, aCode[pc:]) res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") pc += struct.calcsize(fmt) return res instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?") def assemble(instrs): res = b"" for inst in instrs: m = instre.match(inst) if not m or not m.group(1) in aCode_map: continue opcode, parmfmt = aCode_map[m.group(1)] res += struct.pack("B", opcode) if m.group(2): if parmfmt == 0: continue parms = [int(x) for x in re.split(r",\s*", m.group(2))] if parmfmt == -1: l = len(parms) res += struct.pack(("%dB" % (l+1)), l, *parms) else: res += struct.pack(parmfmt, *parms) return res def writecode(tag, writer, instrs): writer.begintag(tag) writer.newline() for l in disassemble(instrs): writer.write(l) writer.newline() writer.endtag(tag) writer.newline() def readcode(content): res = [] for e in content_string(content).split('\n'): e = e.strip() if not len(e): continue res.append(e) return assemble(res) attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID', 'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID') attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi') attrs_contexts = ('maxPreContext', 'maxPostContext') attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality', 'attrMirroring', 'attrSkipPasses', 'attCollisions') pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup', 'minRulePreContext', 'maxRulePreContext', 'collisionThreshold') pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns') def writesimple(tag, self, writer, *attrkeys): attrs = dict([(k, getattr(self, k)) for k in attrkeys]) writer.simpletag(tag, **attrs) writer.newline() def getSimple(self, attrs, *attr_list): for k in attr_list: if k in attrs: setattr(self, k, int(safeEval(attrs[k]))) def content_string(contents): res = "" for element in contents: if isinstance(element, tuple): continue res += element return res.strip() def wrapline(writer, dat, length=80): currline = "" for d in dat: if len(currline) > length: writer.write(currline[:-1]) writer.newline() currline = "" currline += d + " " if len(currline): writer.write(currline[:-1]) writer.newline() class _Object() : pass class table_S__i_l_f(DefaultTable.DefaultTable): '''Silf table support''' def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.silfs = [] def decompile(self, data, ttFont): sstruct.unpack2(Silf_hdr_format, data, self) self.version = float(floatToFixedToStr(self.version, precisionBits=16)) if self.version >= 5.0: (data, self.scheme) = grUtils.decompress(data) 
sstruct.unpack2(Silf_hdr_format_3, data, self) base = sstruct.calcsize(Silf_hdr_format_3) elif self.version < 3.0:
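The assemble()/disassemble() helpers above convert between Graphite action-code bytes and a textual form; a small round-trip using opcodes from the aCode_info table would look roughly like this.

# Round-trip sketch for the action-code helpers defined above.
from fontTools.ttLib.tables.S__i_l_f import assemble, disassemble

code = assemble(["PUSH_BYTE(2)", "PUSH_BYTE(3)", "ADD", "POP_RET"])
print(disassemble(code))  # expected: ['PUSH_BYTE(2)', 'PUSH_BYTE(3)', 'ADD', 'POP_RET']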
libvirt/libvirt-test-API
libvirttestapi/repos/checkpoint/checkpoint_get_xml.py
Python
gpl-2.0
2,134
0.000937
# Copyright (C) 2010-2012 Red Hat, Inc. # This work is licensed under the GNU GPLv2 or later. import libvirt import re from libvirt import libvirtError from libvirttestapi.utils import utils required_params = {'guestname', 'checkpoint_name'} optional_params = {'flags': None} def checkpoint_get_xml(params): logger = params['logger'] guestname = params['guestname'] checkpoint_name = params.get('checkpoint_name', None) flag = utils.parse_flags(params) if not utils.version_compare('libvirt-python', 5, 6, 0, logger): logger.info("Current libvirt-python don't support getXMLDesc().") return 0 logger.info("Checkpoint name: %s" % checkpoint_name) logger.info("flag: %s" % flag) if flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SIZE: logger.info("Bug 1207659: Don't support this flag.") return 0 try: conn = libvirt.open() dom = conn.lookupByName(guestname) cp = dom.checkpointLookupByName(checkpoint_name) cp_xml = cp.getXMLDesc(flag) except libvirtError as err: logger.error("API error message: %s" % err.get_error_message()) return 1 checkpoint_xml_path = "/var/lib/libvirt/qemu/checkpoint/%s/%s.xml" % (guestname, checkpoint_name) cp_f
d = open(checkpoint_xml_path, 'r') checkpoint_xml = cp_fd.read() checkpoint_xml = re.sub(r'<!--\n.*\n-->\n\n', '', checkpoint_xml, flags=re.S) if flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_NO_DOMAIN: cp_xml = cp_xml.replace('</domaincheckpoint>\n', '') if cp_xml in checkpoint_xml: logger.info("PASS: check checkpoint xml succ
essful.") else: logger.error("FAIL: check checkpoint xml failed.") return 1 elif flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SIZE: logger.info("Don't support this flag.") elif flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SECURE or flag == 0: if cp_xml == checkpoint_xml: logger.info("PASS: check checkpoint xml successful.") else: logger.error("FAIL: check checkpoint xml failed.") return 1 return 0
zamonia500/PythonTeacherMythenmetz
과외숙제/factorial.py
Python
gpl-3.0
86
0
def factorial(count):
    # base case: 0! == 1! == 1, otherwise the recursion never terminates
    if count <= 1:
        return 1
    return count * factorial(count - 1)

answer = factorial(6)
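A quick sanity check of the recursion once a base case is in place:

assert factorial(6) == 720   # 6 * 5 * 4 * 3 * 2 * 1
assert factorial(0) == 1     # base case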
iotile/coretools
iotilesensorgraph/test/test_devicemodel.py
Python
gpl-3.0
693
0
import pytest

from iotile.core.exceptions import ArgumentError
from iotile.sg.model import DeviceModel


def test_default_values():
    """Make sure we can get properties with default values."""

    model = DeviceModel()

    assert model.get('max_nodes') == 32
    assert model.get(u'max_nodes') == 32

    model.set('max_nodes', 16)
    assert model.get('max_nodes') == 16
    assert model.get(u'max_nodes') == 16

    model.set(u'max_nodes', 17)
    assert model.get('max_nodes') == 17
    assert model.get(u'max_nodes') == 17

    with pytest.raises(ArgumentError):
        model.get('unknown_parameter')

    with pytest.raises(ArgumentError):
        model.set('unknown_parameter', 15)
zerog2k/stc_diyclock
post_extra_script.py
Python
mit
392
0.015306
'''
custom script for platformio
'''
from os.path import join
from SCons.Script import DefaultEnvironment

env = DefaultEnvironment()

#print "post_extra_script running..."
#print env.Dump()

# compiler and linker flags dont work very well in build_flags of platformio.ini - need to set them here
env.Append(
    LINKFLAGS=[
        "--data-loc", 0x30
    ],
    STCGALCMD="/stcgal.py"
)
JensTimmerman/radical.pilot
docs/architecture/api_draft/unit_manager.py
Python
mit
3,311
0.017215
from a
ttributes import * from constants import * # -------------------------------------------------------------------------
----- # class UnitManager (Attributes) : """ UnitManager class -- manages a pool """ # -------------------------------------------------------------------------- # def __init__ (self, url=None, scheduler='default', session=None) : Attributes.__init__ (self) # -------------------------------------------------------------------------- # def add_pilot (self, pid) : """ add (Compute or Data)-Pilot(s) to the pool """ raise Exception ("%s.add_pilot() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def list_pilots (self, ptype=ANY) : """ List IDs of data and/or compute pilots """ raise Exception ("%s.list_pilots() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def remove_pilot (self, pid, drain=False) : """ Remove pilot(s) (does not cancel the pilot(s), but removes all units from the pilot(s). `drain` determines what happens to the units which are managed by the removed pilot(s). If `True`, the pilot removal is delayed until all units reach a final state. If `False` (the default), then `RUNNING` units will be canceled, and `PENDING` units will be re-assinged to the unit managers for re-scheduling to other pilots. """ raise Exception ("%s.remove_pilot() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def submit_unit (self, description) : """ Instantiate and return (Compute or Data)-Unit object(s) """ raise Exception ("%s.submit_unit() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def list_units (self, utype=ANY) : """ List IDs of data and/or compute units """ raise Exception ("%s.list_units() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def get_unit (self, uids) : """ Reconnect to and return (Compute or Data)-Unit object(s) """ raise Exception ("%s.get_unit() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def wait_unit (self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0) : """ Wait for given unit(s) to enter given state """ raise Exception ("%s.wait_unit() is not implemented" % self.__class__.__name__) # -------------------------------------------------------------------------- # def cancel_units (self, uids) : """ Cancel given unit(s) """ raise Exception ("%s.cancel_unit() is not implemented" % self.__class__.__name__) # ------------------------------------------------------------------------------ #
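The draft class above only documents intent (every method raises), so the following is just the call sequence its docstrings imply; the pilot ID, unit description, and unit ID are placeholders, and DONE/FAILED/CANCELED are the constants the module imports.

# Intended usage per the docstrings above; nothing here runs against the draft as-is.
um = UnitManager(scheduler='default')
um.add_pilot(pilot_id)                                  # attach an existing pilot to the pool
unit = um.submit_unit(unit_description)                 # returns a Compute/Data-Unit object
um.wait_unit(unit_id, state=[DONE, FAILED, CANCELED])   # unit_id: placeholder for the unit's ID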
mixmastamyk/flask-skeleton
src/timezones.py
Python
unlicense
371
0.002703
import pytz

priorities = ('US/Pacific', 'US/Mountain', 'US/Central', 'US/Eastern',
              'Brazil/East', 'UTC')

all_tz = pytz.all_timezones_set.copy()
for priority in priorities:
    all_tz.remove(priority)

all_tz = sorted(list(all_tz))
all_tz[:0] = priorities  # prepends list to list

# tuples for selection widget
all_tz = tuple((tz, tz) for tz in all_tz)
eduNEXT/edx-platform
openedx/core/djangoapps/oauth_dispatch/tests/factories.py
Python
agpl-3.0
1,383
0
# pylint: disable=missing-docstring
from datetime import datetime, timedelta

import factory
import pytz
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from oauth2_provider.models import AccessToken, Application, RefreshToken

from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
from common.djangoapps.student.tests.factories import UserFactory


class ApplicationFactory(DjangoModelFactory):
    class Meta:
        model = Application

    user = factory.SubFactory(UserFactory)
    client_id = factory.Sequence('client_{}'.format)
    client_secret = 'some_secret'
    client_type = 'confidential'
    authorization_grant_type = Application.CLIENT_CONFIDENTIAL
    name = FuzzyText(prefix='name', length=8)


class ApplicationAccessFactory(DjangoModelFactory):
    class Meta:
        model = ApplicationAccess

    application = factory.SubFactory(ApplicationFactory)
    scopes = ['grades:read']


class AccessTokenFactory(DjangoModelFactory):
    class Meta:
        model = AccessToken
        django_get_or_create = ('user', 'application')

    token = FuzzyText(length=32)
    expires = datetime.now(pytz.UTC) + timedelta(days=1)


class RefreshTokenFactory(DjangoModelFactory):
    class Meta:
        model = RefreshToken
        django_get_or_create = ('user', 'application')

    token = FuzzyText(length=32)
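One plausible way to combine the factories above in a test; the username and scopes are illustrative.

user = UserFactory(username='oauth-test-user')
app = ApplicationFactory(user=user)
access = ApplicationAccessFactory(application=app, scopes=['grades:read'])
token = AccessTokenFactory(user=user, application=app)
refresh = RefreshTokenFactory(user=user, application=app)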
yelizariev/addons-yelizariev
delivery_sequence/__manifest__.py
Python
lgpl-3.0
295
0
{
    "name": "Delivery Sequence",
    "version": "12.0.1.0.0",
    "author": "IT-Projects LLC, Ivan Yelizariev",
    "license": "LGPL-3",
    "category": "Custom",
    "website": "https://yelizariev.github.io",
    "depends": ["delivery"],
    "data": ["views.xml"],
    "installable": False,
}
theoj2/Nibbletex
nibblegen/nibblegen.py
Python
gpl-3.0
19,283
0.02733
''' Nibblegen: A script to convert LaTex text to html usable in Nibbleblog Forked from the latex2wp project (the licenceing for which is below). Copyright (C) 2014 Theodore Jones This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' """ Copyright 2009 Luca Trevisan Additional contributors: Radu Grigore LaTeX2WP version 0.6.2 This file is part of LaTeX2WP, a program that converts a LaTeX document into a format that is ready to be copied and pasted into WordPress. You are free to redistribute and/or modify LaTeX2WP under the terms of the GNU General Public License (GPL), version 3 or (at your option) any later version. I hope you will find LaTeX2WP useful, but be advised that it comes WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GPL for more details. You should have received a copy of the GNU General Public License along with LaTeX2WP. If you can't find it, see <http://www.gnu.org/licenses/>. """ import re from sys import argv from latex2wpstyle import * # prepare variables computed from the info in latex2wpstyle count = dict() for thm in ThmEnvs: count[T[thm]] = 0 count["section"] = count["subsection"] = count["equation"] = 0 ref={} endlatex = "&fg="+textcolor if HTML : endproof = "" inthm = "" """ At the beginning, the commands \$, \% and \& are temporarily replaced by placeholders (the second entry in each 4-tuple). At the end, The placeholders in text mode are replaced by the third entry, and the placeholders in math mode are replaced by the fourth entry. """ esc = [["\\$","_dollar_","&#36;","\\$"], ["\\%","_percent_","&#37;","\\%"], ["\\&","_amp_","&amp;","\\&"], [">","_greater_",">","&gt;"], ["<","_lesser_","<","&lt;"]] M = M + [ ["\\more","<!--more-->"], ["\\newblock","\\\\"], ["\\sloppy",""], ["\\S","&sect;"]] Mnomath =[["\\\\","<br/>\n"], ["\\ "," "], ["\\`a","&agrave;"], ["\\'a","&aacute;"], ["\\\"a","&auml;"], ["\\aa ","&aring;"], ["{\\aa}","&aring;"], ["\\`e","&egrave;"], ["\\'e","&eacute;"], ["\\\"e","&euml;"], ["\\`i","&igrave;"], ["\\'i","&iacute;"], ["\\\"i","&iuml;"], ["\\`o","&ograve;"], ["\\'o","&oacute;"], ["\\\"o","&ouml;"], ["\\`o","&ograve;"], ["\\'o","&oacute;"], ["\\\"o","&ouml;"], ["\\H o","&ouml;"], ["\\`u","&ugrave;"], ["\\'u","&uacute;"], ["\\\"u","&uuml;"], ["\\`u","&ugrave;"], ["\\'u","&uacute;"], ["\\\"u","&uuml;"], ["\\v{C}","&#268;"]] cb = re.compile("\\{|}") def extractbody(m) : begin = re.compile("\\\\begin\s*") m= begin.sub("\\\\begin",m) end = re.compile("\\\\end\s*") m = end.sub("\\\\end",m) beginenddoc = re.compile("\\\\begin\\{document}" "|\\\\end\\{document}") parse = beginenddoc.split(m) if len(parse)== 1 : m = parse[0] else : m = parse[1] """ removes comments, replaces double returns with <p> and other returns and multiple spaces by a single space. 
""" for e in esc : m = m.replace(e[0],e[1]) comments = re.compile("%.*?\n") m=comments.sub(" ",m) multiplereturns = re.compile("\n\n+") m= multiplereturns.sub ("<p>",m) spaces=re.compile("(\n|[ ])+") m=spaces.sub(" ",m) """ removes text between \iffalse ... \fi and between \iftex ... \fi keeps text between \ifblog ... \fi """ ifcommands = re.compile("\\\\iffalse|\\\\ifblog|\\\\iftex|\\\\fi") L=ifcommands.split(m) I=ifcommands.findall(m) m= L[0] for i in range(1,(len(L)+1)/2) : if (I[2*i-2]=="\\ifblog") : m=m+L[2*i-1] m=m+L[2*i] ""
" changes $$ ... $$ into \[ ... \] and reformats eqnarray* environments as regular array environments """ doubledollar = re.compile("\\$\\$") L=doubledollar.split(m) m=L[0] for i in range(1,(len(L)+1)/2) : m = m+ "\\[" + L[2*i-1] + "\\]" + L[2*i] m=m.replace("\\begin{e
qnarray*}","\\[ \\begin{array}{rcl} ") m=m.replace("\\end{eqnarray*}","\\end{array} \\]") return m def convertsqb(m) : r = re.compile("\\\\item\\s*\\[.*?\\]") Litems = r.findall(m) Lrest = r.split(m) m = Lrest[0] for i in range(0,len(Litems)) : s= Litems[i] s=s.replace("\\item","\\nitem") s=s.replace("[","{") s=s.replace("]","}") m=m+s+Lrest[i+1] r = re.compile("\\\\begin\\s*\\{\\w+}\\s*\\[.*?\\]") Lthms = r.findall(m) Lrest = r.split(m) m = Lrest[0] for i in range(0,len(Lthms)) : s= Lthms[i] s=s.replace("\\begin","\\nbegin") s=s.replace("[","{") s=s.replace("]","}") m=m+s+Lrest[i+1] return m def converttables(m) : retable = re.compile("\\\\begin\s*\\{tabular}.*?\\\\end\s*\\{tabular}" "|\\\\begin\s*\\{btabular}.*?\\\\end\s*\\{btabular}") tables = retable.findall(m) rest = retable.split(m) m = rest[0] for i in range(len(tables)) : if tables[i].find("{btabular}") != -1 : m = m + convertonetable(tables[i],True) else : m = m + convertonetable(tables[i],False) m = m + rest[i+1] return m def convertmacros(m) : comm = re.compile("\\\\[a-zA-Z]*") commands = comm.findall(m) rest = comm.split(m) r= rest[0] for i in range( len (commands) ) : for s1,s2 in M : if s1==commands[i] : commands[i] = s2 r=r+commands[i]+rest[i+1] return(r) def convertonetable(m,border) : tokens = re.compile("\\\\begin\\{tabular}\s*\\{.*?}" "|\\\\end\\{tabular}" "|\\\\begin\\{btabular}\s*\\{.*?}" "|\\\\end\\{btabular}" "|&|\\\\\\\\") align = { "c" : "center", "l" : "left" , "r" : "right" } T = tokens.findall(m) C = tokens.split(m) L = cb.split(T[0]) format = L[3] columns = len(format) if border : m = "<table border=\"1\" align=center>" else : m="<table align = center><tr>" p=1 i=0 while T[p-1] != "\\end{tabular}" and T[p-1] != "\\end{btabular}": m = m + "<td align="+align[format[i]]+">" + C[p] + "</td>" p=p+1 i=i+1 if T[p-1]=="\\\\" : for i in range (p,columns) : m=m+"<td></td>" m=m+"</tr><tr>" i=0 m = m+ "</tr></table>" return (m) def separatemath(m) : mathre = re.compile("\\$.*?\\$" "|\\\\begin\\{equation}.*?\\\\end\\{equation}" "|\\\\\\[.*?\\\\\\]") math = mathre.findall(m) text = mathre.split(m) return(math,text) def processmath( M ) : R = [] counteq=0 global ref mathdelim = re.compile("\\$" "|\\\\begin\\{equation}" "|\\\\end\\{equation}" "|\\\\\\[|\\\\\\]") label = re.compile("\\\\label\\{.*?}") for m in M : md = mathdelim.findall(m) mb = mathdelim.split(m) """ In what follows, md[0] contains the initial delimiter, which is either \begin{equation}, or $, or \[, and mb[1] contains the actual math
vuolter/pyload
src/pyload/plugins/downloaders/VeehdCom.py
Python
agpl-3.0
2,189
0.000914
# -*- coding: utf-8 -*- import re from ..base.downloader import BaseDownloader class VeehdCom(BaseDownloader): __name__ = "VeehdCom" __type__ = "downloader" __version__ = "0.29"
__status__ = "testing" __pattern__ = r"http://veehd\.com/video/\d+_\S+" __config__ = [ ("enabled", "bool", "Activated", True), ("filename_spaces", "bool", "Allow spaces in filename", False), ("replacement_char", "str", "Filename replacement character", "_"),
] __description__ = """Veehd.com downloader plugin""" __license__ = "GPLv3" __authors__ = [("cat", "cat@pyload")] def setup(self): self.multi_dl = True self.req.can_continue = True def process(self, pyfile): self.download_html() if not self.file_exists(): self.offline() pyfile.name = self.get_file_name() self.download(self.get_file_url()) def download_html(self): url = self.pyfile.url self.log_debug(f"Requesting page: {url}") self.data = self.load(url) def file_exists(self): if not self.data: self.download_html() if "<title>Veehd</title>" in self.data: return False return True def get_file_name(self): if not self.data: self.download_html() m = re.search(r"<title.*?>(.+?) on Veehd</title>", self.data) if m is None: self.error(self._("Video title not found")) name = m.group(1) #: Replace unwanted characters in filename if self.config.get("filename_spaces"): pattern = r"[^\w ]+" else: pattern = r"[^\w.]+" return re.sub(pattern, self.config.get("replacement_char"), name) + ".avi" def get_file_url(self): """ Returns the absolute downloadable filepath. """ if not self.data: self.download_html() m = re.search( r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/.+?)"', self.data, ) if m is None: self.error(self._("Embedded video url not found")) return m.group(1)
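The filename clean-up in get_file_name() above boils down to one re.sub call driven by the filename_spaces/replacement_char settings; standalone, with a made-up title, it behaves like this.

import re

title = "Some: Video Title!"
# filename_spaces=False, replacement_char="_" (the plugin defaults shown above)
print(re.sub(r"[^\w.]+", "_", title) + ".avi")   # -> Some_Video_Title_.avi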
SmartElect/SmartElect
bulk_sms/tasks.py
Python
apache-2.0
7,112
0.001547
import csv import datetime import logging import os from celery.task import task from django.conf import settings from django.contrib.auth import get_user_model from django.utils.timezone import now from libya_elections.constants import REMINDER_CHECKIN, REMINDER_REPORT, \ REMINDER_LAST_REPORT, REMINDER_CLOSE from polling_reports.models import CenterOpen, PollingReport, StaffPhone from register.models import Whitelist from text_messages.utils import get_message from .models import Batch, Broadcast from .utils import Line logger = logging.getLogger(__name__) def read_messages_from_file(file_path): """ Read uploaded bulk SMS file. Generate tuples: (phone_number, message, from_shortcode). Delete file afterward. :param file_path: :return: """ # We don't currently enable customization of the from_shortcode via file upload. # Just use the default. from_shortcode = None with open(file_path, encoding='utf-8') as f: reader = csv.reader(f) for row in reader: if any(row): line = Line._make(row) number = int(line.number) yield number, line.message, from_shortcode os.remove(file_path) @task def upload_bulk_sms_file(batch_id, file_path): """ Upload a batch of bulk SMS messages for the given batch. Delete the temp file after we're done. Assumes the file is valid (run is_file_valid on it first!) :param batch_id: :param _file: :return: message_for_user """ batch = Batch.objects.get(id=batch_id) batch.add_messages(read_messages_from_file(file_path)) batch.status = Batch.PENDING batch.save() # Break out some of the logic for sending polling report reminder messages # for easier testing class PollingReportReminderMessage(object): """ Capture some of the common logic for polling report reminders. (Do not instantiate, use the subclasses.) """ def __init__(self, message_number, reminder_number): self.message_number = message_number self.reminder_number = reminder_number def get_message_code(self): raise NotImplementedError def get_message_text(self): context = {'message_number': self.message_number, 'reminder_number': self.reminder_number} return get_message(self.get_message_code()).msg.format(**context) def get_phone_numbers_to_send_to(self): """ Generator that yields (phone_number, message_text, from_shortcode) tuples for the phone numbers that we need to send this reminder to. """ # Get the phone numbers we want to send to, excluding those that have # already done the thing we want to remind them of phone_numbers = self.PhoneModel.objects.exclude(phone_number__in=self.to_exclude())\ .values_list('phone_number', flat=True) message_text = self.get_message_text() # Set from_number to REPORTS_SHORT_CODE so that recipient can # simply just respond to this message with their report. 
from_shortcode = settings.REPORTS_SHORT_CODE for phone_number in phone_numbers: yield phone_number, message_text, from_shortcode def to_exclude(self): raise NotImplementedError class CheckinReminderMessage(PollingReportReminderMessage): """ Message telling user to check in (activate phone, roll call) """ def __init__(self, message_number, reminder_number): super(CheckinReminderMessage, self).__init__(message_number, reminder_number) self.PhoneModel = Whitelist def get_message_code(self): return REMINDER_CHECKIN def to_exclude(self): """Return list of phone numbers to exclude""" midnight = now().replace(hour=0, minute=0, microsecond=0) return CenterOpen.objects.filter( creation_date__gte=midnight, ).values_list('phone_number', flat=True) class PollingDayReportReminderMessage(PollingReportReminderMessage): """ Message telling user to send in polling day statistics report """ def __init__(self, message_number, reminder_number): super(PollingDayReportReminderMessage, self).__init__(message_number, reminder_number) self.PhoneModel = StaffPhone def get_message_code(self): return { 4: REMINDER_REPORT, 5: REMINDER_REPORT, 6: REMINDER_LAST_REPORT, 7: REMINDER_CLOSE, }[self.message_number] def to_exclude(self): """Return list of phone numbers to exclude""" reporting_period = self.message_number - 3 one_day_ago = now() - datetime.timedelta(hours=24) return PollingReport.objects.filter( period_number=reporting_period, creation_date__gte=one_day_ago, ).values_list('phone_number', flat=True) @task def message_reminder_task(message_number, reminder_number, audience, election): """ Make a batch to send out a bunch of reminder messages to a given audience, iffi they haven't sent us the
expected report yet. """ logger.debug("Start message_reminder_
task") if audience not in ('whitelist', 'registered'): raise ValueError("Unknown audience type %s - expected whitelist or registered" % audience) # Batches need to be owned by somebody - pick a non-random superuser user = get_user_model().objects.filter(is_active=True, is_superuser=True)[0] batch = Batch.objects.create( name="Reminder %d for message_number %d" % (reminder_number, message_number), created_by=user, priority=Batch.PRIORITY_TIME_CRITICAL) # create the corresponding broadcast object broadcast = Broadcast.objects.create( created_by=batch.created_by, batch=batch, audience=Broadcast.STAFF_ONLY, message=batch.name, # this message is only temporary ) try: if audience == 'whitelist': msg = CheckinReminderMessage(message_number, reminder_number) else: msg = PollingDayReportReminderMessage(message_number, reminder_number) batch.add_messages(msg.get_phone_numbers_to_send_to()) batch.status = Batch.APPROVED batch.reviewed_by = user batch.save() # update the message for the broadcast. broadcast.message = msg.get_message_text() broadcast.save() logger.debug("Batch saved") except Exception: logger.exception("Error while creating message reminder batch") # If anything went wrong, don't leave partial batch lying around in unknown state batch.delete() broadcast.delete() raise @task def approve_broadcast(broadcast_id): """Creates messages for each individual in the audience and changes batch status to approved.""" broadcast = Broadcast.objects.get(pk=broadcast_id) messages = broadcast.get_messages() batch = broadcast.batch batch.add_messages(messages) batch.status = Batch.APPROVED batch.save()
mefly2012/platform
src/parse/qyxx_ck.py
Python
apache-2.0
1,153
0.00272
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

from common import public
import time
import re


class qyxx_ck():
    """Mining license (采矿许可证)"""

    need_check_ziduan = ['valid_from',
                         'validto'
                         ]

    def check_valid_from(self, indexstr, ustr):
        """Valid-from date (有效期限自)"""
        ret = None
        validdate = indexstr['validdate'].strip()
        if validdate and len(validdate):
            err, time = public.get_date(validdate, 0)
            if err:
                ret = err
            else:
                frm = time
                if ustr != frm:
                    ret = u'不等我的是-%s-' % frm  # "not equal, mine is -%s-"
        return ret

    def check_validto(self, indexstr, ustr):
        """Valid-to date (有效期限至)"""
        ret = None
        validdate = indexstr['validdate'].strip()
        if validdate and len(validdate):
            err, time = public.get_date(validdate, 1)
            if err:
                ret = err
            else:
                frm = time
                if ustr != frm:
                    ret = u'不等我的是-%s-' % frm  # "not equal, mine is -%s-"
        return ret
laurentb/assnet
assnet/filters.py
Python
agpl-3.0
2,171
0
# -*- coding: utf-8 -*- # Copyright (C) 2011 Romain Bignon, Laurent Bachelier # # This file is part of assnet. # # assnet is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the
Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # assnet is distributed in the hope that it will be us
eful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with assnet. If not, see <http://www.gnu.org/licenses/>. from urlparse import urlsplit, urlunsplit from urllib import quote from paste.url import URL __all__ = ['compact', 'quote_url', 'quote_path', 'quote_and_decode_url'] UNSAFE_CHARS = { '?': quote('?'), '&': quote('&'), ';': quote(';'), ':': quote(':'), ',': quote(','), '=': quote('='), ' ': quote(' '), '+': quote('+'), ':': quote(':'), '$': quote('$'), '"': quote('"'), } def compact(text): return text.replace('\n', ' ').strip() def quote_path(path): """ Quote a path (see quote_url) """ return ''.join([UNSAFE_CHARS.get(c, c) for c in path]) def quote_url(url): """ Quote the path part of an URL object and return the full URL as a string. Special characters in the URL are not considered as the query string or any other parameters, they should be in their dedicated variables of the URL class. """ purl = urlsplit(url.url) # do not escape the scheme and netloc if purl.scheme and purl.netloc: path = urlunsplit((None, None, purl.path, purl.query, purl.fragment)) basepath = urlunsplit((purl.scheme, purl.netloc, '', None, None)) else: path = url.url basepath = '' return URL(basepath + quote_path(path), vars=url.vars).href def quote_and_decode_url(url): """ Like quote_url but for usage in Mako templates """ return quote_url(url).decode('utf-8')
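quote_path() above escapes only the characters listed in UNSAFE_CHARS, leaving everything else untouched; a small illustration with an invented file name:

from assnet.filters import quote_path

print(quote_path('my file:1,2.txt'))   # -> my%20file%3A1%2C2.txt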
sims1253/coala-bears
bears/python/RadonBear.py
Python
agpl-3.0
1,902
0.003155
import radon.complexity import radon.visitors from coalib.bears.LocalBear import LocalBear from coalib.results.Result import Result from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY from coalib.results.SourceRange import SourceRange from coalib.settings.Setting import typed_list class RadonBear(LocalBear): def run(self, filename, file, radon_ranks_info: typed_list(str)=(), radon_ranks_normal: typed_list(str)=('C', 'D'), radon_ranks_major: typed_list(str)=('E', 'F')): """ Uses radon to compute complexity of a given file. :param radon_ranks_info: The ranks (given by radon) to treat as severity INFO. :param radon_ranks_normal: The ranks (given by radon) to treat as severity NORMAL. :param radon_ranks_major: The ranks (given by radon) to treat as severity MAJOR. """ severity_map = { RESULT_SEVERITY.INFO: radon_ranks_info, RESULT_SEVERITY.NORMAL: radon_ranks_normal, RESULT_SEVERITY.MAJOR: radon_ranks_major } for visitor in radon.complexity.cc_visit("".join(file)): rank = radon.complexity.cc_rank(visitor.complexity) severity = None for result_severity, rank_list in severity_map.items(): if rank in rank_list: severity = result_severity if severity is None: continue visitor_range = SourceRange.from_values( filename, visitor.lineno, visitor.col_offset, visitor.endline) message = "{} has a cyclomatic complexity of {}".format( visitor.name, rank)
yield Result(self, me
ssage, severity=severity, affected_code=(visitor_range,))
jscott1989/happening
src/plugins/__init__.py
Python
mit
43
0
"""P
lace all plugins in
this directory."""
amatkivskiy/baidu
baidu/utils/msbuilder.py
Python
apache-2.0
1,654
0.002418
import os import datetime from utils.util import run_command __author__ = 'maa' class MsBuilder: def __init__(self, msbuild): if msbuild == None: self.msbuild = r"C:\Windows\Microsoft.NET\Framework64\v4.0.30319\MSBuild.exe" else: self.msbuild = msbuild def build_with_params(self, csprojPath, targets, properties): if not os.path.isfile(self.msbuild): raise Exception('MsBuild.exe not found. path = ' + self.msbuild) start = datetime.datetime.now() print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S')) params = [self.msbuild, csprojPath] params.append('/t:' + ';'.join(targets)) params.append('/p:' + ';'.join(properties)) return run_command(params) def build(self, csprojPath, args): if not os.path.isfile(self.msbuild): raise Exception('MsBuild.exe not found. path = ' + self.msbuild) start
= datetime.datetime.now() print('STAR
TED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S')) params = [self.msbuild, csprojPath] + list(args) return run_command(params) def get_files_from_project_bin_folder(self, csproj, configuration, do_return_full_paths=False): name = os.path.dirname(os.path.realpath(csproj)) bin_config_path = os.path.join(name, 'bin', configuration) files = os.listdir(bin_config_path) if not do_return_full_paths: return files files_full_path = list() for file in files: files_full_path.append(os.path.join(bin_config_path, file)) return files_full_path
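A hypothetical invocation of the wrapper above; the import path is inferred from the record's layout, and the project path, targets, and properties are made up for illustration.

from utils.msbuilder import MsBuilder   # assumed import path

builder = MsBuilder(None)   # None falls back to the default Framework64 v4.0.30319 MSBuild.exe
builder.build_with_params(r'C:\src\App\App.csproj',
                          targets=['Clean', 'Build'],
                          properties=['Configuration=Release', 'Platform=x64'])
binaries = builder.get_files_from_project_bin_folder(r'C:\src\App\App.csproj',
                                                     'Release',
                                                     do_return_full_paths=True)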
nicolashainaux/mathmaker
tests/01_core_objects/test_110_polygons.py
Python
gpl-3.0
4,579
0.003494
# -*- coding: utf-8 -*- # Mathmaker creates automatically maths exercises sheets # with their answers # Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com> # This file is part of Mathmaker. # Mathmaker is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # any later version. # Mathmaker is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Mathmaker; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import pytest from mathmaker.lib.core.root_calculus import Value from mathmaker.lib.core.base_geometry import Point from mathmaker.lib.core.geometry import Polygon @pytest.fixture def p1(): p1 = Polygon([Point('A', 0.5, 0.5), Point('B', 3, 1), Point('C', 3.2, 4), Point('D', 0.8, 3) ]) p1.side[0].label = Value(4, unit='cm') p1.side[1].label = Value(3, unit='cm') p1.side[2].label = Value(2, unit='cm') p1.side[3].label = Value(6.5, unit='cm') p1.angle[0].label = Value(64, unit="\\textdegree") p1.angle[1].label = Value(128, unit="\\textdegree") p1.angle[2].label = Value(32, unit="\\textdegree") p1.angle[3].label = Value(256, unit="\\textdegree") p1.angle[0].mark = 'simple' p1.angle[1].mark = 'simple' p1.angle[2].mark = 'simple' p1.angle[3].mark = 'simple' return p1 def test_p1_into_euk(p1): """Check Polygon's generated euk file.""" assert p1.into_euk() == \ 'box -0.1, -0.1, 3.8, 4.6\n\n'\ 'A = point(0.5, 0.5)\n'\ 'B = point(3, 1)\n'\ 'C = point(3.2, 4)\n'\ 'D = point(0.8, 3)\n'\ '\n'\ 'draw\n'\ ' (A.B.C.D)\n'\ ' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\ ' $\\rotatebox{86}{\sffamily 3~cm}$ B 86 - 8.9 deg 4.9\n'\ ' $\\rotatebox{23}{\sffamily 2~cm}$ C 203 - 12.2 deg 4.2\n'\ ' $\\rotatebox{83}{\sffamily 6.5~cm}$ D 263 - 12.9 deg 4.1\n'\ ' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\ ' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ B 138.7 deg 2.7\n'\ ' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ C 234.3 deg 2.7\n'\ ' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ D 322.7 deg 2.7\n'\ ' "A" A 227.3 deg, font("sffamily")\n'\ ' "B" B 318.7 deg, font("sffamily")\n'\ ' "C" C 54.3 deg, font("sffamily")\n'\ ' "D" D 142.7 deg, font("sffamily")\n'\ 'end\n\n'\ 'label\n'\ ' B, A, D simple\n'\ ' C, B, A simple\n'\ ' D, C, B simple\n'\ ' A, D, C simple\n'\ 'end\n' def test_p1_rename_errors(p1): """Check wrong arguments trigger exceptions when renaming.""" with pytest.raises(TypeError): p1.rename(5678) with pytest.raises(ValueError): p1.rename('KJLIZ') def test_p1_renamed(p1): """Check renaming Polygon is OK.""" p1.rename('YOGA') assert p1.into_euk() == \ 'box -0.1, -0.1, 3.8, 4.6\n\n'\ 'A = point(0.5, 0.5)\n'\ 'G = point(3, 1)\n'\ 'O = point(3.2, 4)\n'\ 'Y = point(0.8, 3)\n'\ '\n'\ 'draw\n'\ ' (A.G.O.Y)\n'\ ' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\ ' $\\rotatebox{86}{\sffamily 3~cm}$ G 86 - 8.9 deg 4.9\n'\ ' $\\rotatebox{23}{\sffamily 2~cm}$ O 203 - 12.2 deg 4.2\n'\ ' $\\rotatebox{83}{\sffamily 6.5~cm}$ Y 263 - 12.9 deg 4.1\n'\ ' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\ ' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ G 138.7 deg 2.7\n'\ ' $\\rotatebox{54.3}{\sffamily 
32\\textdegree}$ O 234.3 deg 2.7\n'\ ' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ Y 322.7 deg 2
.7\n'\ ' "A" A 227.3 deg, font("sffamily")\n'\ ' "G" G 318.7 deg, font("sffamily")\n'\
' "O" O 54.3 deg, font("sffamily")\n'\ ' "Y" Y 142.7 deg, font("sffamily")\n'\ 'end\n\n'\ 'label\n'\ ' G, A, Y simple\n'\ ' O, G, A simple\n'\ ' Y, O, G simple\n'\ ' A, Y, O simple\n'\ 'end\n'
spiricn/libIDL
idl/Enum.py
Python
mit
3,126
0.011836
from idl.Annotatable import Annotatable from idl.IDLSyntaxError import IDLSyntaxError from idl.Type import Type class EnumField(Annotatable): ''' Object that represents a single enumeration field. ''' def __init__(self, enum, name, value): Annotatable.__init__(self) self._enum = enum self._name = name self._value = value @property def enum(self)
: ''' Enumeration type this field is associated with. ''' return self._enum @property def name(self): ''' Field name. ''' return self._name @property def value(self): ''' Integer field value. ''' return self._value class Enum(Type)
: def __init__(self, module, desc): Type.__init__(self, module, Type.ENUM, desc.name) self._desc = desc self._fields = [] for field in self._desc.fields: if field.value: # Evaluate value value = eval(field.value) # Duplicate value check for i in self._fields: if i.value == value: raise IDLSyntaxError(self.module, field.line, 'Duplicate explicit field value %d given for field %r in enumeration %r' % (value, field.name, self.pathStr) ) else: value = self._generateFieldValue() newField = EnumField(self, field.name, value) # Duplicate name check if self.getField(newField.name): raise IDLSyntaxError(self.module, field.line, 'Duplicate field name %r in enumeration %r' % (newField.name, self.pathStr) ) # Annotations newField._assignAnnotations(field.annotations) self._fields.append(newField) @property def fields(self): ''' List of enumeration fields. ''' return self._fields def getField(self, name): ''' Gets a field with a specific name. @param name: Field name. @return: EnumField object or None. ''' for field in self._fields: if field.name == name: return field return None def _generateFieldValue(self): # Assign value value = 0 while True: taken = False for field in self._fields: if field.value == value: taken = True value += 1 break if not taken: break return value
SolusOS-discontinued/pisi
pisi/signalhandler.py
Python
gpl-2.0
1,553
0.001932
# -*- coding: utf-8 -*- # # Copyright (C) 2006, TUBITAK/UEKAE # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later v
ersion. # # Please read the COPYING file. # import signal exception = { signal.SIGINT:KeyboardInterrupt } class Signal: def __init__(self, sig): self.signal = sig self.oldhandler = signal.get
signal(sig) self.pending = False class SignalHandler: def __init__(self): self.signals = {} def signal_handler(self, sig, frame): signal.signal(sig, signal.SIG_IGN) self.signals[sig].pending = True def disable_signal(self, sig): if sig not in self.signals.keys(): self.signals[sig] = Signal(sig) signal.signal(sig, self.signal_handler) def enable_signal(self, sig): if sig in self.signals.keys(): if self.signals[sig].oldhandler: oldhandler = self.signals[sig].oldhandler else: oldhandler = signal.SIG_DFL pending = self.signals[sig].pending del self.signals[sig] signal.signal(sig, oldhandler) if pending: raise exception[sig] def signal_disabled(self, sig): return sig in self.signals.keys() def signal_pending(self, sig): return self.signal_disabled(sig) and self.signals[sig].pending
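The handler above defers a signal while it is disabled and re-raises it on re-enable; the intended pattern looks roughly like this.

import signal

handler = SignalHandler()
handler.disable_signal(signal.SIGINT)
try:
    do_critical_work()   # placeholder for code that must not be interrupted
finally:
    # re-installs the old handler; raises KeyboardInterrupt here if SIGINT arrived meanwhile
    handler.enable_signal(signal.SIGINT)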
kyleabeauchamp/mdtraj
mdtraj/formats/netcdf.py
Python
lgpl-2.1
21,654
0.002448
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2013 Stanford University and the Authors # # Authors: Robert McGibbon # Contributors: # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## """ This module provides the ability to read and write AMBER NetCDF trajectories. The code is heavily based on amber_netcdf_trajectory_tools.py by John Chodera. """ ############################################################################## # imports ############################################################################## from __future__ import print_function, division # stdlib import os import socket import warnings from datetime import datetime from distutils.version import StrictVersion import numpy as np from mdtraj import version from mdtraj.formats.registry import _FormatRegistry from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices __all__ = ['NetCDFTrajectoryFile', 'load_netcdf'] ############################################################################## # classes ############################################################################## @_FormatRegistry.register_loader('.nc') @_FormatRegistry.register_loader('.netcdf') def load_netcdf(filename, top=None, stride=None, atom_indices=None, frame=None): """Load an AMBER NetCDF file. Since the NetCDF format doesn't contain information to specify the topology, you need to supply a topology Parameters ---------- filename : str filename of AMBER NetCDF file. top : {str, Trajectory, Topology} The NetCDF format does not contain topology information. Pass in either the path to a pdb file, a trajectory, or a topology to supply this information. stride : int, default=None Only read every stride-th frame atom_indices : array_like, optional If not None, then read only a subset of the atoms coordinates from the file. This may be slightly slower than the standard read because it requires an extra copy, but will save memory. frame : int, optional Use this option to load only a single frame from a trajectory on disk. If frame is None, the default, the entire trajectory will be loaded. If supplied, ``stride`` will be ignored. Returns ------- trajectory : md.Trajectory The resulting trajectory, as an md.Trajectory object. 
See Also -------- mdtraj.NetCDFTrajectoryFile : Low level interface to NetCDF files """ from mdtraj.core.trajectory import _parse_topology, Trajectory topology = _parse_topology(top) atom_indices = cast_indices(atom_indices) if atom_indices is not None: topology = topology.subset(atom_indices) with NetCDFTrajectoryFile(filename) as f: if frame is not None: f.seek(frame) xyz, time, cell_lengths, cell_angles = f.read(n_frames=1, atom_indices=atom_indices) else: xyz, time, cell_lengths, cell_angles = f.read(stride=stride, atom_indices=atom_indices) xyz = in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True) cell_lengths = in_units_of(cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True) trajectory = Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=cell_lengths, unitcell_angles=cell_angles) return trajectory @_FormatRegistry.register_fileobject('.nc') @_FormatRegistry.register_fileobject('.netcdf') class NetCDFTrajectoryFile(object): """Interface for reading and writing to AMB
ER NetCDF files. This is a file-like object, t
hat supports both reading or writing depending on the `mode` flag. It implements the context manager protocol, so you can also use it with the python 'with' statement. Parameters ---------- filename : str The name of the file to open mode : {'r', 'w'}, default='r' The mode in which to open the file. Valid options are 'r' and 'w' for 'read' and 'write', respectively. force_overwrite : bool, default=False In write mode, if a file named `filename` already exists, clobber it and overwrite it. """ distance_unit = 'angstroms' def __init__(self, filename, mode='r', force_overwrite=True): self._closed = True # is the file currently closed? self._mode = mode # what mode were we opened in if StrictVersion(import_('scipy.version').short_version) < StrictVersion('0.12.0'): raise ImportError('MDTraj NetCDF support requires scipy>=0.12.0. ' 'You have %s' % import_('scipy.version').short_version) netcdf = import_('scipy.io').netcdf_file if mode not in ['r', 'w']: raise ValueError("mode must be one of ['r', 'w']") if mode == 'w' and not force_overwrite and os.path.exists(filename): raise IOError('"%s" already exists' % filename) # AMBER uses the NetCDF3 format, with 64 bit encodings, which # for scipy.io.netcdf_file is "version=2" self._handle = netcdf(filename, mode=mode, version=2) self._closed = False # self._frame_index is the current frame that we're at in the # file # self._needs_initialization indicates whether we need to set the # global properties of the file. This is required before the first # write operation on a new file if mode == 'w': self._frame_index = 0 self._needs_initialization = True elif mode == 'r': self._frame_index = 0 self._needs_initialization = False else: raise RuntimeError() @property def n_atoms(self): self._validate_open() if self._needs_initialization: raise IOError('The file is uninitialized.') return self._handle.dimensions['atom'] @property def n_frames(self): self._validate_open() if not self._needs_initialization: return self._handle.variables['coordinates'].shape[0] return 0 def _validate_open(self): if self._closed: raise IOError('The file is closed.') def read(self, n_frames=None, stride=None, atom_indices=None): """Read data from a molecular dynamics trajectory in the AMBER NetCDF format. Parameters ---------- n_frames : int, optional If n_frames is not None, the next n_frames of data from the file will be read. Otherwise, all of the frames in the file will be read. stride : int, optional If stride is not None, read only every stride-th frame from disk. atom_indices : np.ndarray, dtype=int, optional The specific indices of the atoms you'd like to retrieve. If not supplied, all of the atoms will be retrieved. Returns ------- coordinates : np.ndarray, shape=(n_frames, n_atoms, 3) The cartesian coordinates of the atoms, in units of angstroms. time : np.ndarray, None The time corresponding to each frame, in units of picoseconds, or None if no time information is present in the trajectory. cell_lengths : np.ndarray, None T
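Following the load_netcdf docstring above, a minimal load could look like this; the file names are placeholders.

from mdtraj.formats.netcdf import load_netcdf

traj = load_netcdf('trajectory.nc', top='system.pdb', stride=10)
print(traj.n_frames, traj.n_atoms)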
google-research/uda
image/randaugment/augmentation_transforms.py
Python
apache-2.0
14,832
0.007821
# coding=utf-8 # Copyright 2019 The Google UDA Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transforms used in the Augmentation Policies. Copied from AutoAugment: https://github.com/tensorflow/models/blob/master/research/autoaugment/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import numpy as np # pylint:disable=g-multiple-import from PIL import ImageOps, ImageEnhance, ImageFilter, Image # pylint:enable=g-multiple-import import tensorflow as tf FLAGS = tf.flags.FLAGS IMAGE_SIZE = 32 # What is the dataset mean and std of the images on the training set PARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted def get_mean_and_std(): if FLAGS.task_name == "cifar10": means = [0.49139968, 0.48215841, 0.44653091] stds = [0.24703223, 0.24348513, 0.26158784] elif FLAGS.task_name == "svhn": means = [0.4376821, 0.4437697, 0.47280442] stds = [0.19803012, 0.20101562, 0.19703614] else: assert False return means, stds def _width_height_from_img_shape(img_shape): """`img_shape` in autoaugment is (height, width).""" return (img_shape[1], img_shape[0]) def random_flip(x): """Flip the input x horizontally with 50% probability.""" if np.random.rand(1)[0] > 0.5: return np.fliplr(x) return x def zero_pad_and_crop(img, amount=4): """Zero pad by `amount` zero pixels on each side then take a random crop. Args: img: numpy image that will be zero padded and cropped. amount: amount of zeros to pad `img` with horizontally and verically. Returns: The cropped zero padded img. The returned numpy array will be of the same shape as `img`. """ padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2, img.shape[2])) padded_i
mg[amount:img.shape[0] + amount, amount: img.shape[1] + amount, :] = img top = np.random.randint(low=0, high=2 * amount) left = np.random.randint(low=0, high=2 * amount) new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1],
:] return new_img def create_cutout_mask(img_height, img_width, num_channels, size): """Creates a zero mask used for cutout of shape `img_height` x `img_width`. Args: img_height: Height of image cutout mask will be applied to. img_width: Width of image cutout mask will be applied to. num_channels: Number of channels in the image. size: Size of the zeros mask. Returns: A mask of shape `img_height` x `img_width` with all ones except for a square of zeros of shape `size` x `size`. This mask is meant to be elementwise multiplied with the original image. Additionally returns the `upper_coord` and `lower_coord` which specify where the cutout mask will be applied. """ assert img_height == img_width # Sample center where cutout mask will be applied height_loc = np.random.randint(low=0, high=img_height) width_loc = np.random.randint(low=0, high=img_width) # Determine upper right and lower left corners of patch upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2)) lower_coord = (min(img_height, height_loc + size // 2), min(img_width, width_loc + size // 2)) mask_height = lower_coord[0] - upper_coord[0] mask_width = lower_coord[1] - upper_coord[1] assert mask_height > 0 assert mask_width > 0 mask = np.ones((img_height, img_width, num_channels)) zeros = np.zeros((mask_height, mask_width, num_channels)) mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = ( zeros) return mask, upper_coord, lower_coord def cutout_numpy(img, size=16): """Apply cutout with mask of shape `size` x `size` to `img`. The cutout operation is from the paper https://arxiv.org/abs/1708.04552. This operation applies a `size`x`size` mask of zeros to a random location within `img`. Args: img: Numpy image that cutout will be applied to. size: Height/width of the cutout mask that will be Returns: A numpy tensor that is the result of applying the cutout mask to `img`. """ img_height, img_width, num_channels = (img.shape[0], img.shape[1], img.shape[2]) assert len(img.shape) == 3 mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size) return img * mask def float_parameter(level, maxval): """Helper function to scale `val` between 0 and maxval . Args: level: Level of the operation that will be between [0, `PARAMETER_MAX`]. maxval: Maximum value that the operation can have. This will be scaled to level/PARAMETER_MAX. Returns: A float that results from scaling `maxval` according to `level`. """ return float(level) * maxval / PARAMETER_MAX def int_parameter(level, maxval): """Helper function to scale `val` between 0 and maxval . Args: level: Level of the operation that will be between [0, `PARAMETER_MAX`]. maxval: Maximum value that the operation can have. This will be scaled to level/PARAMETER_MAX. Returns: An int that results from scaling `maxval` according to `level`. 
""" return int(level * maxval / PARAMETER_MAX) def pil_wrap(img, use_mean_std): """Convert the `img` numpy tensor to a PIL Image.""" if use_mean_std: MEANS, STDS = get_mean_and_std() else: MEANS = [0, 0, 0] STDS = [1, 1, 1] img_ori = (img * STDS + MEANS) * 255 return Image.fromarray( np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA') def pil_unwrap(pil_img, use_mean_std, img_shape): """Converts the PIL img to a numpy array.""" if use_mean_std: MEANS, STDS = get_mean_and_std() else: MEANS = [0, 0, 0] STDS = [1, 1, 1] pic_array = np.array(pil_img.getdata()).reshape((img_shape[0], img_shape[1], 4)) / 255.0 i1, i2 = np.where(pic_array[:, :, 3] == 0) pic_array = (pic_array[:, :, :3] - MEANS) / STDS pic_array[i1, i2] = [0, 0, 0] return pic_array def apply_policy(policy, img, use_mean_std=True): """Apply the `policy` to the numpy `img`. Args: policy: A list of tuples with the form (name, probability, level) where `name` is the name of the augmentation operation to apply, `probability` is the probability of applying the operation and `level` is what strength the operation to apply. img: Numpy image that will have `policy` applied to it. Returns: The result of applying `policy` to `img`. """ img_shape = img.shape pil_img = pil_wrap(img, use_mean_std) for xform in policy: assert len(xform) == 3 name, probability, level = xform xform_fn = NAME_TO_TRANSFORM[name].pil_transformer( probability, level, img_shape) pil_img = xform_fn(pil_img) return pil_unwrap(pil_img, use_mean_std, img_shape) class TransformFunction(object): """Wraps the Transform function for pretty printing options.""" def __init__(self, func, name): self.f = func self.name = name def __repr__(self): return '<' + self.name + '>' def __call__(self, pil_img): return self.f(pil_img) class TransformT(object): """Each instance of this class represents a specific transform.""" def __init__(self, name, xform_fn): self.name = name self.xform = xform_fn def pil_transformer(self, probability, level, img_shape): def return_function(im): if random.random() < probability: im = self.xform(im, level, img_shape) return im name = self.name + '({:.1f},{})'.format(probability, level) return TransformFunction(return_function, name) ################## Transform Functions ######
sysadminmatmoz/ingadhoc
project_description/__openerp__.py
Python
agpl-3.0
1,639
0
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar) # All Rights Reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without e
ven the implied warr
anty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Project Description', 'version': '8.0.1.0.0', 'category': 'Projects & Services', 'sequence': 14, 'summary': '', 'description': """ Project Description =================== Adds account_analytic_account description field on project form view """, 'author': 'ADHOC SA', 'website': 'www.adhoc.com.ar', 'license': 'AGPL-3', 'images': [ ], 'depends': [ 'project', ], 'data': [ 'view/project_view.xml', ], 'demo': [ ], 'test': [ ], 'installable': True, 'auto_install': False, 'application': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
zhushuchen/Ocean
组件/zookeeper-3.3.6/src/contrib/zkpython/src/test/delete_test.py
Python
agpl-3.0
2,872
0.006964
#!/usr/bin/python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import zookeeper, zktestbase, unittest, threading class DeletionTest(zktestbase.TestBase): """Test whether we can delete znodes""" def test_sync_delete(self): ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"} self.assertEqual(self.connected, True) ret = zookeeper.create(self.handle, "/zk-python-deletetest", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL) self.assertEqual(ret, "/zk-python-deletetest") ret = zookeeper.delete(self.handle,"/zk-python-deletetest") self.assertEqual(ret, zookeeper.OK) children = zookeeper.get_children(self.handle, "/") self.assertEqual(False, "zk-python-deletetest" in children) # test exception self.assertRaises(zookeeper.NoNodeException, zookeeper.delete, self.handle, "/zk-python-deletetest") def test_async_delete(self): ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"} self.assertEqual(self.connected, True) ret = zookeeper.create(self.handle, "/zk-python-adel
etetest", "node
contents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL) self.assertEqual(ret, "/zk-python-adeletetest") self.cv = threading.Condition() self.callback_flag = False self.rc = -1 def callback(handle, rc): self.cv.acquire() self.callback_flag = True self.cv.notify() self.rc = rc # don't assert this here, as if the assertion fails, the test will block self.cv.release() self.cv.acquire() ret = zookeeper.adelete(self.handle,"/zk-python-adeletetest",-1,callback) self.assertEqual(ret, zookeeper.OK, "adelete failed") while not self.callback_flag: self.cv.wait(15) self.cv.release() self.assertEqual(self.callback_flag, True, "adelete timed out") self.assertEqual(self.rc, zookeeper.OK) if __name__ == '__main__': unittest.main()
OrangeTux/MafBot
mafbot/__init__.py
Python
gpl-3.0
106
0
from selenium import webdriver
import logging

logger = logging.getLogger()

driver = webdriver.Firefox()
youtube/cobalt
third_party/skia/infra/bots/recipe_modules/checkout/api.py
Python
bsd-3-clause
5,928
0.006748
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0201 from recipe_engine import recipe_api from recipe_engine import config_types class CheckoutApi(recipe_api.RecipeApi): @property def default_checkout_root(self): """The default location for cached persistent checkouts.""" return self.m.vars.cache_dir.join('work') def git(self, checkout_root): """Run the steps to perform a pure-git checkout without DEPS.""" skia_dir = checkout_root.join('skia') self.m.git.checkout( self.m.properties['repository'], dir_path=skia_dir, ref=self.m.properties['revision'], submodules=False) if self.m.vars.is_trybot: self.m.git('fetch', 'origin', self.m.properties['patch_ref']) self.m.git('checkout', 'FETCH_HEAD') self.m.git('rebase', self.m.properties['revision']) return self.m.properties['revision'] def bot_update(self, checkout_root, gclient_cache=None, checkout_chromium=False, checkout_flutter=False, extra_gclient_env=None, parent_rev=False, flutter_android=False): """Run the steps to obtain a checkout using bot_update. Args: checkout_root: Root directory where the code will be synced. gclient_cache: Optional, directory of the gclient cache. checkout_chromium: If True, will check out chromium/src.git in addition to the primary repo. checkout_flutter: If True, will checkout flutter in addition to the primary repo. extra_gclient_env: Map of extra environment variable names to their values to supply while running gclient. parent_rev: If True, checks out the parent of the specified revision, rather than the revision itself, ie. HEAD^ for normal jobs and HEAD (no patch) for try jobs. flutter_android: Indicates that we're checking out flutter for Android. """ if not gclient_cache: gclient_cache = self.m.vars.cache_dir.join('git') if not extra_gclient_env: extra_gclient_env = {} cfg_kwargs = {} # Use a persistent gclient cache for Swarming. cfg_kwargs['CACHE_DIR'] = gclient_cache # Create the checkout path if necessary. # TODO(borenet): 'makedirs checkout_root' self.m.file.ensure_directory('makedirs checkout_path', checkout_root) # Initial cleanup. gclient_cfg = self.m.gclient.make_config(**cfg_kwargs) main_repo = self.m.properties['repository'] if checkout_flutter: main_repo = 'https://github.com/flutter/engine.git' main_name = self.m.path.basename(main_repo) if main_name.endswith('.git'): main_name = main_name[:-len('.git')] # Special case for flutter because it seems to need a very specific # directory structure to successf
ully build. if checkout_flutter and main_name == 'engine': main_name = 'src/flutter' main = gclient_cfg.solutions.add() main.name = main_name main.managed = False main.url = main_repo main.revision = self.m.properties.get('revision') or 'origin/master' m = gclient_cfg.got_revi
sion_mapping m[main_name] = 'got_revision' patch_root = main_name patch_repo = main.url if self.m.properties.get('patch_repo'): patch_repo = self.m.properties['patch_repo'] patch_root = patch_repo.split('/')[-1] if patch_root.endswith('.git'): patch_root = patch_root[:-4] if checkout_flutter: # Skia is a DEP of Flutter; the 'revision' property is a Skia revision, # and any patch should be applied to Skia, not Flutter. main.revision = 'origin/master' main.managed = True m[main_name] = 'got_flutter_revision' if flutter_android: gclient_cfg.target_os.add('android') skia_dep_path = 'src/third_party/skia' gclient_cfg.repo_path_map['https://skia.googlesource.com/skia'] = ( skia_dep_path, 'HEAD') gclient_cfg.revisions[skia_dep_path] = self.m.properties['revision'] m[skia_dep_path] = 'got_revision' patch_root = skia_dep_path if checkout_chromium: main.custom_vars['checkout_chromium'] = True extra_gclient_env['GYP_CHROMIUM_NO_ACTION'] = '0' # TODO(rmistry): Remove the below block after there is a solution for # crbug.com/616443 entries_file = checkout_root.join('.gclient_entries') if self.m.path.exists(entries_file) or self._test_data.enabled: self.m.file.remove('remove %s' % entries_file, entries_file) # Run bot_update. if not self.m.vars.is_trybot and parent_rev: main.revision = main.revision + '^' patch_refs = None patch_ref = self.m.properties.get('patch_ref') if patch_ref: patch_refs = ['%s@%s:%s' % (self.m.properties['patch_repo'], self.m.properties['revision'], patch_ref)] self.m.gclient.c = gclient_cfg with self.m.context(cwd=checkout_root): update_step = self.m.bot_update.ensure_checkout( patch_root=patch_root, # The logic in ensure_checkout for this arg is fairly naive, so if # patch=False, we'll see "... (without patch)" in the step names, even # for non-trybot runs, which is misleading and confusing. Therefore, # always specify patch=True for non-trybot runs. patch=not (self.m.vars.is_trybot and parent_rev), patch_refs=patch_refs, ) if checkout_chromium or checkout_flutter: gclient_env = {'DEPOT_TOOLS_UPDATE': '0'} if extra_gclient_env: gclient_env.update(extra_gclient_env) with self.m.context(cwd=checkout_root, env=gclient_env): self.m.gclient.runhooks() return update_step.presentation.properties['got_revision']
htwenhe/DJOA
env/Lib/site-packages/tablib/formats/_df.py
Python
mit
1,040
0.000962
""" Tablib - DataFrame Support. """ import sys if sys.version_info[0] > 2: from io import BytesIO else: from cStringIO import StringIO as BytesIO try: from pandas import DataFrame except ImportError: DataFrame = None import tablib from tablib.compat import unicode title = 'df' extensions = ('df', ) def detect(stream): """Returns True if given stream is a DataFrame.""" if DataFrame is None: return False try: DataFrame(stream) return True except ValueError:
return False def export_set(dset, index=None):
"""Returns DataFrame representation of DataBook.""" if DataFrame is None: raise NotImplementedError( 'DataFrame Format requires `pandas` to be installed.' ' Try `pip install tablib[pandas]`.') dataframe = DataFrame(dset.dict, columns=dset.headers) return dataframe def import_set(dset, in_stream): """Returns dataset from DataFrame.""" dset.wipe() dset.dict = in_stream.to_dict(orient='records')
RoboWoodhouse/RoboButler
python/scanlibrary.py
Python
mit
2,231
0.037203
import os
import time

from lxml import etree


class device:
    def __init__(self, ipadress, name):
        self.ipadress = str(ipadress)
        self.name = str(name)
        self.status = "off"

    def turn_on(self):
        self.status = "on"

    def turn_off(self):
        self.status = "off"


def getstatus(devices):
    ips = []
    for instance in devices:
        instance.turn_off()
    test = os.popen("nmap -sP --unprivileged 192.168.2.0/24")
    for i in test.readlines():
        if i.split(' ')[0] == 'Nmap' and i.split(' ')[1] == 'scan':
            ips.append(i.split('(')[1][:-2])
    for i in xrange(0, len(ips)):
        for j in xrange(0, len(devices)):
            if ips[i] == devices[j].ipadress:
                devices[j].turn_on()
    return devices


def writexmlrow(device, container, number):
    if number == 1:
        col = etree.SubElement(container, 'div', {'class': 'col-lg-2 col-lg-offset-1 col-md-2 col-md-offset-1 placeholder'})
    else:
        col = etree.SubElement(container, 'div', {'class': 'col-lg-2 col-md-2 placeholder'})
    if device.status == 'on':
        image1 = etree.SubElement(col, 'img', {'src': './images/green.png', 'width': '200', 'height': '200', 'class': 'img-responsive', 'align': 'center'})
    else:
        image1 = etree.SubElement(col, 'img', {'src': './images/gray.png', 'width': '200', 'height': '200', 'class': 'img-responsive', 'align': 'center'})
    label1 = etree.SubElement(col, 'h4', {'align': 'center'})
    label1.text = device.name
    return


def writexmlpart(devices):
    container = etree.Element('div', {'class': 'row placeholder'})
    i = 1
    for instance in devices:
        writexmlrow(instance, container, i)
        i = i + 1
    output = etree.tostring(container, pretty_print=True)
    with open("./parts/part1_1.html", "r") as file:
        part1 = file.read()
    with open("./parts/part1_2.html", "r") as file:
        part2 = file.read()
    with open("./parts/part1.html", "w") as file:
        file.write(part1 + output + part2)
    return


def writescanlog():
    # Assignment, not comparison; `time` is imported above so this can run.
    localtime = time.localtime(time.time())
    with open("./log/scanlog.txt", "a") as log:
        log.write(str(localtime[3]) + ':' + str(localtime[4]) + ' on the ' +
                  str(localtime[2]) + '.' + str(localtime[1]) + '.' + str(localtime[0])[-2:])
        log.write("Scanned Wifi for my Devices")
fusionbox/django-darkknight
setup.py
Python
bsd-2-clause
1,503
0.002661
#!/usr/bin/env python
import os

from setuptools import setup


def read(fname):
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()


setup(name='django-darkknight',
      version='0.9.0',
      license="BSD",
      description="He's a silent guardian, a watchful protector",
      long_description=read('README.rst'),
      author="Fusionbox, Inc",
      author_email="programmers@fusionbox.com",
      url='http://github.com/fusionbox/django-darkknight',
      packages=['darkknight', 'darkknight_gpg'],
      install_requires=[
          'django-dotenv',
          'Django>=1.5',
          'pyOpenSSL',
          'django-localflavor',
          'django-countries',
      ],
      extras_require={
          'gpg': ['gnupg>=2.0.2,<3', 'django-apptemplates'],
      },
      classifiers=[
          "Development Status :: 4 - Beta",
          "Framework :: Django",
          "Intended Audience :: Developers",
          "Intended Audience :: Information Technology",
          "License :: OSI Approved :: BSD License",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.4",
          "Programming Language :: Python :: Implementation :: CPython",
          "Programming Language :: Python :: Implementation :: PyPy",
          "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
          "Topic :: Security :: Cryptography",
      ],
      )
yacoma/auth-boilerplate
server/permissions.py
Python
mit
959
0
from .app import App
from .model import User, Group, ResetNonce


class ViewPermission:
    pass


class EditPermission:
    pass


@App.permission_rule(model=object, permission=object)
def admin_has_global_permission(identity, model, permission):
    user = User.get(email=identity.userid)
    return Group.get(name="Admin") in user.groups


@App.permission_rule(model=User, permission=object)
def user_has_self_permission(identity, model, permission):
    user = User.get(email=identity.userid)
    if user is not None and Group.get(name="Admin") in user.groups:
        return True
    else:
        return model.email == identity.userid


@App.permission_rule(model=ResetNonce, permission=EditPermission)
def user_has_permission_to_reset_nonce(identity, model, permission):
    user = User.get(email=identity.userid)
    if user is not None and Group.get(name="Admin") in user.groups:
        return True
    else:
        return user.id == int(model.id)
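A minimal usage sketch for the permission rules above, assuming `App` is a Morepath-style application as the imports suggest; the view function and the returned field below are hypothetical and only illustrate how `ViewPermission` would be attached to a view.

# Hypothetical view: access is granted only if a permission rule above returns True.
@App.json(model=User, permission=ViewPermission)
def user_view(self, request):
    return {'email': self.email}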
dalejung/trtools
trtools/io/common.py
Python
mit
109
0.009174
def _filename(obj):
    try:
        return obj.__filename__()
    except:
        pass
    return str(obj)
Southpaw-TACTIC/Team
src/python/Lib/site-packages/win32com/client/dynamic.py
Python
epl-1.0
21,580
0.031279
"""Support for dynamic COM client support. Introduction Dynamic COM client support is the ability to use a COM server without prior knowledge of the server. This can be used to talk to almost all COM servers, including much of MS Office. In general, you should not use this module directly - see below. Example >>> import win32com.client >>> xl = win32com.client.Dispatch("Excel.Application") # The line above invokes the functionality of this class. # xl is now an object we can use to talk to Excel. >>> xl.Visible = 1 # The Excel window becomes visible. """ import traceback import string import new import pythoncom import winerror import build from types import StringType, IntType, TupleType, ListType from pywintypes import UnicodeType, IIDType import win32com.client # Needed as code we eval() references it. from win32com.client import NeedUnicodeConversions debugging=0 # General debugging debugging_attr=0 # Debugging dynamic attribute lookups. LCID = 0x0 # These errors generally mean the property or method exists, # but can't be used in this context - eg, property instead of a method, etc. # Used to determine if we have a real error or not. ERRORS_BAD_CONTEXT = [ winerror.DISP_E_MEMBERNOTFOUND, winerror.DISP_E_BADPARAMCOUNT, winerror.DISP_E_PARAMNOTOPTIONAL, winerror.DISP_E_TYPEMISMATCH, winerror.E_INVALIDARG, ] ALL_INVOKE_TYPES = [ pythoncom.INVOKE_PROPERTYGET, pythoncom.INVOKE_PROPERTYPUT, pythoncom.INVOKE_PROPERTYPUTREF, pythoncom.INVOKE_FUNC ] def debug_print(*args): if debugging: for arg in args: print arg, print def debug_attr_print(*args): if debugging_attr: for arg in args: print arg, print # get the type objects for IDispatch and IUnknown dispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch] iunkType = pythoncom.TypeIIDs[pythoncom.IID_IUnknown] _GoodDispatchType=[StringType,IIDType,UnicodeType] _defaultDispatchItem=build.DispatchItem def _GetGoodDispatch(IDispatch, clsctx = pythoncom.CLSCTX_SERVER): if type(IDispatch) in _GoodDispatchType: try: IDispatch = pythoncom.connect(IDispatch) except pythoncom.ole_error: IDispatch = pythoncom.CoCreateInstance(IDispatch, None, clsctx, pythoncom.IID_IDispatch) else: # may already be a wrapped class. IDispatch = getattr(IDispatch, "_oleobj_", IDispatch) return IDispatch def _GetGoodDispatchAndUserName(IDispatch, userName, clsctx): # Get a dispatch object, and a 'user name' (ie, the name as # displayed to the user in repr() etc. if userName is None: if type(IDispatch) == StringType: userName = IDispatch elif type(IDispatch) == UnicodeType: # We always want the displayed name to be a real string userName = IDispatch.encode("ascii", "replace") elif type(userName) == UnicodeType: # As above - always a string... 
userName = userName.encode("ascii", "replace") else: userName = str(userName) return (_GetGoodDispatch(IDispatch, clsctx), userName) def _GetDescInvokeType(entry, default_invoke_type): if not entry or not entry.desc: return default_invoke_type return entry.desc[4] def Dispatch(IDispatch, userName = None, createClass = None, typeinfo = None, UnicodeToString=NeedUnicodeConversions, clsctx = pythoncom.CLSCTX_SERVER): IDispatch, userName = _GetGoodDispatchAndUserName(IDispatch,userName,clsctx) if createClass is None: createClass = CDispatch lazydata = None try: if typeinfo is None: typeinfo = IDispatch.GetTypeInfo() try: #try for a typecomp typecomp = typeinfo.GetTypeComp() lazydata = typeinfo, typecomp except pythoncom.com_error: pass except pythoncom.com_error: typeinfo = None olerepr = MakeOleRepr(IDispatch, typeinfo, lazydata) return createClass(IDispatch, olerepr, userName,UnicodeToString, lazydata) def MakeOleRepr(IDispatch, typeinfo, typecomp): olerepr = None if typeinfo is not None: try: attr = typeinfo.GetTypeAttr() # If the type info is a special DUAL interface, magically turn it into # a DISPATCH typeinfo. if attr[5] == pythoncom.TKIND_INTERFACE and attr[11] & pythoncom.TYPEFLAG_FDUAL: # Get corresponding Disp interface; # -1 is a special value which does this for us. href = typeinfo.GetRefTypeOfImplType(-1); typeinfo = typeinfo.GetRefTypeInfo(href) attr = typeinfo.GetTypeAttr() if typecomp is None: olerepr = build.DispatchItem(typeinfo, attr, None, 0) else: olerepr = build.LazyDispatchItem(attr, None) except pythoncom.ole_error: pass if olerepr is None: olerepr = build.DispatchItem() return olerepr def DumbDispatch(IDispatch, userName = None, createClass = None,UnicodeToString=NeedUnicodeConversions, clsctx=pythoncom.CLSCTX_SERVER): "Dispatc
h with no type info" IDispatch, userName = _GetGoodDispatchAndUserName(IDispatch,userName,clsctx) if createClass is None: createClass = CDispatch return createClass(IDispatch, buil
d.DispatchItem(), userName,UnicodeToString) class CDispatch: def __init__(self, IDispatch, olerepr, userName = None, UnicodeToString=NeedUnicodeConversions, lazydata = None): if userName is None: userName = "<unknown>" self.__dict__['_oleobj_'] = IDispatch self.__dict__['_username_'] = userName self.__dict__['_olerepr_'] = olerepr self.__dict__['_mapCachedItems_'] = {} self.__dict__['_builtMethods_'] = {} self.__dict__['_enum_'] = None self.__dict__['_unicode_to_string_'] = UnicodeToString self.__dict__['_lazydata_'] = lazydata def __call__(self, *args): "Provide 'default dispatch' COM functionality - allow instance to be called" if self._olerepr_.defaultDispatchName: invkind, dispid = self._find_dispatch_type_(self._olerepr_.defaultDispatchName) else: invkind, dispid = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET, pythoncom.DISPID_VALUE if invkind is not None: allArgs = (dispid,LCID,invkind,1) + args return self._get_good_object_(self._oleobj_.Invoke(*allArgs),self._olerepr_.defaultDispatchName,None) raise TypeError, "This dispatch object does not define a default method" def __nonzero__(self): return 1 # ie "if object:" should always be "true" - without this, __len__ is tried. # _Possibly_ want to defer to __len__ if available, but Im not sure this is # desirable??? def __repr__(self): return "<COMObject %s>" % (self._username_) def __str__(self): # __str__ is used when the user does "print object", so we gracefully # fall back to the __repr__ if the object has no default method. try: return str(self.__call__()) except pythoncom.com_error, details: if details[0] not in ERRORS_BAD_CONTEXT: raise return self.__repr__() # Delegate comparison to the oleobjs, as they know how to do identity. def __cmp__(self, other): other = getattr(other, "_oleobj_", other) return cmp(self._oleobj_, other) def __int__(self): return int(self.__call__()) def __len__(self): invkind, dispid = self._find_dispatch_type_("Count") if invkind: return self._oleobj_.Invoke(dispid, LCID, invkind, 1) raise TypeError, "This dispatch object does not define a Count method" def _NewEnum(self): try: invkind = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET enum = self._oleobj_.InvokeTypes(pythoncom.DISPID_NEWENUM,LCID,invkind,(13, 10),()) except pythoncom.com_error: return None # no enumerator for this object. import util return util.WrapEnum(enum, None) def __getitem__(self, index): # syver modified # Improved __getitem__ courtesy Syver Enstad # Must check _NewEnum before Item, to ensure b/w compat. if isinstance(index, IntType): if self.__dict__['_enum_'] is None: self.__dict__['_enum_'] = self._NewEnum() if self.__dict__['_enum_'] is not None: return self._get_good_object_(self._enum_.__getitem__(index)) # See if we have an "Item" method/property we can use (goes hand in hand with Count() above!) invkind, dispid = self._find_dispatch_type_("Item") if invkind is not None: return self._get_good_ob
toobaz/pandas
pandas/tests/sparse/test_indexing.py
Python
bsd-3-clause
38,613
0.000829
import numpy as np import pytest import pandas as pd from pandas.core.sparse.api import SparseDtype import pandas.util.testing as tm @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning") @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning") class TestSparseSeriesIndexing: def setup_method(self, method): self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan]) self.sparse = self.orig.to_sparse() def test_getitem(self): orig = self.orig sparse = self.sparse assert sparse[0] == 1 assert np.isnan(sparse[1]) assert sparse[3] == 3 result = sparse[[1, 3, 4]] exp = orig[[1, 3, 4]].to_sparse() tm.assert_sp_series_equal(result, exp) # dense array result = sparse[orig % 2 == 1] exp = orig[orig % 2 == 1].to_sparse() tm.assert_sp_series_equal(result, exp) # sparse array (actuary it coerces to normal Series) result = sparse[sparse % 2 == 1] exp = orig[orig % 2 == 1].to_sparse() tm.assert_sp_series_equal(result, exp) # sparse array result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)] tm.assert_sp_series_equal(result, exp) def test_getitem_slice(self): orig = self.orig sparse = self.sparse tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse()) tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse()) tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse()) tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse()) def test_getitem_int_dtype(self): # GH 8292 s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name="xxx") res = s[::2] exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name="xxx") tm.assert_sp_series_equal(res, exp) assert res.dtype == SparseDtype(np.int64) s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name="xxx") res = s[::2] exp = pd.SparseSeries( [0, 2, 4, 6], index=[0, 2, 4, 6], fill_value=0, name="xxx" ) tm.assert_sp_series_equal(res, exp) assert res.dtype == SparseDtype(np.int64) def test_getitem_fill_value(self): orig = pd.Series([1, np.nan, 0, 3, 0]) sparse = orig.to_sparse(fill_value=0) assert sparse[0] == 1 assert np.isnan(sparse[1]) assert sparse[2] == 0 assert sparse[3] == 3 result = sparse[[1, 3, 4]] exp = orig[[1, 3, 4]].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) # dense array result = sparse[orig % 2 == 1] exp = orig[orig % 2 == 1].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) # sparse array (actuary it coerces to normal Series) result = sparse[sparse % 2 == 1] exp = orig[orig % 2 == 1].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) # sparse array result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)] tm.assert_sp_series_equal(result, exp) def test_getitem_ellipsis(self): # GH 9467 s = pd.SparseSeries([1, np.nan, 2, 0, np.nan]) tm.assert_sp_series_equal(s[...], s) s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0) tm.assert_sp_series_equal(s[...], s) def test_getitem_slice_fill_value(self): orig = pd.Series([1, np.nan, 0, 3, 0]) sparse = orig.to_sparse(fill_value=0) tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse(fill_value=0)) tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse(fill_value=0)) tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse(fill_value=0)) tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse(fill_value=0)) def test_loc(self): orig = self.orig sparse = self.sparse assert sparse.loc[0] == 1 assert np.isnan(sparse.loc[1]) result = sparse.loc[[1, 3, 4]] exp = orig.loc[[1, 3, 4]].to_sparse() tm.assert_sp_series_equal(result, exp) # exceeds the bounds result = sparse.reindex([1, 3, 4, 5]) exp = orig.reindex([1, 3, 4, 
5]).to_sparse() tm.assert_sp_series_equal(result, exp) # padded with NaN assert np.isnan(result[-1]) # dense array result = sparse.loc[orig % 2 == 1] exp = orig.loc[orig % 2 == 1].to_sparse() tm.assert_sp_series_equal(result, exp) # sparse array (actuary it coerces to normal Series) result = sparse.loc[sparse % 2 == 1] exp = orig.loc[orig % 2 == 1].to_sparse() tm.assert_sp_series_equal(result, exp) # sparse array result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)] tm.assert_sp_series_equal(result, exp) def test_loc_index(self): orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE")) sparse = orig.to_sparse() assert sparse.loc["A"] == 1 assert np.isnan(sparse.loc["B"]) result = sparse.loc[["A", "C", "D"]] exp = orig.loc[["A", "C", "D"]].to_sparse() tm.assert_sp_series_equal(result, exp) # dense array result = sparse.loc[orig % 2 == 1] exp = orig.loc[orig % 2 == 1].to_sparse() tm.assert_sp_series_equal(result, exp) # sparse array (actuary it coerces to normal Series) result = sparse.loc[sparse % 2 == 1] exp = orig.loc[orig % 2 == 1].to_sparse() tm.assert_sp_series_equal(result, exp) # sparse array result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)] tm.assert_sp_series_equal(result, exp) def test_loc_index_fill_value(self): orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE")) sparse = orig.to_sparse(fill_value=0) assert sparse.loc["A"] == 1 assert np.isnan(sparse.loc["B"]) result = sparse.loc[["A", "C", "D"]] exp = orig.loc[["A", "C", "D"]].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) # dense array result = sparse.loc[orig % 2 == 1] exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) # sparse array (actuary it coerces to normal Series) result = sparse.loc[sparse % 2 == 1] exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) def test_loc_slice(self): orig = self.orig sparse = self.sparse tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse()) def test_loc_slice_index_fill_value(self): orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE")) sparse = orig.to_sparse(fill_value=0) tm.assert_sp_series_equal( sparse.l
oc["C":], orig.loc["C":].to_sparse(fill_value=0) ) def test_loc_slice_fill_value(self): orig = pd.Series([1, np.nan, 0, 3, 0]) sparse = orig.to_sparse(fill_value=0) tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse(fill_value=0)) def test_iloc(self): orig = self.orig sparse = self.sparse assert sparse.iloc[3] == 3 assert np.
isnan(sparse.iloc[2]) result = sparse.iloc[[1, 3, 4]] exp = orig.iloc[[1, 3, 4]].to_sparse() tm.assert_sp_series_equal(result, exp) result = sparse.iloc[[1, -2, -4]] exp = orig.iloc[[1, -2, -4]].to_sparse() tm.assert_sp_series_equal(result, exp) with pytest.raises(IndexError): sparse.iloc[[1, 3, 5]] def test_iloc_fill_value(self): orig = pd.Series([1, np.nan, 0, 3, 0]) sparse = orig.to_sparse(fill_value=0) assert sparse.iloc[3] == 3 assert np.isnan(sparse.iloc[1]) assert sparse.iloc[4] == 0 result = sparse.iloc[[1, 3, 4]] exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0) tm.assert_sp_series_equal(result, exp) def test_iloc_slice(self): orig = pd.Series([1, np.nan, np
k4rtik/alpo
quotes/admin.py
Python
mit
243
0.00823
from quotes.models import Quote
from django.contrib import admin


class QuoteAdmin(admin.ModelAdmin):
    list_display = ('message', 'name', 'program', 'class_of', 'submission_time')


admin.site.register(Quote, QuoteAdmin)
gryzz/uCall
utils/asterisk-connector/ami2stomp-get.py
Python
gpl-3.0
972
0.005144
#!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
# http://www.voip-info.org/wiki/view/asterisk+manager+events

import sys, time
import simplejson as json
from stompy.simple import Client
import ConfigParser

config = ConfigParser.ConfigParser()
devel_config = ConfigParser.ConfigParser()

config.read('/opt/ucall/etc/config.ini')
devel_config.read('/opt/ucall/etc/devel_config.ini')

stomp_host = config.get('STOMP', 'host')
stomp_username = config.get('STOMP', 'username')
stomp_password = config.get('STOMP', 'password')
stomp_queue = "/queue/messages/" + devel_config.get('GENERAL', 'agent')

print '='*80
print 'Stomp host:', stomp_host
print 'Stomp username:', stomp_username
print 'Stomp password:', stomp_password
print 'Stomp queue:', stomp_queue
print '='*80

stomp = Client(stomp_host)
stomp.connect(stomp_username, stomp_password)
stomp.subscribe("jms.queue.msg.ctrl")

while True:
    message = stomp.get()
    print message.body

stomp.disconnect()
xbot/alfred-pushbullet
lib/pushbullet/filetype.py
Python
mit
373
0
def _magic_get_file_type(f, _):
    file_type = magic.from_buffer(f.read(1024), mime=True)
    f.seek(0)
    return file_type.decode('utf-8')


def _guess_file_type(_, filename):
    return mimetypes.guess_type(filename)[0]


try:
    import magic
except ImportError:
    import mimetypes

    get_file_type = _guess_file_type
else:
    get_file_type = _magic_get_file_type
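For context, a brief hedged example of calling the `get_file_type` callable selected above; the file name is made up for illustration, and both backends share the `(fileobj, filename)` signature.

# Illustrative only: works with either the python-magic or the mimetypes fallback.
with open('photo.jpg', 'rb') as f:
    print(get_file_type(f, 'photo.jpg'))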
eltonkevani/tempest_el_env
tempest/api/compute/floating_ips/test_floating_ips_actions.py
Python
apache-2.0
7,990
0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from tempest.api.compute import base from tempest.common.utils import data_utils from tempest import exceptions from tempest.test import attr class FloatingIPsTestJSON(base.BaseV2ComputeTest): _interface = 'json' server_id = None floating_ip = None @classmethod def setUpClass(cls): super(FloatingIPsTestJSON, cls).setUpClass() cls.client = cls.floating_ips_client cls.servers_client = cls.servers_client # Server creation resp, server = cls.create_test_server(wait_until='ACTIVE') cls.server_id = server['id'] # Floating IP creation resp, body = cls.client.create_floating_ip() cls.floating_ip_id = body['id'] cls.floating_ip = body['ip'] # Generating a nonexistent floatingIP id cls.floating_ip_ids = [] resp, body = cls.client.list_floating_ips() for i in range(len(body)): cls.floating_ip_ids.append(body[i]['id']) while True: cls.non_exist_id = data_utils.rand_int_id(start=999) if cls.config.service_available.neutron: cls.non_exist_id = str(uuid.uuid4()) if cls.non_exist_id not in cls.floating_ip_ids: break @classmethod def tearDownClass(cls): # Deleting the floating IP which is created in this method resp, body = cls.client.delete_floating_ip(cls.floating_ip_id) super(FloatingIPsTestJSON, cls).tearDownClass() @attr(type='gate') def test_allocate_floating_ip(self): # Positive test:Allocation of a new floating IP to a project # should be successful resp, body = self.client.create_floating_ip() self.assertEqual(200, resp.status) floating_ip_id_allocated = body['id'] try: resp, floating_ip_details = \ self.client.get_floating_ip_details(floating_ip_id_allocated) # Checking if the details of allocated IP is in list of floating IP resp, body = self.client.list_floating_ips() self.assertIn(floating_ip_details, body) finally: # Deleting the floating IP which is created in this method self.client.delete_floating_ip(floating_ip_id_allocated) @attr(type=['negative', 'gate']) def test_allocate_floating_ip_from_nonexistent_pool(self): # Positive test:Allocation of a new floating IP from a nonexistent_pool # to a project should fail self.assertRaises(exceptions.NotFound, self.client.create_floating_ip, "non_exist_pool") @attr(type='gate') def test_delete_floating_ip(self): # Positive test:Deletion of valid floating IP from project # should be successful # Creating the floating IP that is to be deleted in this method resp, floating_ip_body = self.client.create_floating_ip() # Storing the details of floating IP before deleting it cli_resp = self.client.get_floating_ip_details(floating_ip_body['id']) resp, floating_ip_details = cli_resp # Deleting the floating IP from the project resp, body = self.client.delete_floating_ip(floating_ip_body['id']) self.assertEqual(202, resp.status) # Check it was really deleted. 
self.client.wait_for_resource_deletion(floating_ip_body['id']) @attr(type='gate') def test_associate_disassociate_floating_ip(self): # Positive test:Associate and disassociate the provided floating IP # to a specific server should be successful # Association of floating IP to fixed IP address resp, body = self.client.associate_floating_ip_to_server( self.floating_ip, self.server_id) self.assertEqual(202, resp.status) # Disassociation of floating IP that was associated in this method resp, body = self.
client.disassociate_floating_ip_from_server( self.floating_ip, self.server_id) self.assertEqual(202, resp.status) @attr(type=['negative', 'gate']) def test_delete_nonexistant_floating_ip(self): # Negative test:Deletion of a nonexistent floating IP # from project should fail # Deleting
the non existent floating IP self.assertRaises(exceptions.NotFound, self.client.delete_floating_ip, self.non_exist_id) @attr(type=['negative', 'gate']) def test_associate_nonexistant_floating_ip(self): # Negative test:Association of a non existent floating IP # to specific server should fail # Associating non existent floating IP self.assertRaises(exceptions.NotFound, self.client.associate_floating_ip_to_server, "0.0.0.0", self.server_id) @attr(type=['negative', 'gate']) def test_dissociate_nonexistant_floating_ip(self): # Negative test:Dissociation of a non existent floating IP should fail # Dissociating non existent floating IP self.assertRaises(exceptions.NotFound, self.client.disassociate_floating_ip_from_server, "0.0.0.0", self.server_id) @attr(type='gate') def test_associate_already_associated_floating_ip(self): # positive test:Association of an already associated floating IP # to specific server should change the association of the Floating IP # Create server so as to use for Multiple association resp, body = self.servers_client.create_server('floating-server2', self.image_ref, self.flavor_ref) self.servers_client.wait_for_server_status(body['id'], 'ACTIVE') self.new_server_id = body['id'] # Associating floating IP for the first time resp, _ = self.client.associate_floating_ip_to_server( self.floating_ip, self.server_id) # Associating floating IP for the second time resp, body = self.client.associate_floating_ip_to_server( self.floating_ip, self.new_server_id) self.addCleanup(self.servers_client.delete_server, self.new_server_id) if (resp['status'] is not None): self.addCleanup(self.client.disassociate_floating_ip_from_server, self.floating_ip, self.new_server_id) # Make sure no longer associated with old server self.assertRaises((exceptions.NotFound, exceptions.UnprocessableEntity), self.client.disassociate_floating_ip_from_server, self.floating_ip, self.server_id) @attr(type=['negative', 'gate']) def test_associate_ip_to_server_without_passing_floating_ip(self): # Negative test:Association of empty floating IP to specific server # should raise NotFound exception self.assertRaises(exceptions.NotFound, self.client.associate_floating_ip_to_server, '', self.server_id) class FloatingIPsTestXML(FloatingIPsTestJSON): _interface = 'xml'
sunyihuan326/DeltaLab
Andrew_NG_learning/class_two/week_three/Dxq_1.py
Python
mit
3,691
0.00246
# coding:utf-8 ''' Created on 2017/11/7. @author: chk01 ''' import math import numpy as np import h5py import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.python.framework import ops from class_two.week
_three.tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict np.random.seed(1) def exam1(): y_hat = tf.constant(36, name='Y-hat') y = tf.constant(39, name='y') loss = tf.Variable((y - y_hat) ** 2, name='loss') init = tf.global_variables_initializer() with tf.S
ession() as sess: sess.run(init) print(sess.run(loss)) def exam2(): a = tf.constant(2) b = tf.constant(3) c = tf.multiply(a, b) return c def exam3(x_input): with tf.Session() as sess: x = tf.placeholder(tf.int64, name='x') y = 2 * x print(sess.run(y, feed_dict={x: x_input})) # GRADED FUNCTION: linear_function def linear_function(): """ Implements a linear function: Initializes W to be a random tensor of shape (4,3) Initializes X to be a random tensor of shape (3,1) Initializes b to be a random tensor of shape (4,1) Returns: result -- runs the session for Y = WX + b """ np.random.seed(1) X = tf.constant(np.random.randn(3, 1), tf.float32, name='X') W = tf.constant(np.random.randn(4, 3), tf.float32, name='W') b = tf.constant(np.random.randn(4, 1), tf.float32, name='b') Y = tf.matmul(W, X) + b with tf.Session() as sess: result = sess.run(Y) return result # GRADED FUNCTION: sigmoid def sigmoid(z): """ Computes the sigmoid of z Arguments: z -- input value, scalar or vector Returns: results -- the sigmoid of z """ x = tf.placeholder(tf.float32, name='x') sigmoid = tf.nn.sigmoid(x) with tf.Session() as sess: result = sess.run(sigmoid, feed_dict={x: z}) return result def cost(logits, labels): """     Computes the cost using the sigmoid cross entropy          Arguments:     logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)     labels -- vector of labels y (1 or 0) Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels" in the TensorFlow documentation. So logits will feed into z, and labels into y.          Returns:     cost -- runs the session of the cost (formula (2)) """ z = tf.placeholder(tf.float32, name='z-input') y = tf.placeholder(tf.float32, name='y-input') cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y) with tf.Session() as sess: cost = sess.run(cost, feed_dict={z: logits, y: labels}) return cost # GRADED FUNCTION: one_hot_matrix def one_hot_matrix(labels, C): """ Creates a matrix where the i-th row corresponds to the ith class number and the jth column corresponds to the jth training example. So if example j had a label i. Then entry (i,j) will be 1. Arguments: labels -- vector containing the labels C -- number of classes, the depth of the one hot dimension Returns: one_hot -- one hot matrix """ C = tf.constant(C, name='C') one_hot_matrix = tf.one_hot(labels, C, axis=0) tf.nn.sigmoid_cross_entropy_with_logits() with tf.Session() as sess: one_hot = sess.run(one_hot_matrix) return one_hot if __name__ == '__main__': # exam1() logits = np.array([0.2, 0.4, 0.7, 0.9]) cost = cost(logits, np.array([0, 0, 1, 1])) print("cost = " + str(cost)) tf.one_hot(labels,C,axis=0)
nikolas/raven-python
raven/processors.py
Python
bsd-3-clause
3,988
0
""" raven.core.processors ~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import re from raven.utils import varmap from raven.utils import six class Processor(object): def __init__(self, client): self.client = client def get_data(self, data, **kwargs): return def process(self, data, **kwargs): resp = self.get_data(data, **kwargs) if resp: data = resp if 'exception' in data: if 'values' in data['exception']:
for
value in data['exception'].get('values', []): if 'stacktrace' in value: self.filter_stacktrace(value['stacktrace']) if 'request' in data: self.filter_http(data['request']) if 'extra' in data: data['extra'] = self.filter_extra(data['extra']) return data def filter_stacktrace(self, data): pass def filter_http(self, data): pass def filter_extra(self, data): return data class RemovePostDataProcessor(Processor): """ Removes HTTP post data. """ def filter_http(self, data, **kwargs): data.pop('data', None) class RemoveStackLocalsProcessor(Processor): """ Removes local context variables from stacktraces. """ def filter_stacktrace(self, data, **kwargs): for frame in data.get('frames', []): frame.pop('vars', None) class SanitizePasswordsProcessor(Processor): """ Asterisk out things that look like passwords, credit card numbers, and API keys in frames, http, and basic extra data. """ MASK = '*' * 8 FIELDS = frozenset([ 'password', 'secret', 'passwd', 'authorization', 'api_key', 'apikey', 'sentry_dsn', ]) VALUES_RE = re.compile(r'^(?:\d[ -]*?){13,16}$') def sanitize(self, key, value): if value is None: return if isinstance(value, six.string_types) and self.VALUES_RE.match(value): return self.MASK if not key: # key can be a NoneType return value key = key.lower() for field in self.FIELDS: if field in key: # store mask as a fixed length for security return self.MASK return value def filter_stacktrace(self, data): for frame in data.get('frames', []): if 'vars' not in frame: continue frame['vars'] = varmap(self.sanitize, frame['vars']) def filter_http(self, data): for n in ('data', 'cookies', 'headers', 'env', 'query_string'): if n not in data: continue if isinstance(data[n], six.string_types) and '=' in data[n]: # at this point we've assumed it's a standard HTTP query # or cookie if n == 'cookies': delimiter = ';' else: delimiter = '&' data[n] = self._sanitize_keyvals(data[n], delimiter) else: data[n] = varmap(self.sanitize, data[n]) if n == 'headers' and 'Cookie' in data[n]: data[n]['Cookie'] = self._sanitize_keyvals( data[n]['Cookie'], ';' ) def filter_extra(self, data): return varmap(self.sanitize, data) def _sanitize_keyvals(self, keyvals, delimiter): sanitized_keyvals = [] for keyval in keyvals.split(delimiter): keyval = keyval.split('=') if len(keyval) == 2: sanitized_keyvals.append((keyval[0], self.sanitize(*keyval))) else: sanitized_keyvals.append(keyval) return delimiter.join('='.join(keyval) for keyval in sanitized_keyvals)
HEPData/hepdata3
hepdata/factory.py
Python
gpl-2.0
2,063
0
# -*- coding: utf-8 -*- # # This file is part of HEPData. # Copyright (C) 2016 CERN. # # HEPData is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # HEP
Data is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # G
eneral Public License for more details. # # You should have received a copy of the GNU General Public License # along with HEPData; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """HEPData application factories.""" import os import sys from invenio_base.app import create_app_factory from invenio_base.wsgi import create_wsgi_factory from invenio_config import create_config_loader from . import config env_prefix = 'APP' conf_loader = create_config_loader(config=config, env_prefix=env_prefix) instance_path = os.getenv(env_prefix + '_INSTANCE_PATH') or \ os.path.join(sys.prefix, 'var', 'hepdata-instance') static_folder = os.getenv(env_prefix + '_STATIC_FOLDER') or \ os.path.join(instance_path, 'static') create_api = create_app_factory( 'hepdata', config_loader=conf_loader, extension_entry_points=['invenio_base.api_apps'], blueprint_entry_points=['invenio_base.api_blueprints'], instance_path=instance_path, ) create_app = create_app_factory( 'hepdata', config_loader=conf_loader, extension_entry_points=['invenio_base.apps'], blueprint_entry_points=['invenio_base.blueprints'], wsgi_factory=create_wsgi_factory({'/api': create_api}), instance_path=instance_path, static_folder=static_folder, )
Joergen/zamboni
apps/comm/models.py
Python
bsd-3-clause
6,495
0.001232
from datetime import datetime from django.db import models from uuidfield.fields import UUIDField from access import acl import amo.models from translations.fields import save_signal from mkt.constants import comm as const class CommunicationPermissionModel(amo.models.ModelBase): # Read permissions imply write permissions as well. read_permission_public = models.BooleanField() read_permission_developer = models.BooleanField() read_permission_reviewer = models.BooleanField() read_permission_senior_reviewer = models.BooleanField() read_permission_mozilla_contact = models.BooleanField() read_permission_staff = models.BooleanField() class Meta: abstract = True def check_acls(user, obj, acl_type): """Check ACLs.""" if acl_type == 'moz_contact': try: return user.email in obj.addon.get_mozilla_contacts() except AttributeError: return user.email in obj.thread.addon.get_mozilla_contacts() if acl_type == 'admin': return acl.action_allowed_user(user, 'Admin', '%') elif acl_type == 'reviewer': return acl.action_allowed_user(user, 'Apps', 'Review') elif acl_type == 'senior_reviewer': return acl.action_allowed_user(user, 'Apps', 'ReviewEscalated') else: raise Exception('Invalid ACL lookup.') return False def check_acls_comm_obj(obj, profile): """Cross-reference ACLs and Note/Thread permissions.""" if obj.read_permission_public: return True if (obj.read_permission_reviewer and check_acls(profile, obj, 'reviewer')): return True if (obj.read_permission_senior_reviewer and check_acls(profile, obj, 'senior_reviewer')): return True if (obj.read_permission_mozilla_contact and check_acls(profile, obj, 'moz_contact')): return True if (obj.read_permission_staff and check_acls(profile, obj, 'admin')): return True return False def user_has_perm_thread(thread, profile): """ Check if the user has read/write permissions on the given thread. Developers of the add-on used in the thread, users in the CC list, and users who post to the thread are allowed to access the object. Moreover, other object permissions are also checked agaisnt the ACLs of the user. """ user_post = CommunicationNote.objects.filter( author=profile, thread=thread) user_c
c = CommunicationThreadCC.objects.filter( user=profile, thread=thread) if user_post.exists() or user_cc.exists(): return True # User is a developer of the add-on and has the permission to read. user_is_author = profile.addons.filter(pk=thread.addon_id) if thread.read_permission_developer and user_is_author.exists(): return True return check_acls_comm_obj(thread, profile) def user_has_perm_note(note, profile): """ C
heck if the user has read/write permissions on the given note. Developers of the add-on used in the note, users in the CC list, and users who post to the thread are allowed to access the object. Moreover, other object permissions are also checked agaisnt the ACLs of the user. """ if note.author.id == profile.id: # Let the dude access his own note. return True # User is a developer of the add-on and has the permission to read. user_is_author = profile.addons.filter(pk=note.thread.addon_id) if note.read_permission_developer and user_is_author.exists(): return True return check_acls_comm_obj(note, profile) class CommunicationThread(CommunicationPermissionModel): addon = models.ForeignKey('addons.Addon', related_name='threads') version = models.ForeignKey('versions.Version', related_name='threads', null=True) class Meta: db_table = 'comm_threads' class CommunicationThreadCC(amo.models.ModelBase): thread = models.ForeignKey(CommunicationThread, related_name='thread_cc') user = models.ForeignKey('users.UserProfile', related_name='comm_thread_cc') class Meta: db_table = 'comm_thread_cc' unique_together = ('user', 'thread',) class CommunicationNoteManager(models.Manager): def with_perms(self, profile, thread): ids = [note.id for note in self.filter(thread=thread) if user_has_perm_note(note, profile)] return self.filter(id__in=ids) class CommunicationNote(CommunicationPermissionModel): thread = models.ForeignKey(CommunicationThread, related_name='notes') author = models.ForeignKey('users.UserProfile', related_name='comm_notes') note_type = models.IntegerField() body = models.TextField(null=True) reply_to = models.ForeignKey('self', related_name='replies', null=True, blank=True) read_by_users = models.ManyToManyField('users.UserProfile', through='CommunicationNoteRead') objects = CommunicationNoteManager() class Meta: db_table = 'comm_thread_notes' def save(self, *args, **kwargs): super(CommunicationNote, self).save(*args, **kwargs) self.thread.modified = self.created self.thread.save() class CommunicationNoteRead(models.Model): user = models.ForeignKey('users.UserProfile') note = models.ForeignKey(CommunicationNote) class Meta: db_table = 'comm_notes_read' class CommunicationThreadToken(amo.models.ModelBase): thread = models.ForeignKey(CommunicationThread, related_name='token') user = models.ForeignKey('users.UserProfile', related_name='comm_thread_tokens') uuid = UUIDField(unique=True, auto=True) use_count = models.IntegerField(default=0, help_text='Stores the number of times the token has been used') class Meta: db_table = 'comm_thread_tokens' unique_together = ('thread', 'user') def is_valid(self): # TODO: Confirm the expiration and max use count values. timedelta = datetime.now() - self.modified return (timedelta.days <= const.THREAD_TOKEN_EXPIRY and self.use_count < const.MAX_TOKEN_USE_COUNT) def reset_uuid(self): # Generate a new UUID. self.uuid = UUIDField()._create_uuid().hex models.signals.pre_save.connect(save_signal, sender=CommunicationNote, dispatch_uid='comm_thread_notes_translations')
ericxlive/abstract-data-types
abstract-data-types/adt_queue.py
Python
mit
2,361
0.002541
""" This class represents a Queue Node to store values and also links other Nodes with values.""" class Node: """ It starts with a value at all times. A node cannot be created without an associated value. """ def __init__(self, value):
self.value = value self.next = None """ This class represents a Queue to store values. The Queue starts with a node called head. Every element is added after the last node entered.""" class Queue: """ The Queue is created with its size set to zero and its head element set to None (undefined).""" def __init__(self): self.head = None self.size = 0 """ It adds a new value.
The value is always added after the last value entered. If the Queue has no elements, the value added becomes both the head and the tail of the Queue.""" def enqueue(self, value): if (self.head is None): self.head = Node(value) self.size += 1 else: pointer = self.head while(pointer.next is not None): pointer = pointer.next pointer.next = Node(value) self.size += 1 """ This routine removes and also returns the first element. After the removal, the head is updated to the next element of the queue. If there are no elements other than the head, the Queue becomes empty. If there are no elements at all, nothing is removed or returned.""" def dequeue(self): if (self.head is not None): removed = self.head.value self.head = self.head.next self.size -= 1 return removed """ It shows all the Queue elements one by one in insertion order. """ def display(self): pointer = self.head while (pointer is not None): print pointer.value pointer = pointer.next """ It returns the head node value without removing it. Note: the instance attribute self.head assigned in __init__ shadows this method on instances. """ def head(self): return self.head.value """ It verifies whether or not the Queue has elements. If the Queue has no elements, its head is None. """ def is_empty(self): return self.head is None
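A minimal usage sketch of the Queue class defined above (illustrative only; the enqueued strings are arbitrary):

q = Queue()
for item in ["a", "b", "c"]:
    q.enqueue(item)
q.display()          # prints a, b, c in insertion order
print(q.dequeue())   # "a" -- the oldest element is removed first
print(q.is_empty())  # False, "b" and "c" remain queued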
updatengine/updatengine-server
adminactions/__init__.py
Python
gpl-2.0
1,648
0.002427
NAME = 'django-adminactions' VERSION = __version__ = (0, 4, 0, 'final', 0) __author__ = 'sax' import subprocess import datetime import os def get_version(version=None): """Derives a PEP386-compliant version number from VERSION.""" if version is None: version = VERSION assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') parts = 2 if version[2] == 0 else 3 main = '.'.join(str(x) for x in version[:parts]) sub = '' if version[3] == 'alpha' and version[4] == 0: git_changeset = get_git_changeset() if git_changeset: sub = '.a%s' % git_changeset elif version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = mapping[version[3]] + str(version[4]) return main + sub def get_git_changeset(): """Returns a numeric identifier of the latest git changeset. The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format. This value isn't guaranteed to be unique, but collisions are very unlikely, so it's sufficient for generating the development version numbers. """ repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) git_log = subprocess.Pop
en('git log --pretty=form
at:%ct --quiet -1 HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=repo_dir, universal_newlines=True) timestamp = git_log.communicate()[0] try: timestamp = datetime.datetime.utcfromtimestamp(int(timestamp)) except ValueError: return None return timestamp.strftime('%Y%m%d%H%M%S')
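A worked example of how get_version() above composes a version string (only (0, 4, 0, 'final', 0) is the module's own VERSION; the other tuple is hypothetical):

print(get_version((0, 4, 0, 'final', 0)))  # '0.4'    -- final release, micro level 0 is dropped
print(get_version((0, 5, 1, 'beta', 2)))   # '0.5.1b2' -- pre-release suffix from the mapping
# an (x, y, 0, 'alpha', 0) tuple would instead append '.a<git timestamp>' when a git checkout is present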
Donkyhotay/MoonPy
zope/app/apidoc/browser/skin.py
Python
gpl-3.0
1,074
0.003724
############################################################################## # # Copyright (c) 2004 Zope Corporation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy
of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, A
ND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """`APIdoc` skin. $Id$ """ __docformat__ = "reStructuredText" from zope.publisher.interfaces.browser import IBrowserRequest from zope.publisher.interfaces.browser import IDefaultBrowserLayer class apidoc(IBrowserRequest): """The `apidoc` layer.""" class APIDOC(apidoc, IDefaultBrowserLayer): """The `APIDOC` skin.""" # BBB 2006/02/18, to be removed after 12 months import zope.app.skins zope.app.skins.set('APIDOC', APIDOC)
tsg-/pyeclib
pyeclib/__init__.py
Python
bsd-2-clause
1,348
0
# Copyright (c) 2013, Kevin Greenan (kmgreen2@gmail.com) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. THIS SOFTWARE IS # PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN # NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUD
ING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
plaidml/plaidml
mlperf/backend_tflite.py
Python
apache-2.0
1,874
0.001601
""" tflite backend (https://github.com/tensorflow/tensorflow/lite) """ # pylint: disable=unused-argument,missing-docstring,u
seless-super-delegation from threading import Lock try: # try dedicated tflite package first import tflite_runtime import tflite_runtime.interpreter as tflite _version = tflite_runtime.__version__ _git_version = tflite_runtime.__git_version__ except: # fall back to tflite bundled in tensorflow import tensorflow as tf from tensorflow.lite.python import interpreter as tflite _ver
sion = tf.__version__ _git_version = tf.__git_version__ import backend class BackendTflite(backend.Backend): def __init__(self): super(BackendTflite, self).__init__() self.sess = None self.lock = Lock() def version(self): return _version + "/" + _git_version def name(self): return "tflite" def image_format(self): # tflite is always NHWC return "NHWC" def load(self, model_path, inputs=None, outputs=None): self.sess = tflite.Interpreter(model_path=model_path) self.sess.allocate_tensors() # keep input/output name to index mapping self.input2index = {i["name"]: i["index"] for i in self.sess.get_input_details()} self.output2index = {i["name"]: i["index"] for i in self.sess.get_output_details()} # keep input/output names self.inputs = list(self.input2index.keys()) self.outputs = list(self.output2index.keys()) return self def predict(self, feed): self.lock.acquire() # set inputs for k, v in self.input2index.items(): self.sess.set_tensor(v, feed[k]) self.sess.invoke() # get results res = [self.sess.get_tensor(v) for _, v in self.output2index.items()] self.lock.release() return res
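A usage sketch for the BackendTflite class above (assumptions: a tflite runtime is installed, "model.tflite" is a hypothetical single-input image model, and the (1, 224, 224, 3) shape is illustrative):

import numpy as np

backend = BackendTflite()
backend.load("model.tflite")            # hypothetical model path
print(backend.version(), backend.inputs, backend.outputs)
feed = {name: np.zeros((1, 224, 224, 3), dtype=np.float32) for name in backend.inputs}
results = backend.predict(feed)         # list of output arrays, one per model output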
lidavidm/mathics-heroku
venv/lib/python2.7/site-packages/sympy/core/tests/test_sympify.py
Python
gpl-3.0
14,157
0.000212
from __future__ import with_statement from sympy import Symbol, exp, Integer, Float, sin, cos, log, Poly, Lambda, \ Function, I, S, sqrt, srepr, Rational, Tuple, Matrix, Interval from sympy.abc import x, y from sympy.core.sympify import sympify, _sympify, SympifyError, kernS from sympy.core.decorators import _sympifyit from sympy.utilities.pytest import XFAIL, raises from sympy.utilities.decorator import conserve_mpmath_dps from sympy.geometry import Point, Line from sympy.functions.combinatorial.factorials import factorial, factorial2 from sympy.abc import _clash, _clash1, _clash2 from sympy.core.compatibility import HAS_GMPY from sympy import mpmath def test_439(): v = sympify("exp(x)") assert v == exp(x) assert type(v) == type(exp(x)) assert str(type(v)) == str(type(exp(x))) def test_sympify1(): assert sympify("x") == Symbol("x") assert sympify(" x") == Symbol("x") assert sympify(" x ") == Symbol("x") # 1778 n1 = Rational(1, 2) assert sympify('--.5') == n1 assert sympify('-1/2') == -n1 assert sympify('-+--.5') == -n1 assert sympify('-.[3]') == Rational(-1, 3) assert sympify('.[3]') == Rational(1, 3) assert sympify('+.[3]') == Rational(1, 3) assert sympify('+0.[3]*10**-2') == Rational(1, 300) assert sympify('.[052631578947368421]') == Rational(1, 19) assert sympify('.0[526315789473684210]') == Rational(1, 19) assert sympify('.034[56]') == Rational(1711, 49500) # options to make reals into rationals assert sympify('1.22[345]', rational=True) == \ 1 + Rational(22, 100) + Rational(345, 99900) assert sympify('2/2.6', rational=True) == Rational(10, 13) assert sympify('2.6/2', rational=True) == Rational(13, 10) assert sympify('2.6e2/17', rational=True) == Rational(260, 17) assert sympify('2.6e+2/17', rational=True) == Rational(260, 17) assert sympify('2.6e-2/17', rational=True) == Rational(26, 17000) assert sympify('2.1+3/4', rational=True) == \ Rational(21, 10) + Rational(3, 4) assert sympify('2.234456', rational=True) == Rational(279307, 125000) assert sympify('2.234456e23', rational=True) == 223445600000000000000000 assert sympify('2.234456e-23', rational=True) == \ Rational(279307, 12500000000000000000000000000) assert sympify('-2.234456e-23', rational=True) == \ Rational(-279307, 12500000000000000000000000000) assert sympify('12345678901/17', rational=True) == \ Rational(12345678901, 17) assert sympify('1/.3 + x', rational=True) == Rational(10, 3) + x # make sure longs in fractions work assert sympify('222222222222/11111111111') == \ Rational(222222222222, 11111111111) # ... even if they come from repetend notation assert sympify('1/.2[123456789012]') == Rational(333333333333, 70781892967) # ... 
or from high precision reals assert sympify('.1234567890123456', rational=True) == \ Rational(19290123283179, 156250000000000) def test_sympify_Fraction(): try: import fractions except ImportError: pass else: value = sympify(fractions.Fraction(101, 127)) assert value == Rational(101, 127) and type(value) is Rational def test_sympify_gmpy(): if HAS_GMPY: if HAS_GMPY == 2: import gmpy2 as gmpy elif HAS_GMPY == 1: import gmpy value = sympify(gmpy.mpz(1000001)) assert value == Integer(1000001) and type(value) is Integer value = sympify(gmpy.mpq(101, 127)) assert value == Rational(101, 127) and type(value) is Rational @conserve_mpmath_dps def test_sympify_mpmath(): value = sympify(mpmath.mpf(1.0)) assert value == Float(1.0) and type(value) is Float mpmath.mp.dps = 12 assert sympify( mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-12")) is True assert sympify( mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-13")) is False mpmath.mp.dps = 6 assert sympify( mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-5")) is True assert sympify( mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-6")) is False assert sympify(mpmath.mpc(1.0 + 2.0j)) == Float(1.0) + Float(2.0)*I def test_sympify2(): class A: def _sympy_(self): return Symbol("x")**3 a = A() assert _sympify(a) == x**3 assert sympify(a) == x**3 assert a == x**3 def test_sympify3(): assert sympify("x**3") == x**3 assert sympify("x^3") == x**3 assert sympify("1/2") == Integer(1)/2 raises(SympifyError, lambda: _sympify('x**3')) raises(SympifyError, lambda: _sympify('1/2')) def test_sympify_keywords(): raises(SympifyError, lambda: sympify('if')) raises(SympifyError, lambda: sympify('for')) raises(SympifyError, lambda: sympify('while')) raises(SympifyError, lambda: sympify('lambda')) def test_sympify_float(): assert sympify("1e-64") != 0 assert sympify("1e-20000") != 0 def test_sympify_bool(): """Test that sympify accepts boolean values and that output leaves them unchanged""" assert sympify(True) is True assert sympify(False) is False def test_sympyify_iterables(): ans = [Rational(3, 10), Rational(1, 5)] asser
t sympify(['.3', '.2'], rational=True) == ans assert sympify(set(['.3', '.2']), rational=True) == set(ans) assert sympify(tuple(['.3', '.2']), rational=True) == Tuple(*ans) assert sympify(dict(x=0, y=1)) == {x: 0, y: 1} assert sympify(['1', '2', ['3', '4']]) == [S(1), S(2), [S(3), S(4)]] def test_sympify4(): class A:
def _sympy_(self): return Symbol("x") a = A() assert _sympify(a)**3 == x**3 assert sympify(a)**3 == x**3 assert a == x def test_sympify_text(): assert sympify('some') == Symbol('some') assert sympify('core') == Symbol('core') assert sympify('True') is True assert sympify('False') is False assert sympify('Poly') == Poly assert sympify('sin') == sin def test_sympify_function(): assert sympify('factor(x**2-1, x)') == -(1 - x)*(x + 1) assert sympify('sin(pi/2)*cos(pi)') == -Integer(1) def test_sympify_poly(): p = Poly(x**2 + x + 1, x) assert _sympify(p) is p assert sympify(p) is p def test_sympify_factorial(): assert sympify('x!') == factorial(x) assert sympify('(x+1)!') == factorial(x + 1) assert sympify('(1 + y*(x + 1))!') == factorial(1 + y*(x + 1)) assert sympify('(1 + y*(x + 1)!)^2') == (1 + y*factorial(x + 1))**2 assert sympify('y*x!') == y*factorial(x) assert sympify('x!!') == factorial2(x) assert sympify('(x+1)!!') == factorial2(x + 1) assert sympify('(1 + y*(x + 1))!!') == factorial2(1 + y*(x + 1)) assert sympify('(1 + y*(x + 1)!!)^2') == (1 + y*factorial2(x + 1))**2 assert sympify('y*x!!') == y*factorial2(x) assert sympify('factorial2(x)!') == factorial(factorial2(x)) raises(SympifyError, lambda: sympify("+!!")) raises(SympifyError, lambda: sympify(")!!")) raises(SympifyError, lambda: sympify("!")) raises(SympifyError, lambda: sympify("(!)")) raises(SympifyError, lambda: sympify("x!!!")) def test_sage(): # how to effectivelly test for the _sage_() method without having SAGE # installed? assert hasattr(x, "_sage_") assert hasattr(Integer(3), "_sage_") assert hasattr(sin(x), "_sage_") assert hasattr(cos(x), "_sage_") assert hasattr(x**2, "_sage_") assert hasattr(x + y, "_sage_") assert hasattr(exp(x), "_sage_") assert hasattr(log(x), "_sage_") def test_bug496(): assert sympify("a_") == Symbol("a_") assert sympify("_a") == Symbol("_a") @XFAIL def test_lambda(): x = Symbol('x') assert sympify('lambda: 1') == Lambda((), 1) assert sympify('lambda x: 2*x') == Lambda(x, 2*x) assert sympify('lambda x, y: 2*x+y') == Lambda([x, y], 2*x + y) def test_lambda_raises(): with raises(SympifyError): _sympify('lambda: 1') def test_sympify_raises(): raises(SympifyError, lambda: sympify("fx)")) def test__sympify(): x = Symbol('x') f = Function('f') # positive
JasonKessler/scattertext
scattertext/viz/__init__.py
Python
apache-2.0
329
0.009119
from .Scatterplot
Structure import ScatterplotStructure from .BasicHTMLFromScatterplotStructure import BasicHTMLFromScatterplotStructure from scattertext.viz.PairPlotFromScattertextStructure import PairPlotFromScatterplotStructure from .VizDataAdapter import VizDataAdapter from .HTMLSemioticSquareViz import
HTMLSemioticSquareViz
colaftc/webtool
top/api/rest/PictureIsreferencedGetRequest.py
Python
mit
318
0.028302
''' Created by auto_sdk on 2013.11.26 ''' from top.api.base import RestApi class PictureIsreferencedGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80): RestApi
.__init__(self,domain, port) self.picture_id = None def getapiname(self): return 'taobao.picture.isreferenced.get'
pp-mo/iris
docs/iris/example_code/Meteorology/__init__.py
Python
lgpl-3.0
78
0
""" Meteorolo
gy visualisation examples ===============
=================== """
wilas/lab-ci
samples/py_garden/py_simple_tdd/test_nose_flat_play.py
Python
apache-2.0
137
0.021898
def test_assert(): assert 'soup' == 'soup' def test_pass(): pass def test_fail(): assert
False test_fail.wi
ll_fail = True
brigittebigi/proceed
proceed/src/TagPDF/name.py
Python
gpl-3.0
3,002
0.003331
#! /usr/bin/env python # -*- coding: UTF-8 -*- # --------------------------------------------------------------------------- # ___ __ ___ ___ ____ ____ __ # | \ | \ | | / | | | \ Automatic # |__/ |__/ | | | |__ |__ | | Conference # | |\_ | | | | | | | Proceedings # | | \ |___| \___ |___ |___ |__/ Generator # ========================================================== # # http://www.lpl-aix.fr/~bigi/ # # --------------------------------------------------------------------------- # developed at: # # Laboratoire Parole et Langage # # Copyright (C) 2013-2014 Brigitte Bigi # # Use of this software is governed by the GPL, v3 # This banner notice must not be removed # --------------------------------------------------------------------------- # # Proceed is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Proceed is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Proceed. If not, see <http://www.gnu.org/licenses/>. # # --------------------------------------------------------------------------- __docformat__ = "epytext" # --------------------------------------------------------------------------- import sys import os import random import tempfile from datetime import date # --------------------------------------------------------------------------- class GenName(): """ @authors: Brigitte Bigi @contact: brigitte.bigi@gmail.com @license: GPL @summary: A class to generates a random file name of a non-existing file. """ def __init__(self,extens
ion=""): self.name = "/" while os.path.exists(self.name): self.set_name(extension)
def set_name(self, extension): """ Set a new file name. """ # random float value randval = str(int(random.random()*10000)) # process pid pid = str(os.getpid()) # today's date today = str(date.today()) # filename filename = "tmp_"+today+"_"+pid+"_"+randval # final file name is path/filename self.name = filename + extension def get_name(self): """ Get the current file name. """ return str(self.name) # --------------------------------------------------------------------------- if __name__ == "__main__": print GenName().get_name() # ---------------------------------------------------------------------------
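A usage sketch for GenName above (the ".txt" extension is arbitrary):

tmp = GenName(".txt")
print(tmp.get_name())   # e.g. tmp_2014-03-01_1234_567.txt -- a name no existing file currently uses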
mwgit00/poz
poz.py
Python
mit
3,487
0.000287
""" POZ Development Application. """ import numpy as np # import cv2 import pozutil as pu import test_util as tpu def perspective_test(_y, _z, _ele, _azi): print "--------------------------------------" print "Perspective Transform tests" print cam = pu.CameraHelper() # some landmarks in a 3x3 grid pattern p0 = np.float32([-1., _y - 1.0, _z]) p1 = np.float32([0., _y - 1.0, _z]) p2 = np.float32([1., _y - 1.0, _z]) p3 = np.float32([-1., _y + 1.0, _z]) p4 = np.float32([0., _y + 1.0, _z]) p5 = np.float32([1., _y + 1.0, _z]) p6 = np.float32([-1., _y, _z]) p7 = np
.float32([0, _y, _z]) p8 = np.float32([1., _y, _z]) # 3x3 grid array ppp = np.array([p0, p1, p2, p3, p4, p5, p6, p7, p8]) print "Here are some landmarks in world" print ppp puv_acc = [] quv_acc = [] for vp in ppp: # original view of landmarks u, v = cam.project_xyz_to_uv(vp)
puv_acc.append(np.float32([u, v])) # rotated view of landmarks xyz_r = pu.calc_xyz_after_rotation_deg(vp, _ele, _azi, 0) u, v = cam.project_xyz_to_uv(xyz_r) quv_acc.append(np.float32([u, v])) puv = np.array(puv_acc) quv = np.array(quv_acc) # 4-pt "diamond" array quv4 = np.array([quv[1], quv[4], quv[6], quv[8]]) puv4 = np.array([puv[1], puv[4], puv[6], puv[8]]) print print "Landmark img coords before rotate:" print puv print "Landmark img coords after rotate:" print quv print quv4 print # h, _ = cv2.findHomography(puv, quv) # hh = cv2.getPerspectiveTransform(puv4, quv4) # print h # print hh # perspectiveTransform needs an extra dimension puv1 = np.expand_dims(puv, axis=0) # print "Test perspectiveTransform with findHomography matrix:" # xpersp = cv2.perspectiveTransform(puv1, h) # print xpersp # print "Test perspectiveTransform with getPerspectiveTransform matrix:" # xpersp = cv2.perspectiveTransform(puv1, hh) # print xpersp # print if __name__ == "__main__": # robot always knows the Y and Elevation of its camera # (arbitrary assignments for testing) known_cam_y = -3. known_cam_el = 0.0 tests = [(1., 1., tpu.lm_vis_1_1), (7., 6., tpu.lm_vis_7_6)] print "--------------------------------------" print "Landmark Test" print test_index = 0 vis_map = tests[test_index][2] # robot does not know its (X, Z) position # it will have to solve for it cam_x = tests[test_index][0] cam_z = tests[test_index][1] print "Known (X,Z): ", (cam_x, cam_z) for key in sorted(vis_map.keys()): cam_azim = vis_map[key].az + 0. # change offset for testing cam_elev = vis_map[key].el + known_cam_el print "-----------" # print "Known Camera Elev =", cam_elev xyz = [cam_x, known_cam_y, cam_z] angs = [cam_azim, cam_elev] print "Landmark {:s}. Camera Azim = {:8.2f}".format(key, cam_azim) lm1 = tpu.mark1[key] f, x, z, a = tpu.landmark_test(lm1, tpu.mark2[key], xyz, angs) print "Robot is at: {:6.3f},{:6.3f},{:20.14f}".format(x, z, a) f, x, z, a = tpu.landmark_test(lm1, tpu.mark3[key], xyz, angs) print "Robot is at: {:6.3f},{:6.3f},{:20.14f}".format(x, z, a) tpu.pnp_test(key, xyz, angs)
Yelp/pysensu-yelp
setup.py
Python
apache-2.0
480
0
#!/usr/bin/env python from setuptools import setup, find_packages setup( name='pysen
su-yelp', version='0.4.4', provides=['pysensu_yelp'], description='Emits Yelp-flavored Sensu events to a Sensu Client',
url='https://github.com/Yelp/pysensu-yelp', author='Yelp Operations Team', author_email='operations@yelp.com', packages=find_packages(exclude=['tests']), install_requires=['six'], license='Copyright Yelp 2014, all rights reserved', )
shendo/peerz
peerz/messaging/base.py
Python
gpl-3.0
2,720
0.002941
# Peerz - P2P python library using ZeroMQ sockets and gevent # Copyright (C) 2014-2015 Steve Henderson # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import time from transitions import Machine class MessageState(object): states = ['initialised', 'waiting response', 'complete', 'timedout'] transitions = [ {'trigger': 'query', 'source': 'initialised', 'dest': 'waiting response', 'before': '_update', 'after': '_send_query'}, {'trigger': 'response', 'source': 'waiting response', 'dest': 'complete', 'before': '_update', 'after': '_completed'}, {'trigger': 'timeout', 'source': '*', 'dest': 'timedout', 'before': '_update', 'after': '_completed', }, ] def __init__(self, engine, txid, msg, callback=None, max_duration=5000, max_concurrency=3): self.engine = engine self.callback = callback self.machine = Machine(model=self, states=self.states, transitions=self.transitions, initial='initialised') self.start = self.last_change = time.time() * 1000 self.max_duration = max_duration self.max_concurrency = max_concurrency self.txid = txid self.times = {} self.parse_message(msg) self.query() def query(self): pass def parse_message(self, msg): self.val = msg.pop(0) def is_complete(self): return self.state in ['complete', 'timedout'] def pack_request(self): return None @staticmethod def unpack_response(content): return None @staticmethod def pack_response(content): return None def _update(self): now = time.time() * 1000 self.tim
es.setdefault(self.state, 0.0) self.times[self.state] += (now - self.last_change) self.last_change = now def duration(self): return time.time() * 1000 - self.start def laten
cy(self): return self.times.setdefault('waiting response', 0.0) def _send_query(self): pass def _completed(self): pass
rubiconjosh/aoc-2015
day4-part2.py
Python
mit
345
0
import hashlib puzzle_input = 'iwrupvqb' cu
rrent = 0 done = False while not done: combined_input = puzzle_input + str(current) solution = hashlib.md5(combined_input.encode()) solution = str(solution.hexdigest()) print(solution) if solution.startswith('000000'): done = True print(current) c
urrent += 1
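The same brute-force MD5 search as above, wrapped in a reusable function (a sketch of the identical algorithm, without the per-iteration print):

import hashlib

def find_suffix(secret, zeros=6):
    """Smallest n such that md5(secret + str(n)) starts with `zeros` zero characters."""
    target = '0' * zeros
    n = 0
    while True:
        if hashlib.md5((secret + str(n)).encode()).hexdigest().startswith(target):
            return n
        n += 1

# find_suffix('iwrupvqb', 6) reproduces the answer printed by the loop above;
# part 1 of the same puzzle uses five leading zeros, i.e. find_suffix('iwrupvqb', 5).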
glemaitre/scikit-learn
sklearn/tests/test_calibration.py
Python
bsd-3-clause
23,376
0
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # License: BSD 3 clause import pytest import numpy as np from numpy.testing import assert_allclose from scipy import sparse from sklearn.base import BaseEstimator from sklearn.dummy import DummyClassifier from sklearn.model_selection import LeaveOneOut, train_test_split from sklearn.utils._testing import (assert_array_almost_equal, assert_almost_equal, assert_array_equal, ignore_warnings) from sklearn.utils.extmath import softmax from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification, make_blobs from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import KFold, cross_val_predict from sklearn.naive_bayes import MultinomialNB from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from sklearn.svm import LinearSVC from sklearn.isotonic import IsotonicRegression from sklearn.feature_extraction import DictVectorizer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.metrics import brier_score_loss from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration from sklearn.calibration import calibration_curve @pytest.fixture(scope="module") def data(): X, y = make_classification( n_samples=200, n_features=6, random_state=42 ) return X, y @pytest.mark.parametrize('method', ['sigmoid', 'isotonic']) @pytest.mark.parametrize('ensemble', [True, False]) def test_calibration(data, method, ensemble): # Test calibration objects with isotonic and sigmoid n_samples = 100 X, y = data sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) X -= X.min() # MultinomialNB only allows positive X # split train and test X_train, y_train, sw_train = \ X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_test, y_test = X[n_samples:], y[n_samples:] # Naive-Bayes clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train) prob_pos_clf = clf.predict_proba(X_test)[:, 1] cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble) with pytest.raises(ValueError): cal_clf.fit(X, y) # Naive Bayes with calibration for this_X_train, this_X_test in [(X_train, X_test), (sparse.csr_matrix(X_train), sparse.csr_matrix(X_test))]: cal_clf = CalibratedClassifierCV( clf, method=method, cv=5, ensemble=ensemble ) # Note that this fit overwrites the fit on the entire training # set cal_clf.fit(this_X_train, y_train, sample_weight=sw_train) prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1] # Check that brier score has improved after calibration assert (brier_score_loss(y_test, prob_pos_clf) > brier_score_loss(y_test, prob_pos_cal_clf)) # Check invariance against relabeling [0, 1] -> [1, 2] cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train) prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled) # Check invariance against relabeling [0, 1] -> [-1, 1] cal_clf.fit(this_X_train,
2 * y_train - 1, sample_weight=sw_train) prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] assert_array_almost_equal(prob_p
os_cal_clf, prob_pos_cal_clf_relabeled) # Check invariance against relabeling [0, 1] -> [1, 0] cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train) prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1] if method == "sigmoid": assert_array_almost_equal(prob_pos_cal_clf, 1 - prob_pos_cal_clf_relabeled) else: # Isotonic calibration is not invariant against relabeling # but should improve in both cases assert (brier_score_loss(y_test, prob_pos_clf) > brier_score_loss((y_test + 1) % 2, prob_pos_cal_clf_relabeled)) @pytest.mark.parametrize('ensemble', [True, False]) def test_calibration_bad_method(data, ensemble): # Check only "isotonic" and "sigmoid" are accepted as methods X, y = data clf = LinearSVC() clf_invalid_method = CalibratedClassifierCV( clf, method="foo", ensemble=ensemble ) with pytest.raises(ValueError): clf_invalid_method.fit(X, y) @pytest.mark.parametrize('ensemble', [True, False]) def test_calibration_regressor(data, ensemble): # `base-estimator` should provide either decision_function or # predict_proba (most regressors, for instance, should fail) X, y = data clf_base_regressor = \ CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble) with pytest.raises(RuntimeError): clf_base_regressor.fit(X, y) def test_calibration_default_estimator(data): # Check base_estimator default is LinearSVC X, y = data calib_clf = CalibratedClassifierCV(cv=2) calib_clf.fit(X, y) base_est = calib_clf.calibrated_classifiers_[0].base_estimator assert isinstance(base_est, LinearSVC) @pytest.mark.parametrize('ensemble', [True, False]) def test_calibration_cv_splitter(data, ensemble): # Check when `cv` is a CV splitter X, y = data splits = 5 kfold = KFold(n_splits=splits) calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble) assert isinstance(calib_clf.cv, KFold) assert calib_clf.cv.n_splits == splits calib_clf.fit(X, y) expected_n_clf = splits if ensemble else 1 assert len(calib_clf.calibrated_classifiers_) == expected_n_clf @pytest.mark.parametrize('method', ['sigmoid', 'isotonic']) @pytest.mark.parametrize('ensemble', [True, False]) def test_sample_weight(data, method, ensemble): n_samples = 100 X, y = data sample_weight = np.random.RandomState(seed=42).uniform(size=len(y)) X_train, y_train, sw_train = \ X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_test = X[n_samples:] base_estimator = LinearSVC(random_state=42) calibrated_clf = CalibratedClassifierCV( base_estimator, method=method, ensemble=ensemble ) calibrated_clf.fit(X_train, y_train, sample_weight=sw_train) probs_with_sw = calibrated_clf.predict_proba(X_test) # As the weights are used for the calibration, they should still yield # different predictions calibrated_clf.fit(X_train, y_train) probs_without_sw = calibrated_clf.predict_proba(X_test) diff = np.linalg.norm(probs_with_sw - probs_without_sw) assert diff > 0.1 @pytest.mark.parametrize('method', ['sigmoid', 'isotonic']) @pytest.mark.parametrize('ensemble', [True, False]) def test_parallel_execution(data, method, ensemble): """Test parallel calibration""" X, y = data X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) base_estimator = LinearSVC(random_state=42) cal_clf_parallel = CalibratedClassifierCV( base_estimator, method=method, n_jobs=2, ensemble=ensemble ) cal_clf_parallel.fit(X_train, y_train) probs_parallel = cal_clf_parallel.predict_proba(X_test) cal_clf_sequential = CalibratedClassifierCV( base_estimator, method=method, n_jobs=1, ensemble=ensemble ) cal_clf_sequential.fit(X_train, y_train) 
probs_sequential = cal_clf_sequential.predict_proba(X_test) assert_allclose(probs_parallel, probs_sequential) @pytest.mark.parametrize('method', ['sigmoid', 'isotonic']) @pytest.mark.parametrize('ensemble', [True, False]) # increase the number of RNG seeds to assess the statistical stability of this # test: @pytest.mark.parametrize('seed', range(2)) def test_calibration_multiclass(method, ensemble,
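The estimator exercised by these tests can be used directly; a minimal sketch (dataset shape, cv value, and random_state are arbitrary):

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV

X, y = make_classification(n_samples=200, n_features=6, random_state=42)
cal_clf = CalibratedClassifierCV(LinearSVC(random_state=42), method='sigmoid', cv=3)
cal_clf.fit(X, y)
print(cal_clf.predict_proba(X[:2]))   # calibrated class probabilities for the first two samples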
cscott/wikiserver
mwlib/serve.py
Python
gpl-2.0
19,146
0.004805
#! /usr/bin/env python """WSGI server interface to mw-render and mw-zip/mw-post""" import os import re import shutil import signal import StringIO import subprocess import time import urllib2 try: from hashlib import md5 except ImportError: from md5 import md5 try: import json except ImportError: import simplejson as json from mwlib import filequeue, log, podclient, utils, wsgi, _version # ============================================================================== log = log.Log('mwlib.serve') # ============================================================================== def no_job_queue(job_type, collection_id, args): """Just spawn a new process for the given job""" if os.name == 'nt': kwargs = {} else: kwargs = {'close_fds': True} try: log.info('queueing %r' % args) subprocess.Popen(args, **kwargs) except OSError, exc: raise RuntimeError('Could not execute command %r: %s' % ( args[0], exc, )) # ============================================================================== collection_id_rex = re.compile(r'^[a-z0-9]{16}$') def make_collection_id(data): sio = StringIO.StringIO() for key in ( _version.version, 'metabook', 'base_url', 'script_extension', 'template_blacklist', 'template_exclusion_category', 'login_credentials', ): sio.write(repr(data.get(key))) return md5(sio.getvalue()).hexdigest()[:16] # ============================================================================== def json_response(fn): """Decorator wrapping result of decorated function in JSON response""" def wrapper(*args, **kwargs): result = fn(*args, **kwargs) if isinstance(result, wsgi.Response): return result return wsgi.Response( content=json.dumps(result), headers={'Content-Type': 'application/json'}, ) return wrapper # ============================================================================== class Application(wsgi.Application): metabook_filename = 'metabook.json' error_filename = 'errors' status_filename = 'status' output_filename = 'output' pid_filename = 'pid' zip_filename = 'collection.zip' mwpostlog_filename = 'mw-post.log' mwziplog_filename = 'mw-zip.log' mw
renderlog_filename = 'mw-render.log' def __init__(self, cache_dir, mwrender_cmd, mwrender_logfile, mwzip_cmd, mwzip_logfile, mwpost_cmd, mwpost_logfile, queue_dir, default_writer='rl', report_from_mail=None, report_recipients=None, ): self.cache_dir = utils.ensure_dir(cache_dir) self.mwrender_cmd = mwrender_cmd self.mwrender_logfile = mwre
nder_logfile self.mwzip_cmd = mwzip_cmd self.mwzip_logfile = mwzip_logfile self.mwpost_cmd = mwpost_cmd self.mwpost_logfile = mwpost_logfile if queue_dir: self.queue_job = filequeue.FileJobQueuer(utils.ensure_dir(queue_dir)) else: self.queue_job = no_job_queue self.default_writer = default_writer self.report_from_mail = report_from_mail self.report_recipients = report_recipients def dispatch(self, request): try: command = request.post_data['command'] except KeyError: return self.error_response('no command given') try: method = getattr(self, 'do_%s' % command) except AttributeError: return self.error_response('invalid command %r' % command) try: return method(request.post_data) except Exception, exc: return self.error_response('error executing command %r: %s' % ( command, exc, )) @json_response def error_response(self, error): if isinstance(error, str): error = unicode(error, 'utf-8', 'ignore') elif not isinstance(error, unicode): error = unicode(repr(error), 'ascii') self.send_report_mail('error response', error=error) return {'error': error} def send_report_mail(self, subject, **kwargs): if not (self.report_from_mail and self.report_recipients): return utils.report( system='mwlib.serve', subject=subject, from_email=self.report_from_mail, mail_recipients=self.report_recipients, write_file=False, **kwargs ) def get_collection_dir(self, collection_id): return os.path.join(self.cache_dir, collection_id) def check_collection_id(self, collection_id): if not collection_id or not collection_id_rex.match(collection_id): raise RuntimeError('invalid collection ID %r' % collection_id) collection_dir = self.get_collection_dir(collection_id) if not os.path.exists(collection_dir): raise RuntimeError('no such collection: %r' % collection_id) def new_collection(self, post_data): collection_id = make_collection_id(post_data) collection_dir = self.get_collection_dir(collection_id) if not os.path.isdir(collection_dir): log.info('Creating new collection dir %r' % collection_dir) os.makedirs(collection_dir) return collection_id def get_path(self, collection_id, filename, ext=None): p = os.path.join(self.get_collection_dir(collection_id), filename) if ext is not None: p += '.' 
+ ext[:10] return p @json_response def do_render(self, post_data): metabook_data = post_data.get('metabook') collection_id = post_data.get('collection_id') if not (metabook_data or collection_id): return self.error_response('POST argument metabook or collection_id required') if metabook_data and collection_id: return self.error_response('Specify either metabook or collection_id, not both') try: base_url = post_data['base_url'] writer = post_data.get('writer', self.default_writer) except KeyError, exc: return self.error_response('POST argument required: %s' % exc) writer_options = post_data.get('writer_options', '') template_blacklist = post_data.get('template_blacklist', '') template_exclusion_category = post_data.get('template_exclusion_category', '') login_credentials = post_data.get('login_credentials', '') force_render = bool(post_data.get('force_render')) script_extension = post_data.get('script_extension', '') if not collection_id: collection_id = self.new_collection(post_data) log.info('render %s %s' % (collection_id, writer)) response = { 'collection_id': collection_id, 'writer': writer, 'is_cached': False, } pid_path = self.get_path(collection_id, self.pid_filename, writer) if os.path.exists(pid_path): log.info('mw-render already running for collection %r' % collection_id) return response output_path = self.get_path(collection_id, self.output_filename, writer) if os.path.exists(output_path): if force_render: log.info('removing rendered file %r (forced rendering)' % output_path) utils.safe_unlink(output_path) else: log.info('re-using rendered file %r' % output_path) response['is_cached'] = True return response status_path = self.get_path(collection_id, self.status_filename, writer) if os.path.exists(status_path): if force_render: log.info('removing status file %r (forced rendering)' % status_path) utils.safe_unlink(status_path) else: log.info('status file exists %r' % status_path) return response error_pa
foxbunny/Timetrack
tt.py
Python
gpl-3.0
7,554
0.003574
#!/usr/bin/env python """ Simple-stupid time tracker script ================================= Timetrack opyright (C) 2010, Branko Vukelic <studio@brankovukelic.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys import getopt import os import re import sqlite3 HOME_DIR = os.path.expanduser('~') DEFAULT_FILE = os.path.join(HOME_DIR, 'timesheet.db') PID_RE = re.compile(r'^[A-Za-z]{3}$') def optpair(opts): """ Pair option switches and their own arguments """ optdict = {} for sw, a in opts: optdict[sw] = a return optdict def check_pid(pname): """ Check project name, return true if it is correct """ if PID_RE.match(pname): return True return False def generate_timestamp(): from datetime import datetime timenow = datetime.now() return (datetime.strftime(timenow, '%Y-%m-%d %H:%M:%S'), timenow) def getduration(seconds): seconds = int(seconds) hours = seconds // 3600 seconds = seconds - hours * 3600 minutes = seconds // 60 seconds = seconds - minutes * 60 return (hours, minutes, seconds) def get_pids(connection): """ Get unique PIDs from database """ pids = [] c = connection.cursor() c.execute("SELECT DISTINCT pid FROM timesheet ORDER BY pid ASC;") for pid in c: pids.append(pid[0]) c.close() return pids def get_times(connection, pidfilter): """ Return a dictionary of PIDs with [job, time] pairs """ if pidfilter: pids = [pidfilter] else: pids = get_pids(connection) pid_times = {} for pid in pids: c = connection.cursor() c.execute("SELECT desc, TOTAL(dur) FROM timesheet WHERE pid = ? GROUP BY desc;", (pid,)) results = [] for result in c: results.append(result) pid_times[pid] = results c.close() return pid_times def read_stats(connection, pidfilter): pid_times = get_times(connection, pidfilter) if not pid_times: print "No data in database. Exiting." return True for k in pid_times.keys(): print "" print "==========================" print "PID: %s" % k print "==========================" print "" for j in pid_times[k]: print "Job: %s" % j[0] print "Time: %02d:%02d:%02d" % getduration(j[1]) print "" print "==========================" print "" def export_tsv(connection, filename, pidfilter): pid_times = get_times(connection, pidfilter) if not pid_times: print "No data in database. Exiting." return True f = open(filename, 'w') # Write header f.write('PID\tJob\tTime\n') for k in pid_times.keys(): for j in pid_times[k]: f.write('%s\t%s\t%s\n' % (k, j[0], j[1])) f.close() def clean_string(s): """ Escapes characters in a string for SQL """ return s.replace(';', '\\;').replace('\'', '\\\'') def add_data(connection, pidfilter): """ Gives user a prompt and writes data to the fhandle file """ import readline print "Press Ctrl+C to exit." 
try: while True: pid = pidfilter while not check_pid(pid): pid = raw_input("PID: ") if not check_pid(pid): print "'%s' is not a valid pid, please use a 3 letter sequence" % pid print "Project ID is %s" % pid desc = raw_input("Job: ") desc = clean_string(desc) if pid and desc: timestamp, starttime = generate_timestamp() print "Timer started at %s" % timestamp raw_input("Press Enter to stop the timer or Ctrl+C to abort") endtimestamp, endtime = generate_timestamp() print "Timer stopped at %s" % endtimestamp delta = endtime - starttime dsecs = delta.seconds print "Total duration was %s seconds" % dsecs args = (timestamp, pid, desc, dsecs) c = connection.cursor() try: c.execute("INSERT INTO timesheet (timestamp, pid, desc, dur) VALUES (?, ?, ?, ?)", args) except: connection.rollback() print "DB error: Data was not written" raise else: connection.commit() c.close() print "\n" except KeyboardInterrupt: connection.rollback() def usage(): print """Timetrack Copyright (c) 2010, Branko Vukelic Released under GNU/GPL v3, see LICENSE file for details. Usage: tt.py [-a] [-r] [-t FILE] [-p PID] [--add] [--read] [--tsv FILE] [--pid PID] [dbfile] -r --read : Display the stats. -a --add : Start timer session (default action). -t --tsv : Export into a tab-separated table (TSV). FILE is the filename to use for exporting. -p --pid : With argument 'PID' (3 letters, no numbers or non-alphanumeric characters. Limits all operations to a single PID. dbfile : Use this file as database, instead of default file. If the specified file does not exist, it will be creadted. More information at: http://github.com/foxbunny/timetrack """ def main(argv): try: opts, args = getopt.getopt(argv, 'rat:p:', ['read', 'add', 'tsv=', 'pid=']) except getopt.GetoptError: usage() sys.exit(2) optdict = optpair(opts) statsfile = len(args) and args[0] or DEFAULT_FILE print "Using stats file '%s'" % statsfile pidfilter = optdict.get('-p', '') or optdict.get('--pid', '') if pidfilter: if check_pid(pidfilter): print "Using project ID filter '%s'" % pidfilter else: print "Project ID filter '%s' is invalid and will be ignored." % pidfilter print "Opening connection to database." try: connection = sqlite3.connect(statsfile) except: print "Database error. Exiting." sys.exit(2) print "Initialize table if none exists" c = connection.cursor() try: c.execute("""CREATE TABLE IF NOT EXISTS timesheet ( id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp DATETIME DEFAULT (datetime('now')), pid VARCHAR(3) NOT NULL, desc VARCHAR(255) NOT NULL, dur INTEGER NOT NULL);""") except: connection.rollback() raise else: connection.commit() c.close() if ('-r' in optdict.keys()) or ('--read' in optdict.keys()): read_stats(connection, pidfilter) elif ('-t' in optdict.keys()) or ('--tsv' in optdict.keys()): filename = optdict.get('-t', None) or optdict.get('--tsv') export_tsv(connection, filename, pidfilter) else: add_data(connecti
on, pidfilter) print "Closing connection to database" connection.close() sys.exit(1) if __name__ == '__main__': main(sys.
argv[1:])
data-refinery/data_refinery
common/data_refinery_common/models/jobs.py
Python
bsd-3-clause
8,068
0.001363
from typing import Dict, Set from django.db import transaction from django.db import models from django.utils import timezone from data_refinery_common.models.models import Sample, Experiment, OriginalFile class SurveyJob(models.Model): """Records information about a Surveyor Job.""" class Meta: db_table = "survey_jobs" source_type = models.CharField(max_length=256) success = models.NullBooleanField(null=True) no_retry = models.BooleanField(default=False) nomad_job_id = models.CharField(max_length=256, null=True) ram_amount = models.IntegerField(default=256) # The start time of the job start_time = models.DateTimeField(null=True) # T
he end time of the job end_time = models.DateTimeField(null=True) # This field represents how many times this job has been # retried. It starts at 0 and each time the job has to be retried # it will be incremented. num_retries = models.IntegerField(default=0) # This field indicates whether or not this job has been retried # already or not. retried = models.BooleanField(default=False) # This field allows jobs to specify why th
ey failed. failure_reason = models.TextField(null=True) created_at = models.DateTimeField(editable=False, default=timezone.now) last_modified = models.DateTimeField(default=timezone.now) def save(self, *args, **kwargs): """ On save, update timestamps """ current_time = timezone.now() if not self.id: self.created_at = current_time self.last_modified = current_time return super(SurveyJob, self).save(*args, **kwargs) def get_properties(self) -> Dict: """ Return all associated SurveyJobKeyValues as a dict""" return {pair.key: pair.value for pair in self.surveyjobkeyvalue_set.all()} def get_accession_code(self): """ Return `experiment_accession_code`, the most important code.""" try: kvp = self.surveyjobkeyvalue_set.get(key="experiment_accession_code") return kvp.value except: return None def __str__(self): return "SurveyJob " + str(self.pk) + ": " + str(self.source_type) class SurveyJobKeyValue(models.Model): """Tracks additional fields for SurveyJobs. Useful for fields that would be sparsely populated if they were their own columns. I.e. one source may have an extra field or two that are worth tracking but are specific to that source. """ survey_job = models.ForeignKey(SurveyJob, on_delete=models.CASCADE) key = models.CharField(max_length=256) value = models.CharField(max_length=256) class Meta: db_table = "survey_job_key_values" class ProcessorJob(models.Model): """Records information about running a processor.""" class Meta: db_table = "processor_jobs" # This field will contain an enumerated value specifying which # processor pipeline was applied during the processor job. pipeline_applied = models.CharField(max_length=256) original_files = models.ManyToManyField('OriginalFile', through='ProcessorJobOriginalFileAssociation') datasets = models.ManyToManyField('DataSet', through='ProcessorJobDataSetAssociation') no_retry = models.BooleanField(default=False) # Resources ram_amount = models.IntegerField(default=2048) volume_index = models.CharField(max_length=3, null=True) # Tracking start_time = models.DateTimeField(null=True) end_time = models.DateTimeField(null=True) success = models.NullBooleanField(null=True) nomad_job_id = models.CharField(max_length=256, null=True) # This field represents how many times this job has been # retried. It starts at 0 and each time the job has to be retried # it will be incremented. num_retries = models.IntegerField(default=0) # This field indicates whether or not this job has been retried # already or not. retried = models.BooleanField(default=False) # This point of this field is to identify which worker ran the # job. A few fields may actually be required or something other # than just an id. worker_id = models.CharField(max_length=256, null=True) # This field corresponds to the version number of the # data_refinery_workers project that was used to run the job. worker_version = models.CharField(max_length=128, null=True) # This field allows jobs to specify why they failed. 
failure_reason = models.TextField(null=True) # If the job is retried, this is the id of the new job retried_job = models.ForeignKey('self', on_delete=models.PROTECT, null=True) created_at = models.DateTimeField(editable=False, default=timezone.now) last_modified = models.DateTimeField(default=timezone.now) def get_samples(self) -> Set[Sample]: samples = set() for original_file in self.original_files.all(): for sample in original_file.samples.all(): samples.add(sample) return samples def save(self, *args, **kwargs): """ On save, update timestamps """ current_time = timezone.now() if not self.id: self.created_at = current_time self.last_modified = current_time return super(ProcessorJob, self).save(*args, **kwargs) def __str__(self): return "ProcessorJob " + str(self.pk) + ": " + str(self.pipeline_applied) class DownloaderJob(models.Model): """Records information about running a Downloader.""" class Meta: db_table = "downloader_jobs" # This field contains a string which corresponds to a valid # Downloader Task. Valid values are enumerated in: # data_refinery_common.job_lookup.Downloaders downloader_task = models.CharField(max_length=256) accession_code = models.CharField(max_length=256, blank=True, null=True) no_retry = models.BooleanField(default=False) original_files = models.ManyToManyField('OriginalFile', through='DownloaderJobOriginalFileAssociation') # Tracking start_time = models.DateTimeField(null=True) end_time = models.DateTimeField(null=True) success = models.NullBooleanField(null=True) nomad_job_id = models.CharField(max_length=256, null=True) # This field represents how many times this job has been # retried. It starts at 0 and each time the job has to be retried # it will be incremented. num_retries = models.IntegerField(default=0) # This field indicates whether or not this job has been retried # already or not. retried = models.BooleanField(default=False) # This point of this field is to identify which worker ran the # job. A few fields may actually be required or something other # than just an id. worker_id = models.CharField(max_length=256, null=True) # This field corresponds to the version number of the # data_refinery_workers project that was used to run the job. worker_version = models.CharField(max_length=128, null=True) # This field allows jobs to specify why they failed. failure_reason = models.TextField(null=True) # If the job is retried, this is the id of the new job retried_job = models.ForeignKey('self', on_delete=models.PROTECT, null=True) created_at = models.DateTimeField(editable=False, default=timezone.now) last_modified = models.DateTimeField(default=timezone.now) def get_samples(self) -> Set[Sample]: samples = set() for original_file in self.original_files.all(): for sample in original_file.samples.all(): samples.add(sample) return samples def save(self, *args, **kwargs): """ On save, update timestamps """ current_time = timezone.now() if not self.id: self.created_at = current_time self.last_modified = current_time return super(DownloaderJob, self).save(*args, **kwargs) def __str__(self): return "DownloaderJob " + str(self.pk) + ": " + str(self.downloader_task)
DGA-MI-SSI/YaCo
deps/swig-3.0.7/Examples/test-suite/python/cpp11_decltype_runme.py
Python
gpl-3.0
347
0.011527
import cpp11_decltype a = cp
p11_decltype.A() a.i = 5 if a.i != 5: raise RuntimeError, "Assignment t
o a.i failed." a.j = 10 if a.j != 10: raise RuntimeError, "Assignment to a.j failed." b = a.foo(5) if b != 10: raise RuntimeError, "foo(5) should return 10." b = a.foo(6) if b != 0: raise RuntimeError, "foo(6) should return 0."
lre/deeppy
deeppy/loss.py
Python
mit
3,134
0
import numpy as np import cudarray as ca from .base import PickleMixin _FLT_MIN = np.finfo(ca.float_).tiny class Loss(PickleMixin): # abll: I suspect that this interface is not ideal. It would be more # elegant if Loss only provided loss() and grad(). However, where should # we place the logic from fprop()? @classmethod def from_any(cls, arg): if isinstance(arg, Loss): return arg elif isinstance(arg, str): if arg == 'softmaxce': return SoftmaxCrossEntropy() elif arg == 'bce': return BinaryCrossEntropy() elif arg == 'mse': return MeanSquaredError() raise ValueError('Invalid constructor arguments: %s' % arg) def _setup(self, x_shape): pass def fprop(self, x): return x def loss(self, target, x): """ Returns the loss calculated from the target and the input. """ raise NotImplementedError() def grad(self, target, x): """ Returns the input gradient. """ raise NotImplementedError() def y_shape(self, x_shape): return x_shape class SoftmaxCrossEntropy(Loss)
: """ Softmax + cross entropy (aka. multinomial logistic loss) """ def __init__(self): self.name = 'softmaxce' self._tmp_x = None self._tmp_y = None self._tmp_target = None self._tmp_one_hot = None self.n_classes = None def _setup(self, x_shape): self.n_classes = x_shape[1] def _softmax(self, x): # caching wrapper if self._tmp_x is
not x: self._tmp_y = ca.nnet.softmax(x) self._tmp_x = x return self._tmp_y def _one_hot(self, target): # caching wrapper if self._tmp_target is not target: self._tmp_one_hot = ca.nnet.one_hot_encode(target, self.n_classes) self._tmp_target = target return self._tmp_one_hot def fprop(self, x): return ca.nnet.one_hot_decode(self._softmax(x)) def loss(self, target, x): y = self._softmax(x) target = self._one_hot(target) return ca.nnet.categorical_cross_entropy(y_pred=y, y_true=target) def grad(self, target, x): y = self._softmax(x) target = self._one_hot(target) return -(target - y) def y_shape(self, x_shape): return (x_shape[0],) class BinaryCrossEntropy(Loss): def __init__(self): self.name = 'bce' def loss(self, y, y_pred): y_pred = ca.maximum(y_pred, _FLT_MIN) return -ca.mean(y*ca.log(y_pred) + (1 - y)*ca.log(1 - y_pred), axis=1) def grad(self, y, y_pred): y_pred = ca.maximum(y_pred, _FLT_MIN) return -(y/y_pred - (1-y)/(1-y_pred)) class MeanSquaredError(Loss): def __init__(self): self.name = 'mse' self.n_targets = None def _setup(self, x_shape): self.n_targets = x_shape[1] def loss(self, y, y_pred): return ca.mean((y-y_pred)**2, axis=1) def grad(self, y, y_pred): return 2.0 / self.n_targets * (y_pred - y)
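The MeanSquaredError definitions above can be cross-checked with plain NumPy (a sketch; NumPy stands in for cudarray, which the class itself requires):

import numpy as np

y      = np.array([[1.0, 0.0], [0.0, 1.0]])
y_pred = np.array([[0.9, 0.2], [0.1, 0.8]])
n_targets = y.shape[1]

loss = np.mean((y - y_pred) ** 2, axis=1)   # per-sample loss, as in MeanSquaredError.loss
grad = 2.0 / n_targets * (y_pred - y)       # input gradient, as in MeanSquaredError.grad
print(loss)   # [0.025 0.025]
print(grad)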
mnazim/django-rest-kickstart
helpers/serializers.py
Python
mit
206
0.004854
from rest_framework import
serializers class BaseModelSerializer(serializers.ModelSerializer): id = serializers.SerializerMethodField() def get_id(self, instance): return str(inst
ance.id)
zachjanicki/osf.io
website/addons/mendeley/serializer.py
Python
apache-2.0
155
0.006452
from website.addons.base.serializer import CitationsAddonSe
rializer class MendeleySerializer(CitationsAddonSerializer
): addon_short_name = 'mendeley'
hackebrot/pytest
src/_pytest/cacheprovider.py
Python
mit
13,931
0.00079
""" merged implementation of the cache provider the name cache was not chosen to ensure pluggy automatically ignores the external pytest-cache """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from collections import OrderedDict import attr import py import six import pytest from .compat import _PY2 as PY2 from .pathlib import Path from .pathlib import resolve_from_str from .pathlib import rmtree README_CONTENT = u"""\ # pytest cache directory # This directory contains data from the pytest's cache plugin, which provides the `--lf` and `--ff` options, as well as the `cache` fixture. **Do not** commit this to version control. See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. """ CACHEDIR_TAG_CONTENT = b"""\ Signature: 8a477f597d28d172789f06886806bc55 # This file is a cache directory tag created by pytest. # For information about cache directory tags, see: # http://www.bford.info/cachedir/spec.html """ @attr.s class Cache(object): _cachedir = attr.ib(repr=False) _config = attr.ib(repr=False) @classmethod def for_config(cls, config): cachedir = cls.cache_dir_from_config(config) if config.getoption("cacheclear") and cachedir.exists(): rmtree(cachedir, force=True) cachedir.mkdir() return cls(cachedir, config) @staticmethod def cache_dir_from_config(config): return resolve_from_str(config.getini("cache_dir"), config.rootdir) def warn(self, fmt, **args): from _pytest.warnings import _issue_warning_captured from _pytest.warning_types import PytestWarning _issue_warning_captured( PytestWarning(fmt.format(**args) if args else fmt), self._config.hook, stacklevel=3, ) def makedir(self, name): """ return a directory path object with the given name. If the directory does not yet exist, it will be created. You can use it to manage files likes e. g. store/retrieve database dumps across test sessions. :param name: must be a string not containing a ``/`` separator. Make sure the name contains your plugin or application identifiers to prevent clashes with other cache users. """ name = Path(name) if len(name.parts) > 1: raise ValueError("name is not allowed to contain path separators") res = self._cachedir.joinpath("d", name) res.mkdir(exist_ok=True, parents=True) return py.path.local(res) def _getvaluepath(self, key): return self._cachedir.joinpath("v", Path(key)) def get(self, key, default): """ return cached value for the given key. If no value was yet cached or the value cannot be read, the specified default is returned. :param key: must be a ``/`` separated value. Usually the first name is the name of your plugin or your application. :param default: must be provided in case of a cache-miss or invalid cache values. """ path = self._getvaluepath(key) try: with path.open("r") as f: return json.load(f) except (ValueError, IOError, OSError): return default def set(self, key, value): """ save value for the given key. :param key: must be a ``/`` separated value. Usually the first name is the name of your plugin or your application. :param value: must be of any combination of basic python types, including nested types like e. g. lists of dictionaries. 
""" path = self._getvaluepath(key) try: if path.parent.is_dir(): cache_dir_exists_already = True else: cache_dir_exists_already = self._cachedir.exists() path.parent.mkdir(exist_ok=True, parents=True) except (IOError, OSError): self.warn("could not create cache path {path}", path=path) return try: f = path.open("wb" if PY2 else "w") except (IOError, OSError): self.warn("cache could not write path {path}", path=path) else: with f: json.dump(value, f, indent=2, sort_keys=True) if not cache_dir_exists_already: self._ensure_supporting_files() def _ensure_supporting_files(self): """Create supporting files in the cache dir that are not really part of the cache.""" if self._cachedir.is_dir(): readme_path = self._cachedir / "README.md" if not readme_path.is_file(): readme_path.write_text(README_CONTENT) gitignore_path = self._cachedir.joinpath(".gitignore") if not gitignore_path.is_file(): msg = u"# Created by pytest automatically.\n*" gitignore_path.write_text(msg, encoding="UTF-8") cachedir_tag_path = self._
cachedir.joinpath("CACHEDIR.TAG") if not cachedir_tag_path.is_file(): cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT) class LFPlugin(object): """ Plugin which implements the --lf (run last-failing) option """ def
__init__(self, config): self.config = config active_keys = "lf", "failedfirst" self.active = any(config.getoption(key) for key in active_keys) self.lastfailed = config.cache.get("cache/lastfailed", {}) self._previously_failed_count = None self._no_failures_behavior = self.config.getoption("last_failed_no_failures") def pytest_report_collectionfinish(self): if self.active and self.config.getoption("verbose") >= 0: if not self._previously_failed_count: return None noun = "failure" if self._previously_failed_count == 1 else "failures" suffix = " first" if self.config.getoption("failedfirst") else "" mode = "rerun previous {count} {noun}{suffix}".format( count=self._previously_failed_count, suffix=suffix, noun=noun ) return "run-last-failure: %s" % mode def pytest_runtest_logreport(self, report): if (report.when == "call" and report.passed) or report.skipped: self.lastfailed.pop(report.nodeid, None) elif report.failed: self.lastfailed[report.nodeid] = True def pytest_collectreport(self, report): passed = report.outcome in ("passed", "skipped") if passed: if report.nodeid in self.lastfailed: self.lastfailed.pop(report.nodeid) self.lastfailed.update((item.nodeid, True) for item in report.result) else: self.lastfailed[report.nodeid] = True def pytest_collection_modifyitems(self, session, config, items): if self.active: if self.lastfailed: previously_failed = [] previously_passed = [] for item in items: if item.nodeid in self.lastfailed: previously_failed.append(item) else: previously_passed.append(item) self._previously_failed_count = len(previously_failed) if not previously_failed: # running a subset of all tests with recorded failures outside # of the set of tests currently executing return if self.config.getoption("lf"): items[:] = previously_failed config.hook.pytest_deselected(items=previously_passed) else: items[:] = previously_failed + previously_passed elif self._no_failures_behavior == "none": config.hook.pytest_deselected(items=items) items[:] = [] def pytest_sessionfinish(self, session): config = self.config if config.getoption("cacheshow") or hasattr(config, "slavein
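The Cache.get/Cache.set API shown above is what plugins reach through config.cache and tests reach through the cache fixture; values are JSON round-tripped under '/'-separated keys. A minimal usage sketch, assuming pytest is installed (the key name and payload are illustrative):

import pytest


@pytest.fixture
def expensive_data(request):
    # A cache miss (or unreadable value) returns the supplied default.
    val = request.config.cache.get("example/expensive_data", None)
    if val is None:
        val = {"answer": 42}  # stand-in for an expensive computation
        # Stored values must be JSON-serialisable basic types.
        request.config.cache.set("example/expensive_data", val)
    return val


def test_uses_cache(expensive_data):
    assert expensive_data["answer"] == 42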
jungla/ICOM-fluidity-toolbox
2D/U/plot_Div_spec.py
Python
gpl-2.0
3,442
0.049099
import os, sys import myfun import numpy as np import matplotlib as mpl mpl.use('ps') import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy import interpolate import lagrangian_stats import scipy.fftpack ## READ archive (too many points... somehow) # args: name, dayi, dayf, days #label = 'm_25_2_512' label = 'm_25_1_particles' dayi = 481 #10*24*2 dayf = 581 #10*24*4 days = 1 #label = sys.argv[1] #basename = sys.argv[2] #dayi = int(sys.argv[3]) #dayf = int(sys.a
rgv[4]) #days = int(sys.argv[5]) path = './Velocity_CG/' try: os.stat('./plot/'+label) except OSError: os.mkdir('./plot/'+label) # dimensions archives # ML exp Xlist = np.linspace(0,2000,161) Ylist = np.linspace(0,2000,161) Zlist = np.l
inspace(0,-50,51) dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1] Zlist = np.cumsum(dl) xn = len(Xlist) yn = len(Ylist) zn = len(Zlist) dx = np.diff(Xlist) z = 1 for time in range(dayi,dayf,days): print 'time:', time tlabel = str(time) while len(tlabel) < 3: tlabel = '0'+tlabel #Velocity_CG_m_50_6e_9.csv fileU = path+'Velocity_CG_0_'+label+'_'+str(time)+'.csv' fileV = path+'Velocity_CG_1_'+label+'_'+str(time)+'.csv' fileT = '../RST/Temperature_CG/Temperature_CG_'+label+'_'+str(time)+'.csv' file1 = 'Divergence_'+label+'_'+str(time) # U = lagrangian_stats.read_Scalar(fileU,xn,yn,zn) V = lagrangian_stats.read_Scalar(fileV,xn,yn,zn) T = lagrangian_stats.read_Scalar(fileT,xn,yn,zn) for k in range(0,len(Zlist),5): dU = np.asarray(np.gradient(U[:,:,k])) dV = np.asarray(np.gradient(V[:,:,k])) Div = dU[0,:,:]/dx + dV[1,:,:]/dy # FT = np.zeros((xn/1,yn)) # for j in range(len(Ylist)): tempfft = scipy.fftpack.fft(Div[:,j]**2,xn) FT[:,j] = abs(tempfft)**2 w = scipy.fftpack.fftfreq(xn, dx[1]) # w = scipy.fftpack.fftshift(w) FTp = np.mean(FT,1)/xn fig = plt.figure(figsize=(10,8)) p25, = plt.loglog(w[w>0], FTp[w>0],'r',linewidth=2) plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+5/3.)],'k',linewidth=1.5) plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+3.)],'k',linewidth=1.5) plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+1.)],'k',linewidth=1.5) plt.text(6*10**-2, 5*10**-( -3+5/3.), '-5/3',fontsize=18) plt.text(6*10**-2, 5*10**-( -3+3.), '-3',fontsize=18) plt.text(6*10**-2, 5*10**-( -3+1.), '-1',fontsize=18) plt.text(10**-3, 10**2,str(time*360./3600)+'hr',fontsize=18) plt.xlabel(r'k $[m^{-1}]$',fontsize=20) plt.ylabel(r'PSD',fontsize=20) plt.yticks(fontsize=18) plt.xticks(fontsize=18) plt.xlim([1/2000.,1/10.]) plt.savefig('./plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps',bbox_inches='tight') print './plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps' plt.close() # v = np.linspace(0, 10, 10, endpoint=True) vl = np.linspace(0, 10, 5, endpoint=True) fig = plt.figure(figsize=(6,6)) fig.add_subplot(111,aspect='equal') plt.contourf(Xlist/1000,Ylist/1000,T,v,extend='both',cmap=plt.cm.PiYG) plt.colorbar(ticks=vl) plt.title(str(np.round(10*(time*360./3600))/10.0)+'h') plt.ylabel('Y [km]',fontsize=16) plt.xlabel('X [km]',fontsize=16) plt.savefig('./plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps',bbox_inches='tight') print './plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps' plt.close()
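The heart of the script above is a row-by-row power-spectral-density estimate of the horizontal divergence, computed with scipy.fftpack and then averaged. A self-contained sketch of that pattern, using a random stand-in field instead of the CSV data the script actually reads (sizes and grid spacing are assumptions):

import numpy as np
import scipy.fftpack

xn, yn = 161, 161
dx = 2000.0 / (xn - 1)          # grid spacing in metres (assumed)
field = np.random.rand(xn, yn)  # stand-in for one divergence slice

# FFT each column, accumulate squared amplitudes, then average into a 1-D PSD.
FT = np.zeros((xn, yn))
for j in range(yn):
    FT[:, j] = np.abs(scipy.fftpack.fft(field[:, j] ** 2, xn)) ** 2

w = scipy.fftpack.fftfreq(xn, dx)  # wavenumbers [1/m]; plot only w > 0
psd = FT.mean(axis=1) / xn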
MathieuDuponchelle/django-sortedm2m
sortedm2m/__init__.py
Python
bsd-3-clause
48
0
# -*- coding: utf-8 -*-
__version__ = '0.7.0'
infoxchange/barman
barman/xlog.py
Python
gpl-3.0
14,318
0
# Copyright (C) 2011-2017 2ndQuadrant Limited # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see <http://www.gnu.org/licenses/>. """ This module contains functions to retrieve information about xlog files """ import collections import os import re from tempfile import NamedTemporaryFile from barman.exceptions import BadHistoryFileContents, BadXlogSegmentName # xlog file segment name parser (regular expression) _xlog_re = re.compile(r''' ^ ([\dA-Fa-f]{8}) # everything has a timeline (?: ([\dA-Fa-f]{8})([\dA-Fa-f]{8}) # segment name, if a wal file (?: # and optional \.[\dA-Fa-f]{8}\.backup # offset, if a backup label | \.partial # partial, if a partial file )? | \.history # or only .history, if a history file ) $ ''', re.VERBOSE) # xlog location parser for concurrent backup (regular expression) _location_re = re.compile(r'^([\dA-F]+)/([\dA-F]+)$') # Taken from xlog_internal.h from PostgreSQL sources #: XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2 #: and larger than XLOG_BLCKSZ (preferably, a great deal larger than #: XLOG_BLCKSZ). DEFAULT_XLOG_SEG_SIZE = 1 << 24 #: This namedtuple is a container for the information #: contained inside history files HistoryFileData = collections.namedtuple( 'HistoryFileData', 'tli parent_tli switchpoint reason') def is_any_xlog_file(path): """ Return True if the xlog is either a WAL segment, a .backup file or a .history file, False otherwise. It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.match(os.path.basename(path)) if match: return True return False def is_history_file(path): """ Return True if the xlog is a .history file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith('.history'): return True return False def is_backup_file(path): """ Return True if the xlog is a .backup file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith('.backup'): return True return False def is_partial_file(path): """ Return True if the xlog is a .partial file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith('.partial'): return True return False def is_wal_file(path): """ Return True if the xlog is a regular xlog file, False otherwise It supports either a full file path or a simple file name. 
:param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if (match and not match.group(0).endswith('.backup') and not match.group(0).endswith('.history') and not match.group(0).endswith('.partial')): return True return False def decode_segment_name(path): """ Retrieve the timeline, log ID and segment ID from the name of a xlog
segment It can handle either a full file path or a simple file name. :param str path: the file name to decode :rtype: list[int] """ name =
os.path.basename(path) match = _xlog_re.match(name) if not match: raise BadXlogSegmentName(name) return [int(x, 16) if x else None for x in match.groups()] def encode_segment_name(tli, log, seg): """ Build the xlog segment name based on timeline, log ID and segment ID :param int tli: timeline number :param int log: log number :param int seg: segment number :return str: segment file name """ return "%08X%08X%08X" % (tli, log, seg) def encode_history_file_name(tli): """ Build the history file name based on timeline :return str: history file name """ return "%08X.history" % (tli,) def xlog_segments_per_file(xlog_segment_size): """ Given that WAL files are named using the following pattern: <timeline_number><xlog_file_number><xlog_segment_number> this is the number of XLOG segments in an XLOG file. By XLOG file we don't mean an actual file on the filesystem, but the definition used in the PostgreSQL sources: meaning a set of files containing the same file number. :param int xlog_segment_size: The XLOG segment size in bytes :return int: The number of segments in an XLOG file """ return 0xffffffff // xlog_segment_size def xlog_file_size(xlog_segment_size): """ Given that WAL files are named using the following pattern: <timeline_number><xlog_file_number><xlog_segment_number> this is the size in bytes of an XLOG file, which is composed on many segments. See the documentation of `xlog_segments_per_file` for a commentary on the definition of `XLOG` file. :param int xlog_segment_size: The XLOG segment size in bytes :return int: The size of an XLOG file """ return xlog_segment_size * xlog_segments_per_file(xlog_segment_size) def generate_segment_names(begin, end=None, version=None, xlog_segment_size=None): """ Generate a sequence of XLOG segments starting from ``begin`` If an ``end`` segment is provided the sequence will terminate after returning it, otherwise the sequence will never terminate. If the XLOG segment size is known, this generator is precise, switching to the next file when required. It the XLOG segment size is unknown, this generator will generate all the possible XLOG file names. The size of an XLOG segment can be every power of 2 between the XLOG block size (8Kib) and the size of a log segment (4Gib) :param str begin: begin segment name :param str|None end: optional end segment name :param int|None version: optional postgres version as an integer (e.g. 90301 for 9.3.1) :param int xlog_segment_size: the size of a XLOG segment :rtype: collections.Iterable[str] :raise: BadXlogSegmentName """ begin_tli, begin_log, begin_seg = decode_segment_name(begin) end_tli, end_log, end_seg = None, None, None if end: end_tli, end_log, end_seg = decode_segment_name(end) # this method doesn't support timeline changes assert begin_tli == end_tli, ( "Begin segment (%s) and end segment (%s) " "must have the same timeline part" % (begin, end)) # If version is less than 9.3 the last segment must be skipped skip_last_segment = version is not None and version < 90300 # This is the number of XLOG segments in an XLOG file. By XLOG file # we don't mean an actual file on the filesystem, but the definition # used in the PostgreSQL sources: a set of files containing the # same file number. if xlog_segment_size: # The generator is operating is precise and correct mode: # knowing exactly when a switch to the next file is required xlog_seg_per_file = xlog_segments_per_file(xlog_segment_size)
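The helpers above implement the <timeline><log><segment> naming scheme for WAL files, each component an 8-digit uppercase hex field. A short usage sketch of the functions visible in this record, assuming the barman package is importable:

from barman import xlog

name = xlog.encode_segment_name(1, 0, 1)        # '000000010000000000000001'
tli, log, seg = xlog.decode_segment_name(name)  # 1, 0, 1

xlog.is_wal_file(name)                                   # True
xlog.is_history_file(xlog.encode_history_file_name(2))   # '00000002.history' -> True
xlog.is_backup_file(name + '.00000028.backup')           # True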
apyrgio/ganeti
lib/http/__init__.py
Python
bsd-2-clause
28,992
0.007243
# # # Copyright (C) 2007, 2008, 2010, 2012 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """HTTP module. """ import logging import mimetools import OpenSSL import select import socket import errno from cStringIO import StringIO from ganeti import constants from ganeti import utils HTTP_GANETI_VERSION = "Ganeti %s" % constants.RELEASE_VERSION HTTP_OK = 200 HTTP_NO_CONTENT = 204 HTTP_NOT_MODIFIED = 304 HTTP_0_9 = "HTTP/0.9" HTTP_1_0 = "HTTP/1.0" HTTP_1_1 = "HTTP/1.1" HTTP_GET = "GET" HTTP_HEAD = "HEAD" HTTP_POST = "POST" HTTP_PUT = "PUT" HTTP_DELETE = "DELETE" HTTP_ETAG = "ETag" HTTP_HOST = "Host" HTTP_SERVER = "Server" HTTP_DATE = "Date" HTTP_USER_AGENT = "User-Agent" HTTP_CONTENT_TYPE = "Content-Type" HTTP_CONTENT_LENGTH = "Content-Length" HTTP_CONNECTION = "Connection" HTTP_KEEP_ALIVE = "Keep-Alive" HTTP_WWW_AUTHENTICATE = "WWW-Authenticate" HTTP_AUTHORIZATION = "Authorization" HTTP_AUTHENTICATION_INFO = "Authentication-Info" HTTP_ALLOW = "Allow" HTTP_APP_OCTET_STREAM = "application/octet-stream" HTTP_APP_JSON = "application/json" _SSL_UNEXPECTED_EOF = "Unexpected EOF" # Socket operations (SOCKOP_SEND, SOCKOP_RECV, SOCKOP_SHUTDOWN, SOCKOP_HANDSHAKE) = range(4) # send/receive quantum SOCK_BUF_SIZE = 32768 class HttpError(Exception): """Internal exception for HTTP errors. This should only be used for internal error reporting. """ class HttpConnectionClosed(Exception): """Internal exception for a closed connection. This should only be used for internal error reporting. Only use it if there's no other way to report this condition. """ class HttpSessionHandshakeUnexpectedEOF(HttpError): """Internal exception for errors during SSL handshake. This should on
ly be used for internal error reporting. """ class HttpSocketTimeout(Exception): """Internal exception for socket timeouts. This should only be used for internal error reporting. """ class HttpException(Exception): code = None message = None def __init__(self, message=None, headers=None): Exception.__init__(self) self.message = message self.headers = headers class HttpBadRequest(HttpException): """400 Bad Request RFC2616, 10.4
.1: The request could not be understood by the server due to malformed syntax. The client SHOULD NOT repeat the request without modifications. """ code = 400 class HttpUnauthorized(HttpException): """401 Unauthorized RFC2616, section 10.4.2: The request requires user authentication. The response MUST include a WWW-Authenticate header field (section 14.47) containing a challenge applicable to the requested resource. """ code = 401 class HttpForbidden(HttpException): """403 Forbidden RFC2616, 10.4.4: The server understood the request, but is refusing to fulfill it. Authorization will not help and the request SHOULD NOT be repeated. """ code = 403 class HttpNotFound(HttpException): """404 Not Found RFC2616, 10.4.5: The server has not found anything matching the Request-URI. No indication is given of whether the condition is temporary or permanent. """ code = 404 class HttpMethodNotAllowed(HttpException): """405 Method Not Allowed RFC2616, 10.4.6: The method specified in the Request-Line is not allowed for the resource identified by the Request-URI. The response MUST include an Allow header containing a list of valid methods for the requested resource. """ code = 405 class HttpNotAcceptable(HttpException): """406 Not Acceptable RFC2616, 10.4.7: The resource identified by the request is only capable of generating response entities which have content characteristics not acceptable according to the accept headers sent in the request. """ code = 406 class HttpRequestTimeout(HttpException): """408 Request Timeout RFC2616, 10.4.9: The client did not produce a request within the time that the server was prepared to wait. The client MAY repeat the request without modifications at any later time. """ code = 408 class HttpConflict(HttpException): """409 Conflict RFC2616, 10.4.10: The request could not be completed due to a conflict with the current state of the resource. This code is only allowed in situations where it is expected that the user might be able to resolve the conflict and resubmit the request. """ code = 409 class HttpGone(HttpException): """410 Gone RFC2616, 10.4.11: The requested resource is no longer available at the server and no forwarding address is known. This condition is expected to be considered permanent. """ code = 410 class HttpLengthRequired(HttpException): """411 Length Required RFC2616, 10.4.12: The server refuses to accept the request without a defined Content-Length. The client MAY repeat the request if it adds a valid Content-Length header field containing the length of the message-body in the request message. """ code = 411 class HttpPreconditionFailed(HttpException): """412 Precondition Failed RFC2616, 10.4.13: The precondition given in one or more of the request-header fields evaluated to false when it was tested on the server. """ code = 412 class HttpUnsupportedMediaType(HttpException): """415 Unsupported Media Type RFC2616, 10.4.16: The server is refusing to service the request because the entity of the request is in a format not supported by the requested resource for the requested method. """ code = 415 class HttpInternalServerError(HttpException): """500 Internal Server Error RFC2616, 10.5.1: The server encountered an unexpected condition which prevented it from fulfilling the request. """ code = 500 class HttpNotImplemented(HttpException): """501 Not Implemented RFC2616, 10.5.2: The server does not support the functionality required to fulfill the request. 
""" code = 501 class HttpBadGateway(HttpException): """502 Bad Gateway RFC2616, 10.5.3: The server, while acting as a gateway or proxy, received an invalid response from the upstream server it accessed in attempting to fulfill the request. """ code = 502 class HttpServiceUnavailable(HttpException): """503 Service Unavailable RFC2616, 10.5.4: The server is currently unable to handle the request due to a temporary overloading or maintenance of the server. """ code = 503 class HttpGatewayTimeout(HttpException): """504 Gateway Timeout RFC2616, 10.5.5: The server, while acting as a gateway or proxy, did not receive a timely response from the upstream server specified by the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server (e.g. DNS) it needed to access in attempting to complete the request. """ code = 504 class HttpVersionNotSupported(HttpException): """505 HTTP Version Not Supported RFC2616, 10.5.6: The server does not support, or refuses to support, the HTTP prot
tomprince/gemrb
gemrb/GUIScripts/bg1/GUICG22.py
Python
gpl-2.0
3,963
0.028261
# GemRB - Infinity Engine Emulator # Copyright (C) 2003 The GemRB Project # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # #character generation, class kit (GUICG22) import GemRB
from GUIDefines import * from ie_stats import * import CharGen
Common import GUICommon import CommonTables KitWindow = 0 TextAreaControl = 0 DoneButton = 0 SchoolList = 0 ClassID = 0 def OnLoad(): global KitWindow, TextAreaControl, DoneButton global SchoolList, ClassID if GUICommon.CloseOtherWindow(OnLoad): if(KitWindow): KitWindow.Unload() KitWindow = None return GemRB.LoadWindowPack("GUICG", 640, 480) RaceName = CommonTables.Races.GetRowName(GemRB.GetVar("Race")-1 ) Class = GemRB.GetVar("Class")-1 ClassName = CommonTables.Classes.GetRowName(Class) ClassID = CommonTables.Classes.GetValue(Class, 5) KitTable = GemRB.LoadTable("kittable") KitTableName = KitTable.GetValue(ClassName, RaceName) KitTable = GemRB.LoadTable(KitTableName,1) SchoolList = GemRB.LoadTable("magesch") #there is only a specialist mage window for bg1 KitWindow = GemRB.LoadWindow(12) for i in range(8): Button = KitWindow.GetControl(i+2) Button.SetState(IE_GUI_BUTTON_DISABLED) Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_OR) if not KitTable: RowCount = 1 else: RowCount = KitTable.GetRowCount() for i in range(RowCount): Button = KitWindow.GetControl(i+2) if not KitTable: if ClassID == 1: Kit=GemRB.GetVar("MAGESCHOOL") KitName = SchoolList.GetValue(i, 0) else: Kit = 0 KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 0) else: Kit = KitTable.GetValue(i,0) if ClassID == 1: if Kit: Kit = Kit - 21 KitName = SchoolList.GetValue(Kit, 0) else: if Kit: KitName = CommonTables.KitList.GetValue(Kit, 1) else: KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 0) Button.SetState(IE_GUI_BUTTON_ENABLED) Button.SetText(KitName) Button.SetVarAssoc("Class Kit",Kit) if i==0: GemRB.SetVar("Class Kit",Kit) Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, KitPress) BackButton = KitWindow.GetControl(12) BackButton.SetText(15416) DoneButton = KitWindow.GetControl(0) DoneButton.SetText(11973) DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR) TextAreaControl = KitWindow.GetControl(11) TextAreaControl.SetText(17247) DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress) BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CharGenCommon.BackPress) #KitPress() KitWindow.ShowModal(MODAL_SHADOW_NONE) return def KitPress(): Kit = GemRB.GetVar("Class Kit") if Kit == 0: KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 1) else: if ClassID==1: KitName = SchoolList.GetValue(Kit, 1) else: KitName = CommonTables.KitList.GetValue(Kit, 3) TextAreaControl.SetText(KitName) DoneButton.SetState(IE_GUI_BUTTON_ENABLED) return def NextPress(): #class ClassIndex = GemRB.GetVar ("Class")-1 Class = CommonTables.Classes.GetValue (ClassIndex, 5) MyChar = GemRB.GetVar ("Slot") GemRB.SetPlayerStat (MyChar, IE_CLASS, Class) KitIndex = GemRB.GetVar ("Class Kit") if Class == 1: GemRB.SetVar("MAGESCHOOL", KitIndex) #the same as the unusable field Kit = CommonTables.KitList.GetValue(KitIndex, 6) GemRB.SetPlayerStat (MyChar, IE_KIT, Kit) CharGenCommon.next()
sebastian-steinmann/kodi-repo
src/service.library.video/resources/lib/date_utils.py
Python
mit
1,538
0.001951
""" Class to handle date-parsing and formatting """ # Workaround for http://bugs.python.org/issue8098 import _strptime # pylint: disable=unused-import from datetime import datetime import time class DateUtils(object): """ Class to h
andle date-parsing and formatting """ date_format = '%Y-%m-%dT%H:%M:%SZ' json_date_format = '%Y-%m-%dT%H:%M:%S.%fZ' kodi_date_format = '%Y-%m-%d %H:%M' def get_str_date(self, date):
""" Formats datetime to str of format %Y-%m-%dT%H:%M:%SZ Arguments date: datetime """ return datetime.strftime(date, self.date_format) def parse_str_date(self, str_date): """ Parse a date of format %Y-%m-%dT%H:%M:%SZ to date Arguments str_date: str, %Y-%m-%dT%H:%M:%SZ """ return self._parse_str_date(str_date, self.date_format) def _parse_str_date(self, str_date, date_format): try: return datetime.strptime(str_date, date_format) except TypeError: return datetime(*(time.strptime(str_date, date_format)[0:6])) def parse_kodi_date(self, str_date): if not str_date: return None return self._parse_str_date(str_date, '%Y-%m-%d %H:%M:%S') def get_kodi_date_format(self, str_date): """ Returns a date on format %Y-%m-%dT%H:%M:%SZ as %Y-%m-%d %H:%M """ parsed_date = self._parse_str_date(str_date, self.json_date_format) return datetime.strftime(parsed_date, '%Y-%m-%d %H:%M:%S')
umitproject/network-admin
netadmin/networks/urls.py
Python
agpl-3.0
2,718
0.002575
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from django.conf.urls.defaults import *

urlpatterns = patterns('netadmin.networks.views',
    url(r'^host/(?P<object_id>\d+)/$', 'host_detail', name='host_detail'),
    url(r'^host/list/$', 'host_list', name='host_list'),
    url(r'^host/list/page/(?P<page>\d+)/$', 'host_list', name='host_list_page'),
    url(r'^host/new/$', 'host_create', name="host_new"),
    url(r'^host/edit/(?P<object_id>\d+)/$', 'host_update', name="host_update"),
    url(r'^host/delete/(?P<object_id>\d+)/$', 'host_delete', name="host_delete"),
    url(r'^network/(?P<object_id>\d+)/$', 'network_detail', name='network_detail'),
    url(r'^network/list/$', 'network_list', name='network_list'),
    url(r'^network/list/page/(?P<page>\d+)/$', 'network_list', name='network_list_page'),
    url(r'^network/new/$', 'network_create', name="network_new"),
    url(r'^network/edit/(?P<object_id>\d+)/$', 'network_update', name="network_update"),
    url(r'^network/delete/(?P<object_id>\d+)/$', 'network_delete', name="network_delete"),
    url(r'^network/events/(?P<object_id>\d+)/$', 'network_events', name='network_events'),
    url(r'^network/netmask-create/$', 'subnet_network', name='subnet_network'),
    url(r'/update/(?P<object_id>\d+)/$', 'network_select', name='network_select'),
    url(r'share/list/(?P<object_type>host|network)/(?P<object_id>\d+)/', 'share_list', name="share_list"),
    url(r'share/(?P<object_type>host|network)/(?P<object_id>\d+)/', 'share', name="share"),
    url(r'share/not/(?P<object_type>host|network)/(?P<object_id>\d+)/(?P<user_id>\d+)/', 'share_not', name="share_not"),
    url(r'share/edit/(?P<object_type>host|network)/(?P<object_id>\d+)/(?P<user_id>\d+)/', 'share_edit', name="share_edit"),
)
yoannMoreau/Evapo_EraInterim
python/utils.py
Python
cc0-1.0
20,707
0.026002
#-*- coding: utf-8 -*- ''' Created on 16 déc. 2013 @author: yoann Moreau All controls operations : return true if control ok ''' import os import errno from datetime import date,datetime,timedelta import ogr,osr import re import gdal import osr import numpy as np import numpy.ma as ma import
subprocess impor
t shutil import math from pyspatialite._spatialite import Row import scipy.ndimage as ndimage import pyproj as pp def checkForFile(pathToFile): if os.path.isfile(pathToFile): return True else: return False def createParamFile(pathFile,user,key): f = open(pathFile, 'w+') f.write("{\n") f.write(' "url" : "https://api.ecmwf.int/v1",\n') f.write('"key" : "'+key+'",\n') f.write('"email" : "'+user+'"\n') f.write("}") f.close() def make_sure_path_exists(path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def checkForFolder(pathToFolder): try: os.makedirs(pathToFolder) except OSError as exception: if exception.errno != errno.EEXIST: exit('Path for downloaded Era Interim could not be create. Check your right on the parent folder...') def checkForDate(dateC): #convert string to date from YYYY-MM-DD if len(dateC)==10: YYYY=dateC[0:4] MM=dateC[5:7] DD=dateC[8:10] if (YYYY.isdigit() and MM.isdigit() and DD.isdigit()): try: date(int(YYYY),int(MM),int(DD)) except ValueError: exit('Error on Date Format... please give a date in YYYY-MM-DD format') return date(int(YYYY),int(MM),int(DD)) else: exit('Error on Date Format... please give a date in YYYY-MM-DD format') else: exit('Error on Date Format... please give a date in YYYY-MM-DD format') def convertShpToExtend(pathToShp): """ reprojette en WGS84 et recupere l'extend """ driver = ogr.GetDriverByName('ESRI Shapefile') dataset = driver.Open(pathToShp) if dataset is not None: # from Layer layer = dataset.GetLayer() spatialRef = layer.GetSpatialRef() # from Geometry feature = layer.GetNextFeature() geom = feature.GetGeometryRef() spatialRef = geom.GetSpatialReference() #WGS84 outSpatialRef = osr.SpatialReference() outSpatialRef.ImportFromEPSG(4326) coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef) env = geom.GetEnvelope() pointMAX = ogr.Geometry(ogr.wkbPoint) pointMAX.AddPoint(env[1], env[3]) pointMAX.Transform(coordTrans) pointMIN = ogr.Geometry(ogr.wkbPoint) pointMIN.AddPoint(env[0], env[2]) pointMIN.Transform(coordTrans) return [pointMAX.GetPoint()[1],pointMIN.GetPoint()[0],pointMIN.GetPoint()[1],pointMAX.GetPoint()[0]] else: exit(" shapefile not found. Please verify your path to the shapefile") def is_float_re(element): _float_regexp = re.compile(r"^[-+]?(?:\b[0-9]+(?:\.[0-9]*)?|\.[0-9]+\b)(?:[eE][-+]?[0-9]+\b)?$").match return True if _float_regexp(element) else False def checkForExtendValidity(extendList): if len(extendList)==4 and all([is_float_re(str(x)) for x in extendList]) and extendList[0]>extendList[2] and extendList[1]<extendList[3]: if float(extendList[0]) > -180 and float(extendList[2]) <180 and float(extendList[1]) <90 and float(extendList[3]) > -90: extendArea=[str(x) for x in extendList] return extendArea else: exit('Projection given is not in WGS84. Please verify your -t parameter') else: exit('Area scpecified is not conform to a ymax xmin ymin xmax extend. 
please verify your declaration') def checkForTimeValidity(listTime): validParameters=('00','06','12','18') if len(listTime)>0 and isinstance(listTime, list) and all([x in validParameters for x in listTime]): return listTime else: exit('time parameters not conform to eraInterim posibility : '+ ",".join(validParameters)) def checkForStepValidity(listStep,typeData): validParameters=(0,3,6,9,12) if typeData=="forcast": if len(listStep)>0 and isinstance(listStep, list) and all([int(x) in validParameters for x in listStep]): listStep=[int(x) for x in listStep] return listStep else: exit('step parameters not conform to eraInterim posibility : '+ ",".join([str(x) for x in validParameters])) else: if len(listStep)>0: exit('step parameters not conform to eraInterim posibility : '+ ",".join([str(x) for x in validParameters])+ 'for analyse') else: return listStep def checkForGridValidity(grid): if (is_float_re(grid)): grid=float(grid) validParameters=(0.125,0.25,0.5,0.75,1.125,1.5,2,2.5,3) if grid in validParameters: return grid else: exit('grid parameters not conform to eraInterim posibility : '+ ",".join([str(x) for x in validParameters])) else: exit('grid parameters not conform to eraInterim posibility : '+ ",".join([str(x) for x in validParameters])) def create_request_sfc(dateStart,dateEnd, timeList,stepList,grid,extent,paramList,output,typeData=None): """ Genere la structure de requete sur les serveurs de l'ECMWF INPUTS:\n -date : au format annee-mois-jour\n -heure : au format heure:minute:seconde\n -coord : une liste des coordonnees au format [N,W,S,E]\n -dim_grille : taille de la grille en degre \n -output : nom & chemin du fichier resultat """ if typeData=='analyse': typeD='an' else: typeD='fc' struct = { 'dataset' : "interim", 'date' : dateStart.strftime("%Y-%m-%d")+"/to/"+dateEnd.strftime("%Y-%m-%d"), 'time' : "/".join(map(str, timeList)), 'stream' : "oper", 'step' : "/".join(map(str, stepList)), 'levtype' : "sfc", #pl -> pressure level ,sfc -> surface 'type' : typeD, #fc -> forcast , an -> analyse 'class' : "ei", 'param' : ".128/".join(map(str, paramList))+'.128', 'area' : "/".join(extent), 'grid' : str(grid)+"/"+str(grid), 'target' : output, 'format' : 'netcdf' } return struct def moveFile(inputImg,outputImg): "move a file to a directory" #TODO controls to check if exist #on déplace le fichier dans le bon répertoire shutil.move(inputImg, outputImg) def reprojRaster(pathToImg,output,shape,pathToShape): driver = ogr.GetDriverByName('ESRI Shapefile') dataSource = driver.Open(pathToShape, 0) layer = dataSource.GetLayer() srs = layer.GetSpatialRef() Xres=shape[1] Yres=shape[0] subprocess.call(["gdalwarp","-q","-s_srs","EPSG:4326","-t_srs",srs.ExportToWkt(),pathToImg,output,'-ts',str(Xres),str(Yres),'-overwrite','-dstnodata',"0"]) return output def convertNETCDFtoTIF(inputFile,outputFile,format='float'): #--convert netCDF to tif ds_in=gdal.Open('NETCDF:"'+inputFile+'"') metadata = ds_in.GetMetadata() scale=metadata['tp#scale_factor'] offset=metadata['tp#add_offset'] nodata=metadata['tp#_FillValue'] cols = ds_in.RasterXSize rows = ds_in.RasterYSize geotransform = ds_in.GetGeoTransform() originX = geotransform[0] originY = geotransform[3] pixelWidth = geotransform[1] pixelHeight = geotransform[5] nbBand= ds_in.RasterCount driver = gdal.GetDriverByName('GTiff') outRaster = driver.Create(outputFile, cols, rows, nbBand, gdal.GDT_Float32) outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight)) for b in range(1,nbBand+1): band = ds_in.GetRasterBand(b) arrayB = 
np.array(band.ReadAsArray(), dtype=format) np.putmask(arrayB,(arrayB==float(nodata)),0) #arrayB=numpy.mult
miguelgrinberg/heat
heat/engine/clients/os/trove.py
Python
apache-2.0
4,752
0
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from troveclient import client as tc from troveclient.openstack.common.apiclient import exceptions from heat.common import exception from heat.common.i18n import _ from heat.engine.clients import client_plugin from heat.engine
import constraints class TroveClientPlugin(client_plugin.ClientPlugin): exceptions_module = exceptions service_types = [DATABASE] = ['database'] def _create(self): con = self.context endpoint_type = self._get_client_option('trove', 'endpoint_type') args = {
'service_type': self.DATABASE, 'auth_url': con.auth_url or '', 'proxy_token': con.auth_token, 'username': None, 'password': None, 'cacert': self._get_client_option('trove', 'ca_file'), 'insecure': self._get_client_option('trove', 'insecure'), 'endpoint_type': endpoint_type } client = tc.Client('1.0', **args) management_url = self.url_for(service_type=self.DATABASE, endpoint_type=endpoint_type) client.client.auth_token = con.auth_token client.client.management_url = management_url return client def validate_datastore(self, datastore_type, datastore_version, ds_type_key, ds_version_key): if datastore_type: # get current active versions allowed_versions = self.client().datastore_versions.list( datastore_type) allowed_version_names = [v.name for v in allowed_versions] if datastore_version: if datastore_version not in allowed_version_names: msg = _("Datastore version %(dsversion)s " "for datastore type %(dstype)s is not valid. " "Allowed versions are %(allowed)s.") % { 'dstype': datastore_type, 'dsversion': datastore_version, 'allowed': ', '.join(allowed_version_names)} raise exception.StackValidationFailed(message=msg) else: if len(allowed_versions) > 1: msg = _("Multiple active datastore versions exist for " "datastore type %(dstype)s. " "Explicit datastore version must be provided. " "Allowed versions are %(allowed)s.") % { 'dstype': datastore_type, 'allowed': ', '.join(allowed_version_names)} raise exception.StackValidationFailed(message=msg) else: if datastore_version: msg = _("Not allowed - %(dsver)s without %(dstype)s.") % { 'dsver': ds_version_key, 'dstype': ds_type_key} raise exception.StackValidationFailed(message=msg) def is_not_found(self, ex): return isinstance(ex, exceptions.NotFound) def is_over_limit(self, ex): return isinstance(ex, exceptions.RequestEntityTooLarge) def is_conflict(self, ex): return isinstance(ex, exceptions.Conflict) def get_flavor_id(self, flavor): ''' Get the id for the specified flavor name. If the specified value is flavor id, just return it. :param flavor: the name of the flavor to find :returns: the id of :flavor: :raises: exception.FlavorMissing ''' flavor_id = None flavor_list = self.client().flavors.list() for o in flavor_list: if o.name == flavor: flavor_id = o.id break if o.id == flavor: flavor_id = o.id break if flavor_id is None: raise exception.FlavorMissing(flavor_id=flavor) return flavor_id class FlavorConstraint(constraints.BaseCustomConstraint): expected_exceptions = (exception.FlavorMissing,) def validate_with_client(self, client, flavor): client.client_plugin('trove').get_flavor_id(flavor)
hephestos/pythos
discovery/tasks.py
Python
gpl-3.0
3,656
0.000274
# import python modules import os import time import logging import multiprocessing # import django modules # import third party modules # import project specific model classes from config.models import Origin # import app specific utility classes # import app specific utility functions from .utils import packet_chunk from .utils import run_capture from .utils import read_pcap def discovery_task(origin_uuid="", offline=False, interface="", duration=0, filepath="", origin_description="" ): logging.basicConfig(filename="/tmp/pythos_debug.log", level=logging.DEBUG) m = multiprocessing.Manager() packets = m.Queue() multiprocessing.log_to_stderr(logging.INFO) num_processes = os.cpu_count() if not num_processes: num_processes = 2 pool = multiprocessing.Pool(processes=num_processes, maxtasksperchild=1) if offline: current_origin = Origin.objects.create(name="PCAP " + filepath, description=origin_description, sensor_flag=True, plant_flag=False ) discovery_process = multiprocessing.Process(target=read_pcap, args=(filepath, packets ) ) logging.info("Starting to read pcap file: " + filepath) else: try: current_origin = Origin.objects.get(uuid=origin_uuid) except: logging.error("Could not find specified origin: " + origin_uuid + " Aborting." ) return discovery_
process = multiprocessing.Process(target=run_capture, args=(interface, duration, packets
) ) logging.info("Starting live capture on: " + interface) discovery_process.start() logging.info("Starting " + str(num_processes) + " worker processes.") while discovery_process.is_alive() or not packets.empty(): num_packets = packets.qsize() chunk_size = max(num_packets//num_processes, 10000) logging.debug(str(num_packets) + " packets in queue.") if num_packets > chunk_size: chunk = m.Queue() for i in range(chunk_size): chunk.put(packets.get()) logging.debug("Processing chunk with size: " + str(chunk_size)) pool.apply_async(packet_chunk, args=(chunk, current_origin, packets ) ) elif not discovery_process.is_alive(): logging.debug("Processing last chunk.") pool.apply(packet_chunk, args=(packets, current_origin, packets)) time.sleep(10) pool.close() pool.join() if offline: logging.info("Pcap " + filepath + " has been processed successfully.") else: logging.info("Live capture on " + interface + " has been completed.")
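discovery_task above runs either live (interface capture) or offline (pcap replay), selected by the offline flag. A hedged call sketch; the path, UUID and duration are placeholders:

# Offline: read an existing capture and create a new Origin record for it.
discovery_task(offline=True,
               filepath="/tmp/example.pcap",
               origin_description="imported capture")

# Live: capture on eth0 for ten minutes against an existing Origin.
discovery_task(origin_uuid="00000000-0000-0000-0000-000000000000",
               interface="eth0",
               duration=600)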
ptorrestr/t2db_buffer
t2db_buffer/tests/test_buffer.py
Python
gpl-2.0
11,583
0.003367
import unittest import time import _thread as thread from threading import Barrier from threading imp
ort Lock from threading import Event from t2db_buffer.buffer import GlobalBuffer from t2db_buffer.buffer import BufferServer from t2db_objects.objects import Tweet from t2db_objects.objects import User from t2db_objects.objects impor
t TweetStreaming from t2db_objects.objects import TweetSearch from t2db_objects.objects import Streaming from t2db_objects.objects import Search from t2db_objects.objects import ObjectList from t2db_objects.tests.common import randomInteger from t2db_objects.tests.common import randomTweetStreaming from t2db_objects.tests.common import randomTweetSearch from t2db_objects.tests.common import randomStreaming from t2db_objects.tests.common import randomSearch from t2db_worker.parser import ParserStatus from t2db_worker.buffer_communicator import BufferCommunicator from t2db_worker.buffer_communicator import LocalBuffer from t2db_worker.tests.test_parser import getOneStatus getOneStatusLock = Lock() def getOneStatusTS(): getOneStatusLock.acquire() try: status = getOneStatus() finally: getOneStatusLock.release() return status def addOneElement(sharedList): status = getOneStatusTS() ps = ParserStatus(status) tweet = Tweet(ps.getTweet()) sharedList.addElement(tweet) def addManyElements(sharedList, randomElements): status = getOneStatusTS() localList = [] for i in range(0, randomElements): ps = ParserStatus(status) tweet = Tweet(ps.getTweet()) user = User(ps.getUser()) localList.append(tweet) localList.append(user) sharedList.addManyElements(localList) def oneThread(barrier, fun, *args): fun(*args) barrier.wait() def oneThreadUpSync(barrier, fun, *args): barrier.wait() fun(*args) def oneThreadDoubleSync(barrier1, barrier2, fun, *args): barrier1.wait() fun(*args) barrier2.wait() def createData(base): status = getOneStatusTS() randomTweets = base + randomInteger(99) + 1 tweetList = ObjectList() userList = ObjectList() streamingList = ObjectList() searchList = ObjectList() for i in range(base, randomTweets): status["id"] = i status["user"]["id"] = i ps = ParserStatus(status) tweet = Tweet(ps.getTweet()) user = User(ps.getUser()) tweetList.append(tweet) userList.append(user) streamingList.append(TweetStreaming(randomTweetStreaming(i, 1))) searchList.append(TweetSearch(randomTweetSearch(i, 1))) return tweetList, userList, streamingList, searchList sharedListDataLock = Lock() sharedListData = [] idNumber = 0 def fakeClient(host, port): global idNumber global sharedListDataLock global sharedListData sharedListDataLock.acquire() try: [tweetList, userList, streamingList, searchList] = createData(idNumber) idNumber += len(tweetList.list) finally: sharedListDataLock.release() bc = BufferCommunicator(host, port) bc.sendData(tweetList, userList, streamingList, searchList) sharedListDataLock.acquire() try: sharedListData.append(tweetList) sharedListData.append(userList) sharedListData.append(streamingList) sharedListData.append(searchList) finally: sharedListDataLock.release() """ class TestSharedElementList(unittest.TestCase): def setUp(self): self.sharedList = SharedElementList() def test_addElement(self): addOneElement(self.sharedList) self.assertEqual(len(self.sharedList.elementList), 1) def test_addManyElements(self): randomElements = randomInteger(100) addManyElements(self.sharedList, randomElements) self.assertEqual(len(self.sharedList.elementList), randomElements*2) def test_addTwoThreads(self): barrier = Barrier(2) thread.start_new_thread(oneThread, (barrier, addOneElement, self.sharedList,)) addOneElement(self.sharedList) barrier.wait() self.assertEqual(len(self.sharedList.elementList), 2) def test_addTwoThreadsManyElements(self): barrier = Barrier(2) randomElements = randomInteger(100) thread.start_new_thread(oneThread, (barrier, addManyElements, self.sharedList,randomElements,)) 
addManyElements(self.sharedList, randomElements) barrier.wait() totalElements = randomElements*2*2 self.assertEqual(len(self.sharedList.elementList), totalElements) def test_addManyThreadsManyElements(self): randomThreads = randomInteger(8) + 2 #Always graeter or equal than 2 barrier = Barrier(randomThreads + 1)# Include main thread randomElements = randomInteger(100) for i in range(0, randomThreads): thread.start_new_thread(oneThread, (barrier, addManyElements, self.sharedList, randomElements,)) barrier.wait() totalElements = randomElements*randomThreads*2 self.assertEqual(len(self.sharedList.elementList), totalElements) def test_addGetAllElementsAndClean(self): randomElements = randomInteger(100) addManyElements(self.sharedList, randomElements) copyElementList = self.sharedList.getAllElementsAndClean() self.assertEqual(len(self.sharedList.elementList), 0) self.assertEqual(len(copyElementList), randomElements*2) def test_addGetAllElementsAndCleanWhileAdding(self): barrier1 = Barrier(2) barrier2 = Barrier(2) randomElements = randomInteger(100) thread.start_new_thread(oneThreadDoubleSync, (barrier1, barrier2, addManyElements, self.sharedList,randomElements,)) barrier1.wait() copyElementList = self.sharedList.getAllElementsAndClean() barrier2.wait() totalElements = len(copyElementList) + len(self.sharedList.elementList) self.assertEqual(randomElements*2, totalElements) """ def countData(lb, tweetList, userList, tweetStreamingList, tweetSearchList): # count originals for tweet in tweetList.list: try: lb.addTweet(tweet) except: continue for user in userList.list: try: lb.addUser(user) except: continue for tweetStreaming in tweetStreamingList.list: try: lb.addTweetStreaming(tweetStreaming) except: continue for tweetSearch in tweetSearchList.list: try: lb.addTweetSearch(tweetSearch) except: continue return lb class TestServer(unittest.TestCase): def setUp(self): global sharedListData sharedListData = [] def test_serverOneClient(self): global sharedListData # Create event stopEvent = Event() # Create server barrier sBarrier = Barrier(2) # Create server bs = BufferServer(13001, 5, stopEvent, sBarrier, 5, 5, "http://localhost:8000", "quiltro", "perroCallejero") streamingList = ObjectList() streamingList.append(Streaming(randomStreaming(1))) bs.communicator.service.postStreamings(streamingList) searchList = ObjectList() searchList.append(Search(randomSearch(1))) bs.communicator.service.postSearches(searchList) bs.start() # Create barrier for client cBarrier = Barrier(2) # Create client thread.start_new_thread(oneThread, (cBarrier, fakeClient, bs.getHostName(), 13001,)) cBarrier.wait() time.sleep(5) # Stop server stopEvent.set() # Wait for server sBarrier.wait() time.sleep(5) # Get data and compare numberTweets = len(bs.globalBuffer.localBuffer.tweetList.list) numberUsers = len(bs.globalBuffer.localBuffer.userList.list) numberTweetStreaming = len(bs.globalBuffer.localBuffer.tweetStreamingList.list) numberTweetSearch = len(bs.globalBuffer.localBuffer.tweetSearchList.list) self.assertEqual(numberTweets, 0) self.assertEqual(numberUsers, 0) self.assertEqual(numberTweetStreaming
wpjesus/codematch
ietf/community/management/commands/update_doc_change_dates.py
Python
bsd-3-clause
1,440
0.002083
import sys

from django.core.management.base import BaseCommand

from ietf.community.constants import SIGNIFICANT_STATES
from ietf.community.models import DocumentChangeDates
from ietf.doc.models import Document


class Command(BaseCommand):
    help = (u"Update drafts in community lists by reviewing their rules")

    def handle(self, *args, **options):
        documents = Document.objects.filter(type='draft')
        index = 1
        total = documents.count()
        for doc in documents.iterator():
            (changes, created) = DocumentChangeDates.objects.get_or_create(document=doc)
            new_version = doc.latest_event(type='new_revision')
            normal_change = doc.latest_event()
            significant_change = None
            for event in doc.docevent_set.filter(type='changed_document'):
                for state in SIGNIFICANT_STATES:
                    if ('<b>%s</b>' % state) in event.desc:
                        significant_change = event
                        break
            changes.new_version_date = new_version and new_version.time.date()
            changes.normal_change_date = normal_change and normal_change.time.date()
            changes.significant_change_date = significant_change and significant_change.time.date()
            changes.save()
            sys.stdout.write('Document %s/%s\r' % (index, total))
            sys.stdout.flush()
            index += 1
        print
okfn/owslib
owslib/wfs.py
Python
bsd-3-clause
930
0.008602
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2009 STFC <http://www.stfc.ac.uk>
#
# Authors :
#          Dominic Lowe <dominic.lowe@stfc.ac.uk>
#
# Contact email: dominic.lowe@stfc.ac.uk
# =============================================================================

"""
Web Feature Server (WFS) methods and metadata. Factory function.
"""

from feature import wfs100, wfs200


def WebFeatureService(url, version='1.0.0', xml=None):
    ''' wfs factory function, returns a version specific WebFeatureService object '''
    if version in ['1.0', '1.0.0']:
        return wfs100.WebFeatureService_1_0_0.__new__(wfs100.WebFeatureService_1_0_0, url, version, xml)
    elif version in ['2.0', '2.0.0']:
        return wfs200.WebFeatureService_2_0_0.__new__(wfs200.WebFeatureService_2_0_0, url, version, xml)
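A short usage sketch of the factory above; the endpoint URL is a placeholder:

# Dispatches to wfs100.WebFeatureService_1_0_0 for 1.0-series versions
# and to wfs200.WebFeatureService_2_0_0 for 2.0-series versions.
wfs = WebFeatureService('http://example.com/geoserver/wfs', version='1.0.0')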
alexeiramone/django-default-template
manage.py
Python
mit
348
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys

if __name__ == "__main__":
    settings_name = "settings.local" if os.name == 'nt' else "settings.remote"
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_name)

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
MonicaHsu/truvaluation
venv/lib/python2.7/site-packages/pymysql/tests/test_basic.py
Python
mit
12,154
0.003291
import pymysql.cursors from pymysql.tests import base from pymysql import util from pymysql.err import ProgrammingError import time import datetime __all__ = ["TestConversion", "TestCursor", "TestBulkInserts"] class TestConversion(base.PyMySQLTestCase): def test_datatypes(self): """ test every data type """ conn = self.connections[0] c = conn.cursor() c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)") try: # insert values v = (True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset), datetime.date(1988,2,2), datetime.datetime.now(), datetime.timedelta(5,6), datetime.time(16,32), time.localtime()) c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v) c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes") r = c.fetchone() self.assertEqual(util.int2byte(1), r[0]) self.assertEqual(v[1:8], r[1:8]) # mysql throws away microseconds so we need to check datetimes # specially. additionally times are turned into timedeltas. self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8]) self.assertEqual(v[9], r[9]) # just timedeltas self.assertEqual(datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10]) self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1]) c.execute("delete from test_datatypes") # check nulls c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12) c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes") r = c.fetchone() self.assertEqual(tuple([None] * 12), r) c.execute("delete from test_datatypes")
# check sequence type c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)") c.execute("select l from test_datatypes where i in %s order by i", ((2,6),)) r = c.fetchall() self.assertEqual(((4,),(8,)), r) finally: c.execute("drop table test_datatypes") def test_dict(sel
f): """ test dict escaping """ conn = self.connections[0] c = conn.cursor() c.execute("create table test_dict (a integer, b integer, c integer)") try: c.execute("insert into test_dict (a,b,c) values (%(a)s, %(b)s, %(c)s)", {"a":1,"b":2,"c":3}) c.execute("select a,b,c from test_dict") self.assertEqual((1,2,3), c.fetchone()) finally: c.execute("drop table test_dict") def test_string(self): conn = self.connections[0] c = conn.cursor() c.execute("create table test_dict (a text)") test_value = "I am a test string" try: c.execute("insert into test_dict (a) values (%s)", test_value) c.execute("select a from test_dict") self.assertEqual((test_value,), c.fetchone()) finally: c.execute("drop table test_dict") def test_integer(self): conn = self.connections[0] c = conn.cursor() c.execute("create table test_dict (a integer)") test_value = 12345 try: c.execute("insert into test_dict (a) values (%s)", test_value) c.execute("select a from test_dict") self.assertEqual((test_value,), c.fetchone()) finally: c.execute("drop table test_dict") def test_big_blob(self): """ test tons of data """ conn = self.connections[0] c = conn.cursor() c.execute("create table test_big_blob (b blob)") try: data = "pymysql" * 1024 c.execute("insert into test_big_blob (b) values (%s)", (data,)) c.execute("select b from test_big_blob") self.assertEqual(data.encode(conn.charset), c.fetchone()[0]) finally: c.execute("drop table test_big_blob") def test_untyped(self): """ test conversion of null, empty string """ conn = self.connections[0] c = conn.cursor() c.execute("select null,''") self.assertEqual((None,u''), c.fetchone()) c.execute("select '',null") self.assertEqual((u'',None), c.fetchone()) def test_timedelta(self): """ test timedelta conversion """ conn = self.connections[0] c = conn.cursor() c.execute("select time('12:30'), time('23:12:59'), time('23:12:59.05100')") self.assertEqual((datetime.timedelta(0, 45000), datetime.timedelta(0, 83579), datetime.timedelta(0, 83579, 51000)), c.fetchone()) def test_datetime(self): """ test datetime conversion """ conn = self.connections[0] c = conn.cursor() dt = datetime.datetime(2013,11,12,9,9,9,123450) try: c.execute("create table test_datetime (id int, ts datetime(6))") c.execute("insert into test_datetime values (1,'2013-11-12 09:09:09.12345')") c.execute("select ts from test_datetime") self.assertEqual((dt,),c.fetchone()) except ProgrammingError: # User is running a version of MySQL that doesn't support msecs within datetime pass finally: c.execute("drop table if exists test_datetime") class TestCursor(base.PyMySQLTestCase): # this test case does not work quite right yet, however, # we substitute in None for the erroneous field which is # compatible with the DB-API 2.0 spec and has not broken # any unit tests for anything we've tried. 
#def test_description(self): # """ test description attribute """ # # result is from MySQLdb module # r = (('Host', 254, 11, 60, 60, 0, 0), # ('User', 254, 16, 16, 16, 0, 0), # ('Password', 254, 41, 41, 41, 0, 0), # ('Select_priv', 254, 1, 1, 1, 0, 0), # ('Insert_priv', 254, 1, 1, 1, 0, 0), # ('Update_priv', 254, 1, 1, 1, 0, 0), # ('Delete_priv', 254, 1, 1, 1, 0, 0), # ('Create_priv', 254, 1, 1, 1, 0, 0), # ('Drop_priv', 254, 1, 1, 1, 0, 0), # ('Reload_priv', 254, 1, 1, 1, 0, 0), # ('Shutdown_priv', 254, 1, 1, 1, 0, 0), # ('Process_priv', 254, 1, 1, 1, 0, 0), # ('File_priv', 254, 1, 1, 1, 0, 0), # ('Grant_priv', 254, 1, 1, 1, 0, 0), # ('References_priv', 254, 1, 1, 1, 0, 0), # ('Index_priv', 254, 1, 1, 1, 0, 0), # ('Alter_priv', 254, 1, 1, 1, 0, 0), # ('Show_db_priv', 254, 1, 1, 1, 0, 0), # ('Super_priv', 254, 1, 1, 1, 0, 0), # ('Create_tmp_table_priv', 254, 1, 1, 1, 0, 0), # ('Lock_tables_priv', 254, 1, 1, 1, 0, 0), # ('Execute_priv', 254, 1, 1, 1, 0, 0), # ('Repl_slave_priv', 254, 1, 1, 1, 0, 0), # ('Repl_client_priv', 254, 1, 1, 1, 0, 0), # ('Create_view_priv', 254, 1, 1, 1, 0, 0), # ('Show_view_priv', 254, 1, 1, 1, 0, 0), # ('Create_routine_priv', 254, 1, 1, 1, 0, 0), # ('Alter_routine_priv', 254, 1, 1, 1, 0, 0), # ('Create_user_priv', 254, 1, 1, 1, 0, 0), # ('Event_priv', 254, 1, 1, 1, 0, 0), # ('Trigger_priv', 254, 1, 1, 1, 0, 0), # ('ssl_type', 254, 0, 9, 9, 0, 0), # ('ssl_cipher', 252, 0, 65535, 65535, 0, 0), # ('x509_issuer', 252, 0, 65535, 65535, 0, 0), # ('x509_subject', 252, 0, 65535, 65535, 0, 0), # ('max_questions', 3, 1, 11, 11, 0, 0), # ('max_updates', 3, 1, 11, 11, 0, 0), # ('max_connections', 3, 1, 11, 11, 0, 0), # ('max_user_connections', 3, 1, 11, 11, 0