repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
EmreAtes/spack | var/spack/repos/builtin/packages/r-mda/package.py | Python | lgpl-2.1 | 1,727 | 0.000579 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
fr | om spack import *
class RMda(RPackage):
"""Mixture and flexible discriminant analysis, multivariate adaptive
regression splines (MARS), BRUTO."""
homepage = "https://cran.r-project.org/package=mda"
url = "https://cran.r-project.org/src/contrib/mda_0.4-9.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/mda"
| version('0.4-9', '2ce1446c4a013e0ebcc1099a00269ad9')
depends_on('r@1.9.0:')
depends_on('r-class', type=('build', 'run'))
|
UManPychron/pychron | pychron/core/ui/image_editor.py | Python | apache-2.0 | 1,154 | 0 | # ========================== | =====================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by | applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pychron.core.ui.factory import toolkit_factory
# ============= standard library imports ========================
# ============= local library imports ==========================
ImageEditor = toolkit_factory('image_editor', 'ImageEditor')
# ============= EOF =============================================
|
bsmr-eve/Pyfa | eos/effects/shipbonusmissilelauncherheavyrofatc1.py | Python | gpl-3.0 | 300 | 0.006667 | # shipBonusMissileLauncherHeavyROFATC1
#
# Used by:
# Shi | p: Vangel
type = "passive"
def handler(fit, ship, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Missile Launcher Heavy",
"speed", ship.getModifiedItemAttr("shipBon | usATC1"))
|
procangroup/edx-platform | common/djangoapps/student/apps.py | Python | agpl-3.0 | 603 | 0.001658 | """
Configuration for the ``student`` Django application.
"""
from __future__ import absolute_import
from django.apps import AppConfig
from django.contrib.auth.signals import user_logged_in
class Stud | entConfig(AppConfig):
"""
Default configuration for the `` | student`` application.
"""
name = 'student'
def ready(self):
from django.contrib.auth.models import update_last_login as django_update_last_login
user_logged_in.disconnect(django_update_last_login)
from .signals.receivers import update_last_login
user_logged_in.connect(update_last_login)
|
pylada/pylada-light | src/pylada/vasp/incar/__init__.py | Python | gpl-3.0 | 10,395 | 0.002116 | ###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Subpackage defining vasp incar parameters. """
__docformat__ = "restructuredtext en"
__all__ = ["SpecialVaspParam", "ExtraElectron", "Algo", "Precision", "Ediff",
"Encut", "FFTGrid", "Restart", "UParams", "IniWave", 'Ediffg', "EncutGW",
"Incar", "Magmom", 'Npar', 'Boolean', 'Integer', 'Choices', 'PrecFock',
"System", 'PartialRestart', 'Relaxation', 'Smearing', 'Lsorbit']
from ._params import SpecialVaspParam, ExtraElectron, Algo, Precision, Ediff,\
Encut, FFTGrid, PartialRestart, Restart, UParams, IniWave, Magmom,\
Npar, Boolean, Integer, PrecFock, NonScf, Ediffg, Choices, \
EncutGW, System, Relaxation, Smearing, Lsorbit
from ...misc import add_setter
class Incar(object):
""" Base class containing vasp input parameters.
The following assumes you know how to write an INCAR. Although you won't
need to anymore. This class separates vasp parameters from methods to
launch and control vasp.
There are two kinds of parameters:
- Normal parameters which will simply print "NAME = VALUE" to the incar
- Special parameters which enhance the default behavior of vasp
The special parameters achieve a variety of design-goals. For instance,
when passed a previous VASP run, :py:attr:`restart` will set ISTART_ and
ICHARG_ accordingly, as well as copy the relevant files. If it is a
:py:class:`Restart` object, it will also copy the CONTCAR file from the
previous run (default behavior). If it as :py:class:`PartialRestart`,
then the CONTCAR is not copied, which allows restarting from the charge
with a slightly different structure than it was generated for.
"""
def __init__(self):
super(Incar, self).__init__()
# first, actually sets these two variables by hand, since they are used in __setattr__.
super(Incar, self).__setattr__("params", {})
super(Incar, self).__setattr__("special", {})
self.add_param = "addgrid", None
self.add_param = "ispin", 1
self.add_param = "istart", None
self.add_param = "isym", None
self.add_param = "lmaxfockae", None
self.add_param = "lmaxmix", 4
self.add_param = "lvhar", False
self.add_param = "lorbit", None
self.add_param = "nbands", None
self.add_param = "nomega", None
self.add_param = "nupdown", None
self.add_param = "symprec", None
# objects derived from SpecialVaspParams will be recognized as such and can
# be added without further fuss.
self.extraelectron = ExtraElectron(0)
self.algo = Algo()
self.precision = Precision("accurate")
self.ediff = Ediff(1e-4)
self.ediffg = Ediffg(None)
self.encut = Encut(None)
self.encutgw = EncutGW(None)
self.fftgrid = FFTGrid(None)
self.restart = Restart(None)
self.U_verbosity = UParams("occupancy")
self.magmom = Magmom()
self.npar = Npar(None)
self.precfock = PrecFock(Non | e)
self.nonscf = NonScf(False)
self.system = System(True)
self.smearing = Smearing(None)
self.relaxation = Relaxation(None)
self.lsorbit = Lsorbit(None)
self.lwave = Boolean("lwave", False)
self.lcharg = Boolean("lcharg", True)
self.lvtot = Boolean("lvtot", Fal | se)
self.lrpa = Boolean("lrpa", None)
self.loptics = Boolean("loptics", None)
self.lpead = Boolean("lpead", None)
self.lplane = Boolean("lplane", None)
self.nelm = Integer("nelm", None)
self.nelmin = Integer("nelmin", None)
self.nelmdl = Integer("nelmdl", None)
def incar_lines(self, **kwargs):
""" List of incar lines. """
# gathers special parameters.
# Calls them first in case they change normal key/value pairs.
result, specials, comments = [], [], []
for key, value in self.special.items():
if value.value is None:
continue
line = value.incar_string(**kwargs)
# Then calls a second time in case they change each other.
for key, value in self.special.items():
if value.value is None:
continue
line = value.incar_string(**kwargs)
if line is None:
continue
line = line.rstrip().lstrip()
if line[-1] != '\n':
line += '\n'
if line[0] == '#':
comments.append(line)
continue
if '=' in line and line.find('=') < 18:
line = "{0: <{1}}".format(' ', 19 - line.find('=')) + line
specials.append(line)
# prints key/value pairs
for key, value in self.params.items():
if value is None:
continue
if isinstance(value, bool):
value = ".TRUE." if value else ".FALSE."
else:
try:
value = str(value)
except ValueError:
raise ValueError(
"Could not convert vasp parameter {0} to string: {1}.".format(key, value))
result.append("{0: >18s} = {1}\n".format(key.upper(), value))
# adds special parameter lines.
result.extend(specials)
result = sorted(result, key=lambda a: a.lstrip()[0])
result.extend(comments)
return result
@add_setter
def add_param(self, args):
""" Adds/sets a vasp parameter.
Consists of a key value pair.
>>> vasp.add_param = "ispin", 2
This will result in the INCAR as "ISPIN = 2". Once set, the value can be accessed directly:
>>> vasp.add_param = "ispin", 2
>>> vasp.ispin = 1
>>> print vasp.ispin # prints 1
"""
key, value = args
if isinstance(value, SpecialVaspParam):
if key in self.params:
del self.params[key] # one or other dictionary.
self.special[key] = value
else:
if key in self.special:
del self.special[key] # one or other dictionary.
self.params[key] = value
def __getattr__(self, name):
""" Gets a VASP parameter from standard and special dictionaries. """
if name in self.params:
return self.params[name]
elif name in self.special:
return self.special[name].value
raise AttributeError("Unknown parameter " + name)
def __setattr__(self, name, value):
""" Sets a VASP parameter to standard and special dictionaries. """
if isinstance(value, SpecialVaspParam):
if name in self.params:
del self.params[name]
self.special[name] = value
elif name in self.params:
self.params[name] = value
elif name in sel |
ZhizhongPan/LeetCode | AddTwoNumbers/Solution.py | Python | mit | 193 | 0.010363 | fr | om _DS import ListNode
class Solution:
def add_two_numbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
| |
amitt001/Analytics-App | API/rate/__init__.py | Python | mit | 19 | 0.052632 | impo | rt rate_op | inion |
Azure/azure-sdk-for-python | sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2019_09_01/operations/_operations.py | Python | mit | 4,671 | 0.004282 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License | .txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# ----- | ---------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databox.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationList"]
"""This method gets all the operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.OperationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.DataBox/operations'} # type: ignore
|
gnarula/eden_deployment | modules/s3db/cap.py | Python | mit | 78,715 | 0.007305 | # -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3CAPModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_gis_location_xml_post_parse",
"cap_gis_location_xml_post_render",
]
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ["cap_alert",
"cap_alert_represent",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
]
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed") | ,
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
| "aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSqual |
fboers/jumegX | scripts/jumeg_tsv.py | Python | bsd-3-clause | 1,968 | 0.03252 | #!/usr/bin/env python
"""
JuMEG TSV Time Series Viewer
F | B 26.02.2015
last updae FB 26.02.2015
"""
import sys, getopt, os, os.path
import mne
#fout="/localdata/frank/data/MEG94T/mne/110058/MEG94T/121001_1331/1/110058_MEG94T_121001_1331_1_c,rfDC-float32,30sec-raw.fif'
#raw.save( fname,format='single',tmax=30.0)import wx
# jumeg_tsv.py --fname=110058_MEG94T_121001_1331_1_c,rfDC-raw.fif --path=/localdata/frank/data/MEG94T/mne/110058/MEG94T/121001_1331/1
# jumeg_ts | v.py --fname=110058_MEG94T_121001_1331_1_c,rfDC_30sec-raw.fif --path=/localdata/frank/data/MEG94T/mne/110058/MEG94T/121001_1331/1
#--- jumegs functions
from jumeg.tsvgl.utils.jumeg_tsv_utils import jumeg_tsv_utils_get_args
from jumeg.tsvgl.jumeg_tsv_gui import jumeg_tsv_gui
#from jumeg.tsv.jumeg_tsv_gui_orig import jumeg_tsv_gui
def run():
opt,parser = jumeg_tsv_utils_get_args()
if opt.debug:
opt.verbose = True
opt.fname= "205382_MEG94T_120904_1310_2_c,rfDC-raw.fif"
opt.path = os.environ['HOME'] + "/MEGBoers/data/exp/MEG94T/mne/205382/MEG94T/120904_1310/2/"
elif opt.dd:
opt.fname='200098_leda_test_10_raw.fif'
opt.verbose = True
elif opt.ddd:
opt.fname='200098_leda_test_60_raw.fif'
opt.verbose = True
if opt.verbose:
for k,v in vars(opt).iteritems():
if v:
print "---> " + k +" : "+ str( v )
else:
print "---> " + k + " : None"
print"\n"
jumeg_tsv_gui(fname=opt.fname,path=opt.path,verbose=opt.verbose,debug=opt.debug,experiment=opt.experiment,
duration=opt.duration,start=opt.start,n_channels=opt.n_channels,n_cols=opt.n_cols,bads=opt.bads)
is_main = (__name__ == '__main__')
if is_main:
run()
#p='/localdata/frank/data/MEG94T/mne/110058/MEG94T/121001_1331/1'
#f='110058_MEG94T_121001_1331_1_c,rfDC_30sec-raw.fif'
#ffif=p+'/'+f
#raw=mne.io.Raw(ffif,preload=True)
|
parinporecha/backend_gtgonline | GTG/gtk/browser/custominfobar.py | Python | gpl-3.0 | 8,564 | 0 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2012 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any | later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------ | -----
import gtk
import threading
from GTG import _
from GTG.backends.backendsignals import BackendSignals
from GTG.tools.networkmanager import is_connection_up
class CustomInfoBar(gtk.InfoBar):
'''
A gtk.InfoBar specialized for displaying errors and requests for
interaction coming from the backends
'''
AUTHENTICATION_MESSAGE = _("The <b>%s</b> synchronization service cannot "
"login with the supplied authentication data "
"and has been disabled. To retry the login, "
"re-enable the service.")
NETWORK_MESSAGE = _("Due to a network problem, I cannot contact "
"the <b>%s</b> synchronization service.")
DBUS_MESSAGE = _("Cannot connect to DBus, I've disabled "
"the <b>%s</b> synchronization service.")
def __init__(self, req, browser, vmanager, backend_id):
'''
Constructor, Prepares the infobar.
@param req: a Requester object
@param browser: a TaskBrowser object
@param vmanager: a ViewManager object
@param backend_id: the id of the backend linked to the infobar
'''
super(CustomInfoBar, self).__init__()
self.req = req
self.browser = browser
self.vmanager = vmanager
self.backend_id = backend_id
self.backend = self.req.get_backend(backend_id)
def get_backend_id(self):
'''
Getter function to return the id of the backend for which this
gtk.InfoBar was created
'''
return self.backend_id
def _populate(self):
'''Setting up gtk widgets'''
content_hbox = self.get_content_area()
content_hbox.set_homogeneous(False)
self.label = gtk.Label()
self.label.set_line_wrap(True)
self.label.set_alignment(0.5, 0.5)
self.label.set_justify(gtk.JUSTIFY_FILL)
content_hbox.pack_start(self.label, True, True)
def _on_error_response(self, widget, event):
'''
Signal callback executed when the user acknowledges the error displayed
in the infobar
@param widget: not used, here for compatibility with signals callbacks
@param event: the code of the gtk response
'''
self.hide()
if event == gtk.RESPONSE_ACCEPT:
self.vmanager.configure_backend(backend_id=self.backend_id)
def set_error_code(self, error_code):
'''
Sets this infobar to show an error to the user
@param error_code: the code of the error to show. Error codes are
listed in BackendSignals
'''
self._populate()
self.connect("response", self._on_error_response)
backend_name = self.backend.get_human_name()
if error_code == BackendSignals.ERRNO_AUTHENTICATION:
self.set_message_type(gtk.MESSAGE_ERROR)
self.label.set_markup(self.AUTHENTICATION_MESSAGE % backend_name)
self.add_button(_('Configure synchronization service'),
gtk.RESPONSE_ACCEPT)
self.add_button(_('Ignore'), gtk.RESPONSE_CLOSE)
elif error_code == BackendSignals.ERRNO_NETWORK:
if not is_connection_up():
return
self.set_message_type(gtk.MESSAGE_WARNING)
self.label.set_markup(self.NETWORK_MESSAGE % backend_name)
# FIXME: use gtk stock button instead
self.add_button(_('Ok'), gtk.RESPONSE_CLOSE)
elif error_code == BackendSignals.ERRNO_DBUS:
self.set_message_type(gtk.MESSAGE_WARNING)
self.label.set_markup(self.DBUS_MESSAGE % backend_name)
self.add_button(_('Ok'), gtk.RESPONSE_CLOSE)
self.show_all()
def set_interaction_request(self, description, interaction_type, callback):
'''
Sets this infobar to request an interaction from the user
@param description: a string describing the interaction needed
@param interaction_type: a string describing the type of interaction
(yes/no, only confirm, ok/cancel...)
@param callback: the function to call when the user provides the
feedback
'''
self._populate()
self.callback = callback
self.set_message_type(gtk.MESSAGE_INFO)
self.label.set_markup(description)
self.connect("response", self._on_interaction_response)
self.interaction_type = interaction_type
if interaction_type == BackendSignals().INTERACTION_CONFIRM:
self.add_button(_('Confirm'), gtk.RESPONSE_ACCEPT)
elif interaction_type == BackendSignals().INTERACTION_TEXT:
self.add_button(_('Continue'), gtk.RESPONSE_ACCEPT)
self.show_all()
def _on_interaction_response(self, widget, event):
'''
Signal callback executed when the user gives the feedback for a
requested interaction
@param widget: not used, here for compatibility with signals callbacks
@param event: the code of the gtk response
'''
if event == gtk.RESPONSE_ACCEPT:
if self.interaction_type == BackendSignals().INTERACTION_TEXT:
self._prepare_textual_interaction()
print "done"
elif self.interaction_type == BackendSignals().INTERACTION_CONFIRM:
self.hide()
threading.Thread(target=getattr(self.backend,
self.callback)).start()
def _prepare_textual_interaction(self):
'''
Helper function. gtk calls to populate the infobar in the case of
interaction request
'''
title, description\
= getattr(self.backend,
self.callback)("get_ui_dialog_text")
self.dialog = gtk.Window() # type = gtk.WINDOW_POPUP)
self.dialog.set_title(title)
self.dialog.set_transient_for(self.browser.window)
self.dialog.set_destroy_with_parent(True)
self.dialog.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self.dialog.set_modal(True)
# self.dialog.set_size_request(300,170)
vbox = gtk.VBox()
self.dialog.add(vbox)
description_label = gtk.Label()
description_label.set_justify(gtk.JUSTIFY_FILL)
description_label.set_line_wrap(True)
description_label.set_markup(description)
align = gtk.Alignment(0.5, 0.5, 1, 1)
align.set_padding(10, 0, 20, 20)
align.add(description_label)
vbox.pack_start(align)
self.text_box = gtk.Entry()
self.text_box.set_size_request(-1, 40)
align = gtk.Alignment(0.5, 0.5, 1, 1)
align.set_padding(20, 20, 20, 20)
align.add(self.text_box)
vbox.pack_start(align)
button = gtk.Button(stock=gtk.STOCK_OK)
button.connect("clicked", self._on_text_confirmed)
button.set_size_request(-1, 40)
vbox.pack_start(button, False)
self.dialog.show_all()
self.hide()
def _on_text_confirmed(self, widget):
'''
Signal callback, used when the |
fedora-infra/faitout | faitout/default_config.py | Python | gpl-3.0 | 2,128 | 0.00047 | #-*- coding: utf-8 -*-
"""
default_config - the default configuration allowing to run this project
quickly and easily from the sources.
(c) 2013 - Copyright Red Hat Inc.
Authors:
- Pierre-Yves Chibon <pingou@pingoured.fr>
Distributed under License GPLv3 or later
You can find a copy of this license on the website
http://www.gnu.org/licenses/gpl.html
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
# url to the database server:
DB_URL = 'sqlite:////var/tmp/faitout_dev.sqlite'

# url to the admin database:
ADMIN_DB_URL = 'sqlite:////var/tmp/faitout_dev_admin.sqlite'

# Default port of a postgresql server
DB_PORT = 5432

# Host of the postgresql server to return to in the db url
DB_HOST = '127.0.0.1'

# Frequency used by the cron job (the original comment was truncated here;
# presumably this must match how often faitout_cron is scheduled -- TODO confirm)
CRON_FREQUENCY = 30

# URL at which the application is made available
URL = 'http://127.0.0.1:5000'

# The maximum number of simultaneous connection allowed at the same time
MAX_CONNECTIONS = 3

# List of IPs allowed to get as many connections as they want
# This is useful if for example you use faitout in combination with a jenkins
# instance or some other sort of CI system.
IP_UNLIMITED = ['127.0.0.1']

# List of IPs not allowed to get any connections
IP_BLOCKED = []

# Restrict the use of faitout to those IPs only
# This is useful either for testing or if you want to restrict your faitout
# instance to a group of people and not make it world accessible.
IP_ONLY = []
|
chromium/chromium | tools/traffic_annotation/scripts/generator_utils_tests.py | Python | bsd-3-clause | 5,005 | 0.002997 | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for generator_utils.py
"""
import os
import unittest
import generator_utils
# Absolute path to chrome/src.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "../../.."))
TESTS_DIR = os.path.join(SCRIPT_DIR, "test_data")
class ParserTest(unittest.TestCase):
  """Fixture-driven tests for the generator_utils parsing helpers."""

  # Expected rows from test_data/fake_annotations.tsv; columns follow the
  # traffic-annotation TSV layout (unique id, sender, description, trigger,
  # data, destination, cookie info, settings, policy, source file, hashes).
  TSV_CONTENTS = [[
      u"unique_id_A", u"", u"sender_A", u"description_A", u"trigger_A",
      u"data_A", u"destination_A", u"cookies_allowed_A", u"cookies_store_A",
      u"settings_A", u"chrome_policy_A", u"", u"source_file_A",
      u"id_hash_code_A", u"content_hash_code_A"
  ], [
      u"unique_id_B", u"", u"sender_B", u"description_B", u"trigger_B",
      u"data_B", u"destination_B", u"cookies_allowed_B", u"cookies_store_B",
      u"settings_B", u"chrome_policy_B", u"", u"source_file_B",
      u"id_hash_code_B", u"content_hash_code_B"
  ], [
      u"unique_id_C", u"", u"sender_C", u"description_C", u"trigger_C",
      u"data_C", u"destination_C", u"cookies_allowed_C", u"cookies_store_C",
      u"settings_C", u"chrome_policy_C", u"", u"source_file_C",
      u"id_hash_code_C", u"content_hash_code_C"
  ]]

  # unique_id -> TrafficAnnotation mapping expected from
  # generator_utils.map_annotations(TSV_CONTENTS).
  ANNOTATIONS_MAPPING = {
      "unique_id_A":
          generator_utils.TrafficAnnotation(
              **{
                  "unique_id": "unique_id_A",
                  "description": "description_A",
                  "trigger": "trigger_A",
                  "data": "data_A",
                  "settings": "settings_A",
                  "policy": "chrome_policy_A"
              }),
      "unique_id_B":
          generator_utils.TrafficAnnotation(
              **{
                  "unique_id": "unique_id_B",
                  "description": "description_B",
                  "trigger": "trigger_B",
                  "data": "data_B",
                  "settings": "settings_B",
                  "policy": "chrome_policy_B"
              }),
      "unique_id_C":
          generator_utils.TrafficAnnotation(
              **{
                  "unique_id": "unique_id_C",
                  "description": "description_C",
                  "trigger": "trigger_C",
                  "data": "data_C",
                  "settings": "settings_C",
                  "policy": "chrome_policy_C"
              })
  }

  # Placeholder sequence expected when parsing test_data/fake_grouping.xml.
  PLACEHOLDERS = [
      {"type": generator_utils.Placeholder.GROUP, "name": "Group A"},
      {"type": generator_utils.Placeholder.SENDER, "name": "Sender 1"},
      {
          "type": generator_utils.Placeholder.ANNOTATION,
          "traffic_annotation": ANNOTATIONS_MAPPING["unique_id_A"]},
      {"type": generator_utils.Placeholder.SENDER, "name": "Sender 2"},
      {
          "type": generator_utils.Placeholder.ANNOTATION,
          "traffic_annotation": ANNOTATIONS_MAPPING["unique_id_B"]},
      {"type": generator_utils.Placeholder.GROUP, "name": "Group C"},
      {"type": generator_utils.Placeholder.SENDER, "name": "Sender 3"},
      {
          "type": generator_utils.Placeholder.ANNOTATION,
          "traffic_annotation": ANNOTATIONS_MAPPING["unique_id_C"]}
  ]

  # Document formatted according to fake_grouping.xml
  DOC_JSON = generator_utils.extract_body(target="all",
                                          json_file_path=os.path.join(
                                              TESTS_DIR, "fake_doc.json"))

  def test_load_tsv_file(self):
    self.assertEqual(self.TSV_CONTENTS, generator_utils.load_tsv_file(
        os.path.join(
            SRC_DIR,
            "tools/traffic_annotation/scripts/test_data/fake_annotations.tsv"),
        False))

  def test_map_annotations(self):
    self.assertEqual(self.ANNOTATIONS_MAPPING,
                     generator_utils.map_annotations(self.TSV_CONTENTS))

  def test_xml_parser_build_placeholders(self):
    xml_parser = generator_utils.XMLParser(
        os.path.join(TESTS_DIR, "fake_grouping.xml"), self.ANNOTATIONS_MAPPING)
    self.assertEqual(self.PLACEHOLDERS, xml_parser.build_placeholders())

  def test_find_first_index(self):
    first_index = generator_utils.find_first_index(self.DOC_JSON)
    self.assertEqual(1822, first_index)

  def test_find_last_index(self):
    last_index = generator_utils.find_last_index(self.DOC_JSON)
    self.assertEqual(2066, last_index)

  def test_find_chrome_browser_version(self):
    current_version = generator_utils.find_chrome_browser_version(self.DOC_JSON)
    self.assertEqual("86.0.4187.0", current_version)

  def test_find_bold_ranges(self):
    expected_bold_ranges = [(1843, 1855), (1859, 1867), (1871, 1876),
                            (1880, 1889), (1893, 1900), (1918, 1930),
                            (1934, 1942), (1968, 1975), (1946, 1951),
                            (1955, 1964), (2001, 2013), (2017, 2025),
                            (2029, 2034), (2038, 2047), (2051, 2058)]
    bold_ranges = generator_utils.find_bold_ranges(self.DOC_JSON)
    # NOTE(review): assertItemsEqual exists only on Python 2; if this suite
    # runs under Python 3 it must become assertCountEqual -- confirm.
    self.assertItemsEqual(expected_bold_ranges, bold_ranges)
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
|
rupor-github/fb2mobi | ui/gui_config.py | Python | mit | 7,187 | 0.004176 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from lxml import etree
from lxml.builder import ElementMaker
import os
import codecs
class GuiConfig():
    """Persistent GUI settings for fb2mobi, stored as a small XML file.

    On construction, a default config file is written if none exists, then
    the file is (re)loaded into the instance attributes.
    """

    def __init__(self, config_file):
        self.config_file = os.path.normpath(os.path.abspath(config_file))

        self.converterConfig = {}
        # Conversion settings.
        self.currentProfile = None
        self.currentFormat = None
        self.outputFolder = None
        self.lastUsedTargetPath = None
        self.hyphens = None
        # Kindle device / sync settings.
        self.kindlePath = None
        self.kindleSyncCovers = False
        self.kindleDocsSubfolder = None
        self.lastUsedPath = None
        # Send-to-Kindle mail credentials.
        self.GoogleMail = None
        self.GooglePassword = None
        self.KindleMail = None
        self.embedFontFamily = None
        self.fontDb = None
        # Logging settings.
        self.writeLog = True
        self.logLevel = 'DEBUG'
        self.clearLogAfterExit = False
        # Main-window layout state.
        self.bookInfoVisible = True
        self.bookInfoSplitterState = None
        self.columns = {}
        # Renaming settings.
        self.authorPattern = '#l, #f'
        self.filenamePattern = '#author. #title{ (пер. #translator)}'
        self.renameDestDir = ''
        self.deleteAfterRename = False
        # Window geometry (None means "not saved yet").
        self.geometry = {}
        self.geometry['x'] = None
        self.geometry['y'] = None
        self.geometry['width'] = None
        self.geometry['height'] = None

        # First run: create the file with defaults, then load it back.
        if not os.path.exists(self.config_file):
            self.write()
        self.load()

    def load(self):
        """Populate the instance attributes from the XML config file."""
        config = etree.parse(self.config_file)

        for e in config.getroot():
            if e.tag == 'currentProfile':
                self.currentProfile = e.text
            elif e.tag == 'currentFormat':
                self.currentFormat = e.text
            elif e.tag == 'outputFolder':
                self.outputFolder = e.text
            elif e.tag == 'lastUsedTargetPath':
                self.lastUsedTargetPath = e.text
            elif e.tag == 'embedFontFamily':
                self.embedFontFamily = e.text
            elif e.tag == 'hyphens':
                self.hyphens = e.text
            elif e.tag == 'lastUsedPath':
                self.lastUsedPath = e.text
            elif e.tag == 'kindlePath':
                self.kindlePath = e.text
            elif e.tag == 'kindleDocsSubfolder':
                self.kindleDocsSubfolder = e.text
            elif e.tag == 'writeLog':
                self.writeLog = e.text.lower() == 'true'
            elif e.tag == 'bookInfoVisible':
                self.bookInfoVisible = e.text.lower() == 'true'
            elif e.tag == 'bookInfoSplitterState':
                self.bookInfoSplitterState = e.text
            elif e.tag == 'clearLogAfterExit':
                self.clearLogAfterExit = e.text.lower() == 'true'
            elif e.tag == 'GoogleMail':
                self.GoogleMail = e.text
            elif e.tag == 'GooglePassword':
                self.GooglePassword = e.text
            elif e.tag == 'KindleMail':
                self.KindleMail = e.text
            elif e.tag == 'logLevel':
                self.logLevel = e.text
            elif e.tag == 'kindleSyncCovers':
                self.kindleSyncCovers = e.text.lower() == 'true'
            elif e.tag == 'columns':
                # Column widths keyed by column number; default to 100.
                for c in e:
                    if c.tag == 'column':
                        self.columns[c.attrib['number']] = int(c.text) if c.text else 100
            elif e.tag == 'geometry':
                for g in e:
                    self.geometry[g.tag] = int(g.text) if g.text else None
            elif e.tag == 'authorPattern':
                self.authorPattern = e.text
            elif e.tag == 'filenamePattern':
                self.filenamePattern = e.text
            elif e.tag == 'renameDestDir':
                self.renameDestDir = e.text
            elif e.tag == 'deleteAfterRename':
                self.deleteAfterRename = e.text.lower() == 'true'

    def write(self):
        """Serialize the current settings back to the XML config file."""
        def number(v):
            # Attribute dict for a <column number="..."> element.
            return {'number': str(v)}

        E = ElementMaker()
        # Empty elements are emitted for unset (None/empty) values so the
        # file keeps a stable, complete structure.
        config = E.settings(
            E.currentProfile(self.currentProfile) if self.currentProfile else E.currentProfile(),
            E.currentFormat(self.currentFormat) if self.currentFormat else E.currentFormat(),
            E.embedFontFamily(self.embedFontFamily) if self.embedFontFamily else E.embedFontFamily(),
            E.hyphens(self.hyphens) if self.hyphens else E.hyphens(),
            E.outputFolder(self.outputFolder) if self.outputFolder else E.outputFolder(),
            E.lastUsedTargetPath(self.lastUsedTargetPath) if self.lastUsedTargetPath else E.lastUsedTargetPath(),
            E.lastUsedPath(self.lastUsedPath) if self.lastUsedPath else E.lastUsedPath(),
            E.writeLog(str(self.writeLog)),
            E.clearLogAfterExit(str(self.clearLogAfterExit)),
            E.logLevel(self.logLevel) if self.logLevel else E.logLevel(),
            E.kindlePath(self.kindlePath) if self.kindlePath else E.kindlePath(),
            E.kindleSyncCovers(str(self.kindleSyncCovers)),
            E.kindleDocsSubfolder(self.kindleDocsSubfolder) if self.kindleDocsSubfolder else E.kindleDocsSubfolder(),
            E.GoogleMail(self.GoogleMail) if self.GoogleMail else E.GoogleMail(),
            E.GooglePassword(self.GooglePassword) if self.GooglePassword else E.GooglePassword(),
            E.KindleMail(self.KindleMail) if self.KindleMail else E.KindleMail(),
            E.bookInfoVisible(str(self.bookInfoVisible)),
            E.bookInfoSplitterState(self.bookInfoSplitterState) if self.bookInfoSplitterState else E.bookInfoSplitterState(),
            E.authorPattern(self.authorPattern) if self.authorPattern else E.authorPattern(),
            E.filenamePattern(self.filenamePattern) if self.filenamePattern else E.filenamePattern(),
            E.renameDestDir(self.renameDestDir) if self.renameDestDir else E.renameDestDir(),
            E.deleteAfterRename(str(self.deleteAfterRename)),
            E.columns(
                *[E.column(str(self.columns[col]), number(col)) for col in self.columns.keys()]
            ),
            E.geometry(
                E.x(str(self.geometry['x'])) if self.geometry['x'] else E.x(),
                E.y(str(self.geometry['y'])) if self.geometry['y'] else E.y(),
                E.width(str(self.geometry['width'])) if self.geometry['width'] else E.width(),
                E.height(str(self.geometry['height'])) if self.geometry['height'] else E.height()
            )
        )

        config_dir = os.path.dirname(self.config_file)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)

        # 'with' closes the file; the redundant explicit close() was removed.
        with codecs.open(self.config_file, "wb") as f:
            f.write(etree.tostring(config, encoding="utf-8", pretty_print=True, xml_declaration=True))
if __name__ == '__main__':
    # Running the module directly (re)creates the default config file.
    gui_config = GuiConfig('fb2mobi-gui.config')
    gui_config.write()
|
saxix/django-concurrency | tests/test_reversion.py | Python | mit | 961 | 0.002081 | import pytest
from demo.models import ReversionConcurrentModel
from django.urls import reverse
from reversion import add_to_revision, revisions, set_comment
from reversion.models import Version
@pytest.mark.django_db
@pytest.mark.functional
def test_recover(admin_user, client):
    """Recovering a revision via the admin restores the stored username and
    bumps the concurrency version past the recovered revision's value."""
    concurrentmodel = ReversionConcurrentModel.objects.create(username='USERNAME-OLD')
    with revisions.create_revision():
        set_comment("Initial revision")
        add_to_revision(concurrentmodel)

    ver = Version.objects.get_for_model(concurrentmodel).first()

    url = reverse('admin:demo_reversionconcurrentmodel_recover',
                  args=[concurrentmodel.pk])
    # Open the admin recover form as the admin user and submit it.
    res = client.get(url, user=admin_user.username)
    res.form.submit().follow()

    concurrentmodel2 = ReversionConcurrentModel.objects.get(pk=concurrentmodel.pk)
    assert concurrentmodel2.username == ver.field_dict['username']
    assert concurrentmodel2.version > ver.field_dict['version']
|
odoo-romania/extra | account_statement_operation_multicompany/__openerp__.py | Python | agpl-3.0 | 1,519 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account Statement Operation multi-company module for Odoo
# Copyright (C) 2015 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP addon manifest: pure data evaluated by the server.
    'name': 'Account Statement Operation Multi-company',
    'version': '0.2',
    'category': 'Accounting & Finance',
    'license': 'AGPL-3',
    'summary': 'Fix multi-company issue on Statement Operation Templates',
    'author': "Akretion,Odoo Community Association (OCA)",
    'website': 'http://www.akretion.com',
    'depends': ['account'],
    'data': [
        'account_view.xml',
        'security/rule.xml',
    ],
    'installable': True,
}
|
TheTimmy/spack | var/spack/repos/builtin/packages/simul/package.py | Python | lgpl-2.1 | 2,009 | 0.000498 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Simul(Package):
    """simul is an MPI coordinated test of parallel
    filesystem system calls and library functions. """

    homepage = "https://github.com/LLNL/simul"
    url = "https://github.com/LLNL/simul/archive/1.16.tar.gz"

    version('1.16', 'd616c1046a170c1e1b7956c402d23a95')
    version('1.15', 'a5744673c094a87c05c6f0799d1f496f')
    version('1.14', 'f8c14f0bac15741e2af354e3f9a0e30f')
    version('1.13', '8a80a62d569557715d6c9c326e39a8ef')

    depends_on('mpi')

    def install(self, spec, prefix):
        # Point the Makefile at Spack's MPI compiler wrapper.
        filter_file('mpicc', '$(MPICC)', 'Makefile', string=True)
        # Drop 'inline' so the symbol is always emitted -- presumably a
        # workaround for strict-C89/compiler inlining issues; TODO confirm.
        filter_file('inline void', 'void', 'simul.c', string=True)
        make('simul')
        mkdirp(prefix.bin)
        install('simul', prefix.bin)
|
resmo/ansible | lib/ansible/modules/cloud/google/gcp_kms_key_ring.py | Python | gpl-3.0 | 7,192 | 0.003059 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_kms_key_ring
description:
- A `KeyRing` is a toplevel logical grouping of `CryptoKeys`.
short_description: Creates a GCP KeyRing
version_added: 2.9
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- The resource name for the KeyRing.
required: true
type: str
location:
description:
- The location for the KeyRing.
- A full list of valid locations can be found by running `gcloud kms locations
list`.
required: true
type: str
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings)'
- 'Creating a key ring: U(https://cloud.google.com/kms/docs/creating-keys#create_a_key_ring)'
'''
EXAMPLES = '''
- name: create a key ring
gcp_kms_key_ring:
name: test_object
location: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
# Documented return values; the stray extraction artifact before the
# 'location' entry was removed so the YAML parses cleanly.
RETURN = '''
name:
  description:
  - The resource name for the KeyRing.
  returned: success
  type: str
creationTime:
  description:
  - The time that this resource was created on the server.
  - This is in RFC3339 text format.
  returned: success
  type: str
location:
  description:
  - The location for the KeyRing.
  - A full list of valid locations can be found by running `gcloud kms locations list`.
  returned: success
  type: str
'''
################################################################################
# Imports
################################################################################

import json

from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict

################################################################################
# Main
################################################################################
def main():
    """Main function"""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            location=dict(required=True, type='str'),
        )
    )

    # Default OAuth scope for Cloud KMS when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloudkms']

    state = module.params['state']

    # Fetch the current remote state (None when the ring does not exist).
    fetch = fetch_resource(module, self_link(module))
    changed = False

    if fetch:
        if state == 'present':
            # Resource exists and should: reconcile only when different.
            if is_different(module, fetch):
                update(module, self_link(module))
                fetch = fetch_resource(module, self_link(module))
                changed = True
        else:
            # Resource exists but should not.
            delete(module, self_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            # Resource missing and should exist.
            fetch = create(module, create_link(module))
            changed = True
        else:
            # Resource missing and should not exist: nothing to do.
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link):
    """POST the key ring to the API and return the decoded response."""
    auth = GcpSession(module, 'kms')
    return return_if_object(module, auth.post(link, resource_to_request(module)))
def update(module, link):
    """'Update' by delete-and-recreate.

    NOTE(review): delete() below always calls module.fail_json, so any
    attempted update aborts the run -- presumably because key rings are
    immutable in Cloud KMS; confirm against the API docs.
    """
    delete(module, self_link(module))
    create(module, create_link(module))
def delete(module, link):
    """Always fail: key rings cannot be deleted via this module."""
    module.fail_json(msg="KeyRings cannot be deleted")
def resource_to_request(module):
    """Build the API request payload from the module parameters.

    Falsy values are dropped, except a literal False, which is a
    meaningful boolean and must be sent.
    """
    candidate = {u'name': module.params.get('name')}
    return {key: val for key, val in candidate.items() if val or val is False}
def fetch_resource(module, link, allow_not_found=True):
    """GET the resource at `link`; returns None on an allowed 404."""
    auth = GcpSession(module, 'kms')
    return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
    """Return the canonical REST URL of this key ring resource."""
    template = ("https://cloudkms.googleapis.com/v1/projects/{project}"
                "/locations/{location}/keyRings/{name}")
    return template.format(**module.params)
def collection(module):
    """Return the REST URL of the key-ring collection for this project/location."""
    template = ("https://cloudkms.googleapis.com/v1/projects/{project}"
                "/locations/{location}/keyRings")
    return template.format(**module.params)
def create_link(module):
    """Return the creation endpoint URL; the ring name travels as keyRingId."""
    template = ("https://cloudkms.googleapis.com/v1/projects/{project}"
                "/locations/{location}/keyRings?keyRingId={name}")
    return template.format(**module.params)
def return_if_object(module, response, allow_not_found=False):
    """Decode an HTTP response into a dict, or fail the module.

    Returns None for an allowed 404 or an empty 204 body; fails the module
    on invalid JSON or when the payload carries an 'error.errors' entry.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # Normalize the resource name before it is compared or returned.
    result = decode_response(result, module)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Return True when the user's request differs from the live resource.

    Only keys present on both sides are compared, so server-only
    (output-only) fields never cause a spurious diff.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    request = decode_response(request, module)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response down to the fields Ansible compares."""
    name = response.get(u'name')
    created = response.get(u'creationTime')
    return {u'name': name, u'creationTime': created}
def decode_response(response, module):
    """Shorten a fully-qualified resource name to its trailing id, in place.

    The API returns names like 'projects/p/locations/l/keyRings/r'; the
    module only compares and reports the final path segment.
    """
    if 'name' in response:
        response['name'] = response['name'].rsplit('/', 1)[-1]
    return response
# Ansible executes the module file directly; enter via main().
if __name__ == '__main__':
    main()
|
pu6ki/elsyser | talks/models.py | Python | mit | 891 | 0.002245 | from django.db import models
from django.contrib.auth.models import User
from vote.models import VoteModel
class Meetup(models.Model):
    """A scheduled meetup event that talks can be attached to."""

    date = models.DateTimeField()
    description = models.CharField(max_length=10000, blank=True, null=True)

    def __str__(self):
        return '{} ({})'.format(self.__class__.__name__, self.date)

    class Meta:
        # Chronological order by default.
        ordering = ['date']
class Talk(VoteModel):
    """A talk proposed for (optionally) a meetup; users can vote on it."""

    meetup = models.ForeignKey(Meetup, related_name='talks', null=True, on_delete=models.CASCADE)
    author = models.ForeignKey(User, related_name='talks', on_delete=models.CASCADE)
    topic = models.CharField(max_length=500)
    description = models.CharField(max_length=10000)
    # Optional link to a recording of the talk.
    video_url = models.URLField(blank=True, null=True)

    def __str__(self):
        return '{} - {}'.format(self.__class__.__name__, self.topic)

    class Meta:
        # Highest-voted first; vote_score is presumably provided by
        # VoteModel -- confirm against django-vote.
        ordering = ['-vote_score']
|
MAKOMO/artisan | src/comm/CommFujiExamplePython2.6.py | Python | gpl-3.0 | 5,757 | 0.012159 | #!/usr/bin/env python
#####################################################################################
# COMM TEST PROGRAM FOR FUJI PXG4 or PXR3 with RS485 option and modbus protocol
# This program shows how to read Temp from a Fuji PID
# ##################################################################################################
# REQUIREMENTS
# python 2.6 : http://www.python.org/ftp/python/2.6.2/python-2.6.2.msi
# pyserial for python 2.6: http://sourceforge.net/projects/pyserial/files/pyserial/2.5/pyserial-2.5-rc1.win32.exe/download
# javacomm: http://www.xpl4java.org/xPL4Java/javacomm.html
# Java JDK or JRE: http://java.sun.com/javase/downloads/index.jsp
import serial
import time
import binascii
def main():
#Read temperature in a forever loop to check operation
delay = 2 # set time between each reading in seconds
deci = 10. # if decimal position in PID is 1, then divide by 10 the temperature
start = time.time() # time
print "Press <CTRL 'C'> to stop"
while True:
t = temperature()/deci # read present temperature
print str(t) # output temperature
time.sleep(delay) # wait delay before next reading in while loop
#TEMPERATURE function reads the temperature and compares it with the CRC16 received. Returns an int
def temperature():
#PART A: Here we send the command to read temp and read the receive data in to r.
serPID = None
try:
# choose either a command for unit #1 or or a command for unit #2
#command for unit id = 1
#command = "\x01\x04\x03\xE8\x00\x01\xB1\xBA"
#command for unit id = 2
| command = "\x02\x04\x03\xE8\x00\x01\xB1\x89"
serPID = serial.Serial("COM12", baudrate=9600, bytesize=8, parity='O', stopbits=1, timeout=1)
serPID.write(command)
|
#example of data received r = "\x01\x04\x02\x03\x46\x38\x32" (7 bytes)
r = serPID.read(7)
serPID.close()
if len(r) != 7:
print "bad RX data"
return 0
except serial.SerialException, e:
print "Serial port error" + str(e)
finally:
if serPID:
serPID.close()
#PART B: Here we extract the value of the CRC16 sent by finding the sum of the last two bytes (bin to hex; then hex to int)
crcRx = int(binascii.hexlify(r[-1]+r[-2]),16)
#PART C: Here we calculate our own value of the CRC16 from the data we received (r minus last two bytes)
crcCal1 = fujiCrc16(r[:-2])
#PART D: We compare PART B with PART C. Return temperature if OK else print crc16 Error and return 0
if crcCal1 == crcRx: #crcRx:
# convert data part binary string to hex representation
s1 = binascii.hexlify(r[3] + r[4])
#conversion from hex to dec
return int(s1,16)
else:
print "bad crc16 sum"
return 0
#FUJICRC16 function calculates the CRC16 of the data. It expects a binary string as input and returns and int
def fujiCrc16(string):
crc16tab = (0x0000,
0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880,
0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0,
0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100,
0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740,
0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80,
0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0,
0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200,
0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80,
0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0,
0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700,
0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140,
0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0, 0x5D80,
0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0,
0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400,
0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040)
cr=0xFFFF
for j in string:
tmp = cr ^(ord(j))
cr =(cr >> 8)^crc16tab[(tmp & 0xff)]
return cr
main()
|
square/pants | tests/python/pants_test/tasks/test_bundle_create.py | Python | apache-2.0 | 1,220 | 0.005738 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache Licen | se, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals | )
from pants.util.dirutil import safe_mkdtemp, safe_rmtree
from pants.backend.jvm.tasks.bundle_create import BundleCreate
from pants_test.task_test_base import TaskTestBase
sample_ini_test_1 = """
[DEFAULT]
pants_distdir = /tmp/dist
"""
class BundleCreateTest(TaskTestBase):
@classmethod
def task_type(cls):
return BundleCreate
def setUp(self):
super(BundleCreateTest, self).setUp()
self.workdir = safe_mkdtemp()
def tearDown(self):
super(BundleCreateTest, self).tearDown()
safe_rmtree(self.workdir)
def test_bundle_create_init(self):
options = {
self.options_scope: {
'deployjar': None,
'archive_prefix': None,
'archive': None
}
}
bundle_create = self.create_task(self.context(config=sample_ini_test_1, new_options=options),
self.workdir)
self.assertEquals(bundle_create._outdir, '/tmp/dist')
|
makinacorpus/ionyweb | ionyweb/plugin_app/plugin_text/views.py | Python | bsd-3-clause | 222 | 0.004505 | # -*- coding: utf-8 -*-
from ionyweb.website.rendering.utils import render_view


def index_view(request, plugin):
    """Render the text plugin's index template with the plugin as context."""
    return render_view(
        plugin.get_templates('plugin_text/index.html'),
        {'object': plugin})
|
etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-umap.py | Python | gpl-3.0 | 517 | 0 | # -------------------------------------------------- | ----------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata

# umap-learn reads its own distribution metadata at runtime, so bundle it.
datas = copy_metadata('umap-learn')
|
GNOME/conduit | conduit/modules/BoxDotNetModule/BoxDotNetModule.py | Python | gpl-2.0 | 11,179 | 0.008677 | """
BoxDotNet Module
"""
import os, sys
import traceback
import md5
import logging
log = logging.getLogger("modules.BoxDotNet")
import conduit
import conduit.utils as Utils
import conduit.Web as Web
import conduit.dataproviders.DataProvider as DataProvider
import conduit.Exceptions as Exceptions
from conduit.datatypes import Rid
import conduit.datatypes.File as File
Utils.dataprovider_add_dir_to_path(__file__, "BoxDotNetAPI")
from boxdotnet import BoxDotNet
from gettext import gettext as _
MODULES = {
"BoxDotNetTwoWay" : { "type": "dataprovider" }
}
class BoxDotNetTwoWay(DataProvider.TwoWay):
_name_ = _("Box.net")
_description_ = _("Synchronize your Box.net files")
_category_ = conduit.dataproviders.CATEGORY_FILES
_module_type_ = "twoway"
_in_type_ = "file"
_out_type_ = "file"
_icon_ = "boxdotnet"
_configurable_ = True
API_KEY="nt0v6a232z6r47iftjx7g0azu6dg4p10"
def __init__(self, *args):
DataProvider.TwoWay.__init__(self)
self.update_configuration(
foldername = "",
)
self.boxapi = None
self.user_id = None
self.token = None
self.folder_id = None
self.files = {}
#------------------------------------------
# File info related functions
#------------------------------------------
def _get_file_info(self, fileID):
"""
Returns the id, if the id is present in the configured folder
"""
self.files = self._get_files(self.folder_id)
if self.files.has_key(fileID):
log.debug("File [%s] does exist" % fileID)
return fileID
else:
log.debug("File [%s] does not exist" % fileID)
return None
def _get_files(self,folderID):
"""
Gets a list of files present in the configured folder
"""
rsp = self.boxapi.get_account_tree (api_key=BoxDotNetTwoWay.API_KEY,
auth_token=self.token,
folder_id=folderID,
params=['nozip'])
files = {}
try:
for file in rsp.tree[0].folder[0].files[0].file:
files[file.attrib['id']] = file.attrib['file_name']
finally:
return files
def _get_raw_file_url(self, fileID):
"""
Format an url that can be used for downloading a file
"""
ret | urn "http://box.net/api/1.0/download/%s/%s" % (self.token, fileID)
#------------------------------------------
# Upload functions
#------------------------------------------
def _upload_file (self, file_path, filename):
"""
Upload the file to box.net
@returns: uid of the file
"""
rsp = self.boxapi.upload(file_path,
aut | h_token=self.token,
folder_id=self.folder_id,
share=0,
filename=filename
)
uid = rsp.files[0].file[0].attrib['id']
return uid
def _replace_file (self, fileID, url, name):
"""
Box.net automatically replaces files with same name, so we can
use the plain upload method
@returns: uid of the file
"""
return self._upload_file(url, name)
#------------------------------------------
# File info related functions
#------------------------------------------
def _get_folder_id(self):
"""
Returns a folder id for the configured folder name, it re-uses existing ones
and creates a new folder if it isn't present
"""
id = None
# see if folder already exists
folders = self._get_folders()
if folders.has_key (self.foldername):
id = folders[self.foldername]
# return if it does
if id:
return id
# create otherwise
else:
return self._create_folder ()
def _get_folders(self):
"""
Returns a dictionary of name-id representing the upper-level
folders
"""
rsp = self.boxapi.get_account_tree(api_key=BoxDotNetTwoWay.API_KEY,
auth_token=self.token,
folder_id=0,
params=['nozip'])
folders = {}
try:
# this might throw an exception if user has no folders yet
for folder in rsp.tree[0].folder[0].folders[0].folder:
folders[folder.attrib['name']] = folder.attrib['id']
finally:
return folders
def _create_folder(self):
"""
Create a top-level folder with the configured name, and return the id
"""
rsp = self.boxapi.create_folder(api_key=BoxDotNetTwoWay.API_KEY,
auth_token=self.token,
parent_id=0,
name=self.foldername,
share=0)
return rsp.folder[0].folder_id[0].elementText
#------------------------------------------
# Authentication methods
#------------------------------------------
def _login(self):
"""
Logs the user in to box.net
"""
if self.boxapi == None:
self.boxapi = BoxDotNet()
# login if not done yet, we only login once to prevent
# the browser for popping up each time
if not self.token:
# get the ticket and open login url
self._set_ticket()
url = BoxDotNet.get_login_url(self.ticket)
#wait for log in
Web.LoginMagic("Log into Box.net", url, login_function=self._try_login)
def _try_login (self):
"""
Try to perform a login, return None if it does not succeed
"""
try:
self._set_login_info(self.ticket)
return self.token
except:
return None
def _set_ticket(self):
"""
Get the ticket that can be used for logging in for real
"""
rsp = self.boxapi.get_ticket(api_key=self.API_KEY)
self.ticket = rsp.ticket[0].elementText
def _set_login_info (self, ticket):
"""
Get a token and the user id
"""
rsp = self.boxapi.get_auth_token(api_key=self.API_KEY, ticket=ticket)
self.user_id = rsp.user[0].user_id[0].elementText
self.token = rsp.auth_token[0].elementText
self.ticket = None
#------------------------------------------
# Dataprovider Functions
#------------------------------------------
def refresh(self):
DataProvider.TwoWay.refresh(self)
self._login()
# set folder id if not done yet or configuration changed
folder_id = self._get_folder_id()
if not self.folder_id or self.folder_id != folder_id:
self.folder_id = folder_id
self.files = self._get_files(self.folder_id)
def put (self, file, overwrite, LUID=None):
"""
Puts the file in the sink, this uploads the file if it is not present yet
or updates it if necessary
"""
DataProvider.TwoWay.put(self, file, overwrite, LUID)
originalName = file.get_filename()
#Gets the local URI (/foo/bar). If this is a remote file then
#it is first transferred to the local filesystem
fileURI = file.get_local_uri()
mimeType = file.get_mimetype()
if LUID == None:
log.debug("Uploading file URI = %s, Mimetype = %s, Original Name = %s" % (fileURI, mimeType, originalName))
LUID = self._upload_file (fileURI, originalName)
else:
#check if a file exists at that UID
id = self._get_file_info(LUID)
if id != None:
if overwrite == True:
log.debug("Replacing file URI = %s, Mimetype = %s, Original Name = %s" % (fileURI, mimeType, originalName))
LUID = sel |
danielmellado/tempest | tempest/api/image/v2/test_images_negative.py | Python | apache-2.0 | 3,822 | 0 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations |
# under the License.
import uuid
from tempest_lib import exceptions as lib_exc
from tempest.api.image import base
from tempest import test
class ImagesNegativeTest(base.BaseV2ImageTest):

    """Negative tests for the get_image and delete_image APIs.

    Tests
    ** get non-existent image
    ** get image with image_id=NULL
    ** get the deleted image
    ** delete non-existent image
    ** delete image with image_id=NULL
    ** delete the deleted image
    """

    @test.attr(type=['negative'])
    @test.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81d9f')
    def test_get_non_existent_image(self):
        # get the non-existent image
        non_existent_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound, self.client.get_image,
                          non_existent_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2562ad')
    def test_get_image_null_id(self):
        # get image with image_id = NULL
        image_id = ""
        self.assertRaises(lib_exc.NotFound, self.client.get_image, image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('e57fc127-7ba0-4693-92d7-1d8a05ebcba9')
    def test_get_delete_deleted_image(self):
        # get and delete the deleted image
        # create and delete image
        body = self.client.create_image(name='test',
                                        container_format='bare',
                                        disk_format='raw')
        image_id = body['id']
        self.client.delete_image(image_id)
        self.client.wait_for_resource_deletion(image_id)

        # get the deleted image
        self.assertRaises(lib_exc.NotFound, self.client.get_image, image_id)

        # delete the deleted image
        self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                          image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('6fe40f1c-57bd-4918-89cc-8500f850f3de')
    def test_delete_non_existing_image(self):
        # delete non-existent image
        non_existent_image_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                          non_existent_image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('32248db1-ab88-4821-9604-c7c369f1f88c')
    def test_delete_image_null_id(self):
        # delete image with image_id=NULL
        image_id = ""
        self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                          image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('292bd310-369b-41c7-a7a3-10276ef76753')
    def test_register_with_invalid_container_format(self):
        # Negative tests for invalid data supplied to POST /images
        self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                          'test', 'wrong', 'vhd')

    @test.attr(type=['negative'])
    @test.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
    def test_register_with_invalid_disk_format(self):
        # 'wrong' is not a valid disk_format, so image creation must fail
        self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                          'test', 'bare', 'wrong')
|
noelevans/sandpit | bayesian_methods_for_hackers/LoLN_convergence_examples_ch04.py | Python | mit | 867 | 0 | import numpy as np
import matplotlib.pyplot as plt
import pymc as pm
def main():
    """Demonstrate the Law of Large Numbers with Poisson samples.

    Draws three independent sequences of Poisson(4.5) samples and plots the
    running average of each sequence against the true expected value,
    showing convergence as the sample count grows.
    """
    sample_size = 100000
    expected_value = lambda_ = 4.5
    # Evaluate the running mean every 100 samples.
    N_samples = range(1, sample_size, 100)

    for k in range(3):
        samples = pm.rpoisson(lambda_, size=sample_size)

        partial_average = [samples[:i].mean() for i in N_samples]

        label = "average of $n$ samples; seq. %d" % k
        plt.plot(N_samples, partial_average, lw=1.5, label=label)

    plt.plot(N_samples, expected_value * np.ones_like(partial_average),
             ls="--", label="true expected value", c="k")

    plt.ylim(4.35, 4.65)
    # NOTE: a trailing space after "its" keeps the concatenated title from
    # reading "itsexpected value".
    plt.title("Convergence of the average of \n random variables to its " +
              "expected value")
    plt.ylabel("average of $n$ samples")
    plt.xlabel("# of samples, $n$")
    plt.legend()
    plt.show()


if __name__ == '__main__':
    main()
|
stripecoin/stripecoin | share/qt/clean_mac_info_plist.py | Python | mit | 901 | 0.016648 | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the stripecoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
# Paths: plist template in the source tree, final plist inside the app bundle.
bitcoinDir = "./"
inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "stripecoin-Qt.app/Contents/Info.plist"

# Extract VERSION from the qmake project file (lines like "VERSION = 0.8.1").
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
with open(fileForGrabbingVersion) as versionFile:
    for line in versionFile:
        lineArr = line.replace(" ", "").split("=")
        if lineArr[0].startswith("VERSION"):
            version = lineArr[1].replace("\n", "")

# Substitute the template placeholders and write the bundle's Info.plist.
with open(inFile, "r") as fIn:
    fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)
with open(outFile, "w") as fOut:
    fOut.write(newFileContent)

print("Info.plist fresh created")
|
JSBCCA/pythoncode | early_projects/perfcountertest.py | Python | mit | 135 | 0 | imp | ort time
import find_remove_find5
import sort_then_find3
import walk_through7
def time_find_two_smallest(find_func, lst):
    """Time how long ``find_func(lst)`` takes to run.

    Placeholder — not implemented yet; currently does nothing and returns
    None. Kept so imports and callers continue to work.
    """
    ...
|
MalwareLabHagenberg/ambrosia | ambrosia_plugins/network/__init__.py | Python | gpl-3.0 | 2,143 | 0.000467 | # -*- coding: utf-8 -*-
###############################################################################
# #
# Ambrosia - a tool to visualize ANANAS results #
# | #
# Copyright (C) 2015 Wolfgang Ettlinger and the ANANAS Team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# the ANANAS Project Copyright (C) 2015 #
# #
###############################################################################
import dateutil
import json
import ambrosia
from ambrosia import model
from ambrosia.context import AmbrosiaContext
from ambrosia.plugins import PluginInfoTop
__author__ = 'Wolfgang Ettlinger'
class PluginInfo(PluginInfoTop):
    """Plugin entry point for network-traffic results.

    This plugin is not implemented. Implement as soon as ANANAS properly
    supports network traffic analysis.
    """
    pass
| |
dreispt/department | analytic_department/__init__.py | Python | agpl-3.0 | 16 | 0 | import | anal | ytic
|
reidswan/Checkit | checkit/wsgi.py | Python | mit | 392 | 0 | """
WSGI config for checkit project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"" | "
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "checkit.settings")
application = get_wsgi_application( | )
|
commtrack/temp-aquatest | apps/xformmanager/admin.py | Python | bsd-3-clause | 817 | 0.0306 | from django.contrib import admin
from xformmanager.models import *
class FormDefModelAdmin(admin.ModelAdmin):
    """Admin options for FormDefModel: summary columns plus a domain filter."""
    list_display = ('id','uploaded_by', 'domain', 'form_display_name','form_name', 'submit_time',)
    list_filter = ["domain"]
class MetaDataModelAdmin(admin.ModelAdmin):
    """Admin options for Metadata: show and filter on the core form fields."""
    list_display = ('formname', 'formversion', 'deviceid', 'timestart',
                    'timeend', 'username', 'chw_id', 'uid', 'attachment',
                    'raw_data', 'formdefmodel')
    list_filter = ('formname', 'formversion', 'deviceid', 'timestart',
                   'timeend', 'username', 'chw_id', 'formdefmodel')
# Register the xformmanager models with the Django admin site.
admin.site.register(FormDefModel, FormDefModelAdmin)
admin.site.register(ElementDefModel)
admin.site.register(Metadata, MetaDataModelAdmin)
admin.site.register(FormDataPointer)
admin.site.register(FormDataColumn)
admin.site.register(FormDataGroup)
|
uclaros/QGIS | python/plugins/processing/algs/gdal/GridInverseDistanceNearestNeighbor.py | Python | gpl-2.0 | 9,771 | 0.003787 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GridInverseDistanceNearestNeighbor.py
---------------------
Date : September 2017
Copyright : (C) 2017 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2017'
__copyright__ = '(C) 2017, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridInverseDistanceNearestNeighbor(GdalAlgorithm):
INPUT = 'INPUT'
Z_FIELD = 'Z_FIELD'
POWER = 'POWER'
SMOOTHING = 'SMOOTHING'
RADIUS = 'RADIUS'
MAX_POINTS = 'MAX_POINTS'
MIN_POINTS = 'MIN_POINTS'
NODATA = 'NODATA'
OPTIONS = 'OPTIONS'
EXTRA = 'EXTRA'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Point layer'),
[QgsProcessing.TypeVectorPoint]))
z_field_param = QgsProcessingParameterField(self.Z_FIELD,
self.tr('Z value from field'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True)
z_field_param.setFlags(z_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(z_field_param)
self.addParameter(QgsProcessingParameterNumber(self.POWER,
self.tr('Weighting power'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
maxValue=100.0,
defaultValue=2.0))
self.addParameter(QgsProcessingParameterNumber(self.SMOOTHING,
self.tr('Smoothing'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.RADIUS,
self.t | r('The radius of the search circle'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=1.0))
self.addParameter(QgsProcessingParameterNumber(self.MAX_POINTS,
self.tr('Maximum number of da | ta points to use'),
type=QgsProcessingParameterNumber.Integer,
minValue=0,
defaultValue=12))
self.addParameter(QgsProcessingParameterNumber(self.MIN_POINTS,
self.tr('Minimum number of data points to use'),
type=QgsProcessingParameterNumber.Integer,
minValue=0,
defaultValue=0))
self.addParameter(QgsProcessingParameterNumber(self.NODATA,
self.tr('NODATA marker to fill empty points'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=5)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Interpolated (IDW with NN search)')))
def name(self):
return 'gridinversedistancenearestneighbor'
def displayName(self):
return self.tr('Grid (IDW with nearest neighbor searching)')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))
def commandName(self):
return 'gdal_grid'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
arguments = [
'-l',
layerName
]
fieldName = self.parameterAsString(parameters, self.Z_FIELD, context)
if fieldName:
arguments.append('-zfield')
|
SU-ECE-17-7/hotspotter | hotspotter/report_results2.py | Python | apache-2.0 | 40,958 | 0.004346 | #!/usr/env python
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off, rrr, profile, printDBG) =\
__common__.init(__name__, '[rr2]', DEBUG=False)
# Matplotlib
import matplotlib
matplotlib.use('Qt4Agg')
# Python
import os
import sys
import textwrap
import fnmatch
import warnings
from itertools import izip
from os.path import join, exists
# Scientific imports
import numpy as np
# Hotspotter imports
from hscom import fileio as io
from hscom import helpers
from hscom.Printable import DynStruct
from hsviz import draw_func2 as df2
from hsviz import viz
import load_data2 as ld2
import spatial_verification2 as sv2
#import match_chips3 as mc3
#import datetime
#import subprocess
REPORT_MATRIX = True
REPORT_MATRIX_VIZ = True
# ========================================================
# Report result initialization
# ========================================================
class AllResults(DynStruct):
    """Data container for all compiled results.

    Holds the per-query result map (qcx2_res) plus summary strings and
    chip-index lists that the report-building helpers fill in later.
    """
    def __init__(self, hs, qcx2_res, SV):
        super(DynStruct, self).__init__()
        self.hs = hs
        self.qcx2_res = qcx2_res
        self.SV = SV
        # Summary fields, populated by the report-building helpers.
        self.rankres_str = None
        self.title_suffix = None
        self.scalar_mAP_str = '# mAP score = NA\n'
        self.scalar_summary = None
        self.problem_false_pairs = None
        self.problem_true_pairs = None
        self.greater1_cxs = None
        self.greater5_cxs = None
        self.matrix_str = None

    def get_orgres2_distances(allres, *args, **kwargs):
        # Thin wrapper around the module-level helper.
        return _get_orgres2_distances(allres, *args, **kwargs)

    def __str__(allres):
        #print = tores.append
        hs = allres.hs
        toret = ('+======================\n')
        scalar_summary = str(allres.scalar_summary).strip()
        toret += ('| All Results: %s \n' % hs.get_db_name())
        toret += ('| title_suffix=%s\n' % str(allres.title_suffix))
        toret += ('| scalar_summary=\n%s\n' % helpers.indent(scalar_summary, '| '))
        toret += ('| ' + str(allres.scalar_mAP_str))
        toret += ('|---\n')
        toret += ('| greater5_%s \n' % (hs.cidstr(allres.greater5_cxs),))
        toret += ('|---\n')
        toret += ('| greater1_%s \n' % (hs.cidstr(allres.greater1_cxs),))
        toret += ('|---\n')
        toret += ('+======================.\n')
        #toret+=('| problem_false_pairs=\n%r' % allres.problem_false_pairs)
        #toret+=('| problem_true_pairs=\n%r' % allres.problem_true_pairs)
        return toret
class OrganizedResult(DynStruct):
    """Parallel lists of (query chip, matched chip, score, rank) records."""
    def __init__(self):
        super(DynStruct, self).__init__()
        self.qcxs = []
        self.cxs = []
        self.scores = []
        self.ranks = []

    def append(self, qcx, cx, rank, score):
        # NOTE: parameter order is (qcx, cx, rank, score) — rank before score.
        self.qcxs.append(qcx)
        self.cxs.append(cx)
        self.scores.append(score)
        self.ranks.append(rank)

    def __len__(self):
        # All four parallel lists must stay the same length.
        num_qcxs = len(self.qcxs)
        num_cxs = len(self.cxs)
        num_scores = len(self.scores)
        num_ranks = len(self.ranks)
        assert num_qcxs == num_cxs
        assert num_cxs == num_scores
        assert num_scores == num_ranks
        return num_qcxs

    def iter(self):
        'Yield (qcx, cx, score, rank) tuples; useful for plotting'
        result_iter = izip(self.qcxs, self.cxs, self.scores, self.ranks)
        for qcx, cx, score, rank in result_iter:
            yield qcx, cx, score, rank

    def qcx_arrays(self, hs):
        'Return per-query-chip (rank, score, cx) arrays; useful for reportres_str'
        cx2_cid = hs.tables.cx2_cid
        # -2 / negative values mark query chips with no recorded entry.
        qcx2_rank = np.zeros(len(cx2_cid)) - 2
        qcx2_score = np.zeros(len(cx2_cid)) - 2
        qcx2_cx = np.arange(len(cx2_cid)) * -1
        #---
        for (qcx, cx, score, rank) in self.iter():
            qcx2_rank[qcx] = rank
            qcx2_score[qcx] = score
            qcx2_cx[qcx] = cx
        return qcx2_rank, qcx2_score, qcx2_cx

    def printme3(self):
        # Debug dump: one fixed-width line per record.
        for qcx, cx, score, rank in self.iter():
            print('%4d %4d %6.1f %4d' % (qcx, cx, score, rank))
def get_false_match_distances(allres):
    """Return the match distances for the 'false' organized results."""
    return get_orgres_match_distances(allres, 'false')
def get_true_match_distances(allres):
    """Return the match distances for the 'true' organized results."""
    return get_orgres_match_distances(allres, 'true')
def res2_true_and_false(hs, res, SV):
    """Organize one query result into true-positive and false-positive sets.

    Returns two tuples, each (cxs, scores, ranks): matches sharing the
    query's name (true) and matches with a different name (false), both
    sorted by descending score and restricted to the indexed sample.
    """
    # NOTE(review): SV is a parameter so it is always in vars(); this guard
    # looks like dead code — confirm original intent.
    if not 'SV' in vars():
        SV = True
    #if not 'res' in vars():
    #res = qcx2_res[qcx]
    indx_samp = hs.indexed_sample_cx
    qcx = res.qcx
    # NOTE(review): both branches are identical; one was presumably meant to
    # select a spatially-verified score array — verify against history.
    cx2_score = res.cx2_score if SV else res.cx2_score
    unfilt_top_cx = np.argsort(cx2_score)[::-1]
    # Get top chip indexes and scores (keep only indexed sample chips)
    top_cx = np.array(helpers.intersect_ordered(unfilt_top_cx, indx_samp))
    top_score = cx2_score[top_cx]
    # Get the true and false ground truth ranks
    qnx = hs.tables.cx2_nx[qcx]
    if qnx <= 1:
        qnx = -1  # disallow uniden animals from being marked as true
    top_nx = hs.tables.cx2_nx[top_cx]
    # Same name (and not the query itself) -> true; different name -> false.
    true_ranks = np.where(np.logical_and(top_nx == qnx, top_cx != qcx))[0]
    false_ranks = np.where(np.logical_and(top_nx != qnx, top_cx != qcx))[0]
    # Construct the true positive tuple
    true_scores = top_score[true_ranks]
    true_cxs = top_cx[true_ranks]
    true_tup = (true_cxs, true_scores, true_ranks)
    # Construct the false positive tuple
    false_scores = top_score[false_ranks]
    false_cxs = top_cx[false_ranks]
    false_tup = (false_cxs, false_scores, false_ranks)
    # Return tuples
    return true_tup, false_tup
def init_organized_results(allres):
    """Populate allres with OrganizedResult buckets for every test query.

    Buckets: all true/false matches, the top-ranked of each, the last true
    match (bot_true), and 'problem' entries where a rank gap suggests a
    missed ground-truth match.
    """
    print('[rr2] Initialize organized results')
    hs = allres.hs
    SV = allres.SV
    qcx2_res = allres.qcx2_res
    allres.true = OrganizedResult()
    allres.false = OrganizedResult()
    allres.top_true = OrganizedResult()
    allres.top_false = OrganizedResult()
    allres.bot_true = OrganizedResult()
    allres.problem_true = OrganizedResult()
    allres.problem_false = OrganizedResult()
    # -----------------
    # Query result loop
    for qcx in hs.test_sample_cx:
        res = qcx2_res[qcx]
        # Use ground truth to sort into true/false
        true_tup, false_tup = res2_true_and_false(hs, res, SV)
        last_rank = -1
        skipped_ranks = set([])
        # Record: all_true, missed_true, top_true, bot_true
        topx = 0
        for cx, score, rank in zip(*true_tup):
            allres.true.append(qcx, cx, rank, score)
            # A gap in consecutive true ranks means a false match slipped in.
            if rank - last_rank > 1:
                skipped_ranks.add(rank - 1)
                allres.problem_true.append(qcx, cx, rank, score)
            if topx == 0:
                allres.top_true.append(qcx, cx, rank, score)
            last_rank = rank
            topx += 1
        if topx > 1:
            # NOTE: relies on cx/score/rank leaking from the loop above,
            # i.e. records the LAST (worst-ranked) true match.
            allres.bot_true.append(qcx, cx, rank, score)
        # Record the all_false, false_positive, top_false
        topx = 0
        for cx, score, rank in zip(*false_tup):
            allres.false.append(qcx, cx, rank, score)
            # False matches occupying a skipped rank outrank a true match.
            if rank in skipped_ranks:
                allres.problem_false.append(qcx, cx, rank, score)
            if topx == 0:
                allres.top_false.append(qcx, cx, rank, score)
            topx += 1
    print('[rr2] len(allres.true)          = %r' % len(allres.true))
    print('[rr2] len(allres.false)         = %r' % len(allres.false))
    print('[rr2] len(allres.top_true)      = %r' % len(allres.top_true))
    print('[rr2] len(allres.top_false)     = %r' % len(allres.top_false))
    print('[rr2] len(allres.bot_true)      = %r' % len(allres.bot_true))
    print('[rr2] len(allres.problem_true)  = %r' % len(allres.problem_true))
    print('[rr2] len(allres.problem_false) = %r' % len(allres.problem_false))
    # qcx arrays for ttbttf
    allres.top_true_qcx_arrays  = allres.top_true.qcx_arrays(hs)
    allres.bot_true_qcx_arrays  = allres.bot_true.qcx_arrays(hs)
    allres.top_false_qcx_arrays = allres.top_false.qcx_arrays(hs)
def init_score_matrix(allres):
print(' * init score matrix')
hs = allres.hs
SV = allres.SV
qcx2_res = allres.qcx2_res
cx2_nx |
raju249/oppia | core/domain/feedback_services_test.py | Python | apache-2.0 | 27,168 | 0.00011 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feedback-related services."""
import json
from core.domain import feedback_domain
from core.domain import feedback_jobs_continuous_test
from core.domain import feedback_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
taskqueue_services = models.Registry.import_taskqueue_services()
class FeedbackServicesUnitTests(test_utils.GenericTestBase):
    """Test functions in feedback_services."""

    def test_feedback_ids(self):
        """Test various conventions for thread and message ids."""
        exp_id = '0'
        feedback_services.create_thread(
            exp_id, 'a_state_name', None, 'a subject', 'some text')
        threadlist = feedback_services.get_all_threads(exp_id, False)
        self.assertEqual(len(threadlist), 1)
        thread_id = threadlist[0].get_thread_id()
        # The thread id should not have any full stops.
        self.assertNotIn('.', thread_id)

        messages = feedback_services.get_messages(exp_id, thread_id)
        self.assertEqual(len(messages), 1)
        message_id = messages[0].message_id
        self.assertTrue(isinstance(message_id, int))

        # Retrieve the message instance from the storage layer.
        datastore_id = feedback_models.FeedbackMessageModel.get_messages(
            exp_id, thread_id)[0].id
        full_thread_id = (feedback_models.FeedbackThreadModel
                          .generate_full_thread_id(exp_id, thread_id))
        # The message id should be prefixed with the full thread id and a full
        # stop, followed by the message id.
        self.assertEqual(
            datastore_id, '%s.%s' % (full_thread_id, message_id))

    def test_create_message_fails_if_invalid_thread_id(self):
        """Creating a message on a nonexistent thread should raise."""
        exp_id = '0'
        with self.assertRaises(
            feedback_models.FeedbackMessageModel.EntityNotFoundError
            ):
            feedback_services.create_message(
                exp_id, 'invalid_thread_id', 'user_id', None, None, 'Hello')

    def test_status_of_newly_created_thread_is_open(self):
        """A freshly created thread starts in the 'open' status."""
        exp_id = '0'
        feedback_services.create_thread(
            exp_id, 'a_state_name', None, 'a subject', 'some text')
        threadlist = feedback_services.get_all_threads(exp_id, False)
        thread_status = threadlist[0].status
        self.assertEqual(thread_status, feedback_models.STATUS_CHOICES_OPEN)
class SuggestionQueriesUnitTests(test_utils.GenericTestBase):
"""Test learner suggestion query functions in feedback_services."""
THREAD_ID1 = '1111'
THREAD_ID2 = '2222'
THREAD_ID3 = '3333'
THREAD_ID4 = '4444'
THREAD_ID5 = '5555'
EXP_ID1 = 'exp_id1'
EXP_ID2 = 'exp_id2'
USER_EMAIL = 'abc@xyz.com'
USERNAME = 'user123'
CURRENT_TIME_IN_MSEC = 12345678
    def _generate_thread_id(self, unused_exp_id):
        # Deterministic stand-in for FeedbackThreadModel.generate_new_thread_id
        # (swapped in by tests) so the created thread id is predictable.
        return self.THREAD_ID1
def setUp(self):
super(SuggestionQueriesUnitTests, self).setUp()
# Register users.
self.user_id = self.get_user_id_from_email(se | lf.USER_EMAIL)
user_services.get_or_create_user(self.user_id, self.USER_EMAIL)
self.signup(self.USER_EMAIL, self.USERNAME)
# Open thread with suggestion.
thread1 = feedback_models.FeedbackThreadModel(
id=feedback_models.FeedbackThreadModel.generate_full_thread_id(
self.EXP_ID1, self.THREAD_ID1),
exploration_id=self.EXP_ID1,
state_name='state_name',
| original_author_id=self.user_id,
subject='SUGGESTION',
has_suggestion=True)
# Closed threads with suggestion.
thread2 = feedback_models.FeedbackThreadModel(
id=feedback_models.FeedbackThreadModel.generate_full_thread_id(
self.EXP_ID1, self.THREAD_ID2),
exploration_id=self.EXP_ID1,
state_name='state_name',
original_author_id=self.user_id,
subject='SUGGESTION',
status=feedback_models.STATUS_CHOICES_FIXED,
has_suggestion=True)
thread3 = feedback_models.FeedbackThreadModel(
id=feedback_models.FeedbackThreadModel.generate_full_thread_id(
self.EXP_ID1, self.THREAD_ID3),
exploration_id=self.EXP_ID1,
state_name='state_name',
original_author_id=self.user_id,
subject='SUGGESTION',
status=feedback_models.STATUS_CHOICES_IGNORED,
has_suggestion=True)
# Closed thread without suggestion.
thread4 = feedback_models.FeedbackThreadModel(
id=feedback_models.FeedbackThreadModel.generate_full_thread_id(
self.EXP_ID1, self.THREAD_ID4),
exploration_id=self.EXP_ID1,
state_name='state_name',
original_author_id=self.user_id,
subject='NO SUGGESTION',
status=feedback_models.STATUS_CHOICES_IGNORED)
# Open thread without suggestion.
thread5 = feedback_models.FeedbackThreadModel(
id=feedback_models.FeedbackThreadModel.generate_full_thread_id(
self.EXP_ID1, self.THREAD_ID5),
exploration_id=self.EXP_ID1,
state_name='state_name',
original_author_id=self.user_id,
subject='NO SUGGESTION',
status=feedback_models.STATUS_CHOICES_OPEN)
for thread in [thread1, thread2, thread3, thread4, thread5]:
thread.put()
def test_create_and_get_suggestion(self):
with self.swap(feedback_models.FeedbackThreadModel,
'generate_new_thread_id', self._generate_thread_id):
feedback_services.create_suggestion(
self.EXP_ID2, self.user_id, 3, 'state_name',
'description', {'old_content': {}})
suggestion = feedback_services.get_suggestion(
self.EXP_ID2, self.THREAD_ID1)
thread = feedback_models.FeedbackThreadModel.get(
feedback_models.FeedbackThreadModel.generate_full_thread_id(
self.EXP_ID2, self.THREAD_ID1))
expected_suggestion_dict = {
'exploration_id': self.EXP_ID2,
'author_name': 'user123',
'exploration_version': 3,
'state_name': 'state_name',
'description': 'description',
'state_content': {'old_content': {}}
}
self.assertEqual(thread.status, feedback_models.STATUS_CHOICES_OPEN)
self.assertDictEqual(expected_suggestion_dict, suggestion.to_dict())
def test_get_open_threads_with_suggestions(self):
threads = feedback_services.get_open_threads(self.EXP_ID1, True)
self.assertEqual(len(threads), 1)
self.assertEqual(threads[0].id, self.EXP_ID1 + '.' + self.THREAD_ID1)
def test_get_open_threads_without_suggestions(self):
    """Only thread 5 is open with no suggestion attached."""
    open_without_suggestion = feedback_services.get_open_threads(
        self.EXP_ID1, False)
    self.assertEqual(
        [t.id for t in open_without_suggestion],
        ['%s.%s' % (self.EXP_ID1, self.THREAD_ID5)])
def test_get_closed_threads_with_suggestions(self):
    """Threads 2 and 3 are closed and carry suggestions."""
    closed_with_suggestion = feedback_services.get_closed_threads(
        self.EXP_ID1, True)
    self.assertEqual(
        [t.id for t in closed_with_suggestion],
        ['%s.%s' % (self.EXP_ID1, self.THREAD_ID2),
         '%s.%s' % (self.EXP_ID1, self.THREAD_ID3)])
def test_get_closed_threads_without_suggestions(self):
threads = feedback_services.get_closed_threads(self.EX |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/operations/_azure_firewalls_operations.py | Python | mit | 26,909 | 0.004645 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations(object):
"""AzureFirewallsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
" | ""
models = _models
def __init__(self, client, config, serializer, deserializer):
    self._client = client  # PipelineClient used to issue requests.
    self._serialize = serializer  # Serializes URL parts / request bodies.
    self._deserialize = deserializer  # Deserializes response bodies.
    self._config = config  # Client configuration (subscription id, polling interval, ...).
def _delete_initial(
    self,
    resource_group_name,  # type: str
    azure_firewall_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial DELETE request for an Azure Firewall.

    This is the raw (non-polling) call used by ``begin_delete``; it
    returns as soon as the service accepts the request (200/202/204).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth / not-found / conflict status codes to their specific
    # azure-core exceptions; callers may extend this via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all valid outcomes for an async delete.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    azure_firewall_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified Azure Firewall.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param azure_firewall_name: The name of the Azure Firewall.
    :type azure_firewall_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # First call: fire the initial DELETE before building the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            azure_firewall_name=azure_firewall_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call; don't forward them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Deletion reports completion via the Location header ('final-state-via').
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AzureFirewall"
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.AzureFirewall
:raises: ~azure.core. |
jmptrader/duktape | tools/duk_meta_to_strarray.py | Python | mit | 1,076 | 0.002788 | #!/usr/bin/env python2
#
# Create an array of C strings with Duktape built-in strings.
# Useful when using external strings.
#
import os
import sys
import json
def to_c_string(x):
    """Render the character sequence *x* as a C string literal.

    Characters outside printable ASCII, plus quotes and backslash, are
    emitted as ``\\xNN`` hex escapes.  Because a C hex escape has no
    fixed length, the literal is split (``"..." "..."``) right after
    every escape so a following literal hex digit cannot be absorbed
    into the escape.
    """
    res = '"'
    term = False  # True when the previous char was emitted as a hex escape.
    for c in x:
        if term:
            # Terminate and reopen the C string so that the previous
            # escape doesn't become ambiguous.
            term = False
            res += '" "'
        o = ord(c)
        if o < 0x20 or o > 0x7e or c in '\'"\\':
            res += '\\x%02x' % o
            term = True
        else:
            res += c
    res += '"'
    return res
def main():
    """Read the JSON metadata file named by argv[1] and print the
    duk_builtin_strings C array to stdout.

    NOTE: Python 2 only (uses xrange and str.decode('base64'), per the
    file's python2 shebang).
    """
    f = open(sys.argv[1], 'rb')
    d = f.read()
    f.close()
    meta = json.loads(d)

    print('const char *duk_builtin_strings[] = {')
    # Strings are stored base64 encoded in the metadata.
    strlist = meta['builtin_strings_base64']
    for i in xrange(len(strlist)):
        s = strlist[i]
        # Omit the trailing comma on the last entry.
        if i == len(strlist) - 1:
            print(' %s' % to_c_string(s.decode('base64')))
        else:
            print(' %s,' % to_c_string(s.decode('base64')))
    print('};')
if __name__ == '__main__':
main()
|
WeakGroup/twitter-rec | interface/website/website/settings.py | Python | gpl-2.0 | 2,100 | 0 | """
Django settings for website project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0a$bgpz#bn3hid==*$ee^@v79^6&q0_x$&&i=@(jivwnrv73b^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Enabled applications. 'django.contrib.staticfiles' was garbled in the
# stored copy and is restored here.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
# Request/response middleware stack (Django 1.6 style).
# 'django.contrib.auth.middleware.AuthenticationMiddleware' was garbled
# in the stored copy and is restored here.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'website.urls'
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
rajrohith/blobstore | azure/storage/_http/__init__.py | Python | apache-2.0 | 2,461 | 0.00122 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#----------------------------------------------------------------- | ---------
class HTTPError(Exception):
    '''
    Represents an HTTP Exception when response status code >= 300.

    :ivar int status:
        the status code of the response
    :ivar str message:
        the message
    :ivar list headers:
        the returned headers, as a list of (name, value) pairs
    :ivar bytes body:
        the body of the response
    '''

    def __init__(self, status, message, respheader, respbody):
        # NOTE(review): the docstring advertises 'headers'/'body' but the
        # attributes are actually stored as 'respheader'/'respbody';
        # confirm which name callers rely on before renaming either.
        self.status = status
        self.respheader = respheader
        self.respbody = respbody
        Exception.__init__(self, message)
class HTTPResponse(object):
    '''
    A response received from an HTTP request.

    :ivar int status:
        the status code of the response
    :ivar str message:
        the message
    :ivar dict headers:
        the returned headers
    :ivar bytes body:
        the body of the response
    '''

    def __init__(self, status, message, headers, body):
        # Store the response parts verbatim.
        self.status, self.message = status, message
        self.headers, self.body = headers, body
class HTTPRequest(object):
    '''
    Represents an HTTP Request.

    :ivar str host:
        the host name to connect to
    :ivar str method:
        the method to use to connect (string such as GET, POST, PUT, etc.)
    :ivar str path:
        the uri fragment
    :ivar dict query:
        query parameters
    :ivar dict headers:
        header values
    :ivar bytes body:
        the body of the request.
    '''

    def __init__(self):
        # Start from an empty request; callers fill the fields in.
        self.host = ''
        self.method = ''
        self.path = ''
        self.body = ''
        self.query = {}    # name -> value
        self.headers = {}  # header name -> header value
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes | anaconda/6.00.1x.hello.py | Python | mit | 126 | 0.007937 | # -*- coding: utf-8 -*-
"""
Spyde | r Editor
This is a temporary script file.
"" | "
print('hello python!')
print('I like 6.00.1x') |
tongwang01/tensorflow | tensorflow/python/training/supervisor.py | Python | apache-2.0 | 41,789 | 0.004068 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python import summary as _summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
reinitialize variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
  If one of the tasks crashes and restarts, `managed_session()`
  checks if the Model is initialized. If yes, it just creates a session and
  returns it to the training code that proceeds normally. If the model needs
  to be initialized, the chief task takes care of reinitializing it; the other
  tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
    the RPC interface to a specific server, and also allows the in-process
    master to access remote tensorflow workers. Often, it is
    appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
either the optionally `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed |
maoy/zknova | nova/compute/resource_tracker.py | Python | apache-2.0 | 23,300 | 0.000429 | # Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
from nova.compute import claims
from nova.compute import instance_types
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import context
from nova import exception
from nova. | openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
# Tunables for the resource tracker, registered on the global CONF
# object below.  The 'for the host' help text was garbled in the stored
# copy and is restored here.
resource_tracker_opts = [
    cfg.IntOpt('reserved_host_disk_mb', default=0,
               help='Amount of disk in MB to reserve for the host'),
    cfg.IntOpt('reserved_host_memory_mb', default=512,
               help='Amount of memory in MB to reserve for the host'),
    cfg.StrOpt('compute_stats_class',
               default='nova.compute.stats.Stats',
               help='Class that will manage stats for the local compute host')
]

CONF = cfg.CONF
CONF.register_opts(resource_tracker_opts)

LOG = logging.getLogger(__name__)

COMPUTE_RESOURCE_SEMAPHORE = claims.COMPUTE_RESOURCE_SEMAPHORE
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver, nodename):
    """Track resource usage for one (host, nodename) compute node.

    :param host: name of the compute service host
    :param driver: virt driver used by this compute service
    :param nodename: hypervisor node this tracker is responsible for
    """
    self.host = host
    self.driver = driver
    self.nodename = nodename
    self.compute_node = None  # populated by the first resource update
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def instance_claim(self, context, instance_ref, limits=None):
    """Indicate that some resources are needed for an upcoming compute
    instance build operation.

    This should be called before the compute node is about to perform
    an instance build operation that will consume additional resources.

    :param context: security context
    :param instance_ref: instance to reserve resources for
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs.
    :returns: A Claim ticket representing the reserved resources.  It can
              be used to revert the resource usage if an error occurs
              during the instance build.
    """
    if self.disabled:
        # compute_driver doesn't support resource tracking, just
        # set the 'host' and node fields and continue the build:
        self._set_instance_host_and_node(context, instance_ref)
        return claims.NopClaim()

    # sanity checks:
    if instance_ref['host']:
        LOG.warning(_("Host field should not be set on the instance until "
                      "resources have been claimed."),
                    instance=instance_ref)

    if instance_ref['node']:
        # Fixed log message typo: was "should be not be set".
        LOG.warning(_("Node field should not be set on the instance "
                      "until resources have been claimed."),
                    instance=instance_ref)

    claim = claims.Claim(instance_ref, self)

    # Holding COMPUTE_RESOURCE_SEMAPHORE makes test-and-claim atomic.
    if claim.test(self.compute_node, limits):
        self._set_instance_host_and_node(context, instance_ref)

        # Mark resources in-use and update stats
        self._update_usage_from_instance(self.compute_node, instance_ref)

        # persist changes to the compute node:
        self._update(context, self.compute_node)

        return claim

    else:
        raise exception.ComputeResourcesUnavailable()
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def resize_claim(self, context, instance_ref, instance_type, limits=None):
    """Indicate that resources are needed for a resize operation to this
    compute host.

    :param context: security context
    :param instance_ref: instance to reserve resources for
    :param instance_type: new instance_type being resized to
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs.
    :returns: A Claim ticket representing the reserved resources.  This
              should be turned into finalize a resource claim or free
              resources after the compute operation is finished.
    """
    if self.disabled:
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        migration_ref = self._create_migration(context, instance_ref,
                                               instance_type)
        return claims.NopClaim(migration=migration_ref)

    claim = claims.ResizeClaim(instance_ref, instance_type, self)

    # Holding COMPUTE_RESOURCE_SEMAPHORE makes test-and-claim atomic.
    if claim.test(self.compute_node, limits):
        # Record the migration while still under the semaphore so the
        # usage audit cannot race with this claim.
        migration_ref = self._create_migration(context, instance_ref,
                                               instance_type)
        claim.migration = migration_ref

        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(self.compute_node, migration_ref)
        elevated = context.elevated()
        self._update(elevated, self.compute_node)

        return claim

    else:
        raise exception.ComputeResourcesUnavailable()
def _create_migration(self, context, instance, instance_type):
    """Create a migration record for the upcoming resize.  This should
    be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
    claim will not be lost if the audit process starts.
    """
    # TODO(russellb): no-db-compute: Send the old instance type
    # info that is needed via rpc so db access isn't required
    # here.
    old_instance_type_id = instance['instance_type_id']
    old_instance_type = instance_types.get_instance_type(
        old_instance_type_id)

    # 'pre-migrating' marks the record as created but not yet acted on.
    return self.conductor_api.migration_create(
        context, instance,
        {'dest_compute': self.host,
         'dest_node': self.nodename,
         'dest_host': self.driver.get_host_ip_addr(),
         'old_instance_type_id': old_instance_type['id'],
         'new_instance_type_id': instance_type['id'],
         'status': 'pre-migrating'})
def _set_instance_host_and_node(self, context, instance_ref):
    """Tag the instance as belonging to this host.  This should be done
    while the COMPUTE_RESOURCES_SEMPAHORE is held so the resource claim
    will not be lost if the audit process starts.
    """
    values = {'host': self.host, 'node': self.nodename,
              'launched_on': self.host}
    # Persist via the conductor, then mirror the same values on the
    # local dict so in-process callers see them immediately.
    self.conductor_api.instance_update(context, instance_ref['uuid'],
                                       **values)
    instance_ref['host'] = self.host
    instance_ref['launched_on'] = self.host
    instance_ref['node'] = self.nodename
def abort_instance_claim(self, instance):
"""Remove usage from the given instance."""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
self._update_usage_from_instance(self.compute_node, instance)
ctxt = context.get_admin_context()
self._update(ctxt, |
amitsaha/learning | python/search/find_missing_number.py | Python | unlicense | 228 | 0.039474 | """"
Find the missing number in a sequence of the consecutive integers
1..n with exactly one value absent.
"""
def find_missing(arr):
    """Return the missing value from *arr*, a permutation of 1..n with
    one element removed (where n == len(arr) + 1).

    Uses the Gauss sum n*(n+1)/2 with integer arithmetic; the original
    used 2.0 and returned a float, which loses exactness for large n.
    """
    n = len(arr) + 1
    expected_total = n * (n + 1) // 2
    return expected_total - sum(arr)
# Demo: 3 is missing from each sequence.  Restored from garbled
# Python 2 print statements as function-call prints.
print(find_missing([5, 4, 2, 1]))
print(find_missing([10, 9, 2, 1, 8, 7, 6, 5, 4]))
|
nicorellius/pdxpixel | pdxpixel/apps/blog/migrations/0002_post_tags.py | Python | mit | 602 | 0.001661 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-30 15:39
# Restored garbled 'unicode_literals' import.
from __future__ import unicode_literals

from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
    """Add the taggit ``tags`` manager to the blog ``Post`` model."""

    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='tags',
            # 'taggit.TaggedItem' was garbled in the stored copy; restored.
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
|
chelberserker/mipt_cs_on_python | exes/ex4.py | Python | gpl-2.0 | 93 | 0.021505 | max = A[ | 1]
for i in range(len(A)):
if A.count(A[i]) > A.count(max):
max = A[i]
print | (max)
|
prasannav7/ggrc-core | src/ggrc/migrations/versions/20131113030336_3df9e3188977_remove_control_type_.py | Python | apache-2.0 | 991 | 0.005045 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""R | emove Control.type, add Control assessors
Revision ID: 3df9e3188977
Revises: 2277e3978eb5
Create Date: 2013-11-13 03:03:36.585160
"""
# revision identifiers, used by Alembic.
revision = '3df9e3188977'
down_revision = '2277e3978eb5'
from alembic import op
imp | ort sqlalchemy as sa
def upgrade():
op.add_column('controls', sa.Column('principal_assessor_id', sa.Integer(), nullable=True))
op.add_column('controls', sa.Column('secondary_assessor_id', sa.Integer(), nullable=True))
op.drop_column('controls', u'type_id')
def downgrade():
op.add_column('controls', sa.Column(u'type_id', sa.Integer(), nullable=True))
op.drop_column('controls', 'secondary_assessor_id')
op.drop_column('controls', 'principal_assessor_id')
|
hengyicai/OnlineAggregationUCAS | python/pyspark/statcounter.py | Python | apache-2.0 | 4,354 | 0.001608 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is ported from spark/util/StatCounter.scala
import copy
import math
try:
from numpy import maximum, minimum, sqrt
except ImportError:
maximum = max
minimum = min
sqrt = math.sqrt
class StatCounter(object):
    """Track count, mean, variance, min and max of a stream of numbers.

    Ported from spark/util/StatCounter.scala.  Values are folded in one
    at a time with an online (Welford-style) update, and two counters
    can be combined with mergeStats().
    """

    def __init__(self, values=None):
        """Create a counter, optionally pre-merging an iterable of values.

        The original signature used the mutable default ``values=[]``;
        ``None`` avoids the shared-mutable-default pitfall.  (``0L`` was
        also replaced by ``0``, which auto-promotes on Python 2.)
        """
        self.n = 0       # Running count of our values
        self.mu = 0.0    # Running mean of our values
        self.m2 = 0.0    # Running variance numerator (sum of (x - mean)^2)
        self.maxValue = float("-inf")
        self.minValue = float("inf")

        if values is not None:
            for v in values:
                self.merge(v)

    def merge(self, value):
        """Add a value into this StatCounter, updating the internal statistics."""
        delta = value - self.mu
        self.n += 1
        self.mu += delta / self.n
        self.m2 += delta * (value - self.mu)
        self.maxValue = maximum(self.maxValue, value)
        self.minValue = minimum(self.minValue, value)

        return self

    def mergeStats(self, other):
        """Merge another StatCounter into this one, adding up the internal statistics."""
        if not isinstance(other, StatCounter):
            raise Exception("Can only merge Statcounters!")

        if other is self:  # reference equality holds
            # Merge a copy so the fields we read aren't the ones being
            # overwritten.  BUGFIX: the previous code passed the copy to
            # merge(), which only accepts numbers ('value - self.mu')
            # and therefore raised a TypeError on self-merge.
            self.mergeStats(other.copy())
        elif self.n == 0:
            self.mu = other.mu
            self.m2 = other.m2
            self.n = other.n
            self.maxValue = other.maxValue
            self.minValue = other.minValue
        elif other.n != 0:
            delta = other.mu - self.mu
            if other.n * 10 < self.n:
                self.mu = self.mu + (delta * other.n) / (self.n + other.n)
            elif self.n * 10 < other.n:
                self.mu = other.mu - (delta * self.n) / (self.n + other.n)
            else:
                # Comparable sizes: plain weighted average is numerically safe.
                self.mu = (self.mu * self.n + other.mu * other.n) / (self.n + other.n)

            self.maxValue = maximum(self.maxValue, other.maxValue)
            self.minValue = minimum(self.minValue, other.minValue)
            self.m2 += other.m2 + (delta * delta * self.n * other.n) / (self.n + other.n)
            self.n += other.n
        return self

    def copy(self):
        """Return a deep copy of this StatCounter."""
        return copy.deepcopy(self)

    def count(self):
        return self.n

    def mean(self):
        return self.mu

    def sum(self):
        return self.n * self.mu

    def min(self):
        return self.minValue

    def max(self):
        return self.maxValue

    def variance(self):
        """Return the population variance (NaN if no values)."""
        if self.n == 0:
            return float('nan')
        else:
            return self.m2 / self.n

    def sampleVariance(self):
        """Return the sample variance (divides by N-1; NaN if n <= 1)."""
        if self.n <= 1:
            return float('nan')
        else:
            return self.m2 / (self.n - 1)

    def stdev(self):
        """Return the population standard deviation of the values."""
        return sqrt(self.variance())

    def sampleStdev(self):
        """Return the sample standard deviation (N-1 denominator)."""
        return sqrt(self.sampleVariance())

    def __repr__(self):
        return ("(count: %s, mean: %s, stdev: %s, max: %s, min: %s)" %
                (self.count(), self.mean(), self.stdev(), self.max(), self.min()))
|
fujicoin/fujicoin | test/functional/wallet_coinbase_category.py | Python | mit | 2,302 | 0.000869 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test coinbase transactions return the correct categories.
Tests listtransactions, listsinceblock, and gettransaction.
"""
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_array_result
)
class CoinbaseCategoryTest(FujicoinTestFramework):
    """Check that a coinbase transaction's category transitions
    immature -> generate -> orphan as it matures and is invalidated."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def assert_category(self, category, address, txid, skip):
        """Assert the coinbase tx reports `category` in listtransactions,
        listsinceblock and gettransaction alike."""
        assert_array_result(self.nodes[0].listtransactions(skip=skip),
                            {"address": address},
                            {"category": category})
        assert_array_result(self.nodes[0].listsinceblock()["transactions"],
                            {"address": address},
                            {"category": category})
        assert_array_result(self.nodes[0].gettransaction(txid)["details"],
                            {"address": address},
                            {"category": category})

    def run_test(self):
        # Generate one block to an address
        address = self.nodes[0].getnewaddress()
        self.nodes[0].generatetoaddress(1, address)
        hash = self.nodes[0].getbestblockhash()
        txid = self.nodes[0].getblock(hash)["tx"][0]

        # Coinbase transaction is immature after 1 confirmation
        self.assert_category("immature", address, txid, 0)

        # Mine another 99 blocks on top
        self.nodes[0].generate(99)
        # Coinbase transaction is still immature after 100 confirmations
        # (fixed garbled "ad | dress" token from the original line)
        self.assert_category("immature", address, txid, 99)

        # Mine one more block
        self.nodes[0].generate(1)
        # Coinbase transaction is now matured, so category is "generate"
        self.assert_category("generate", address, txid, 100)

        # Orphan block that paid to address
        self.nodes[0].invalidateblock(hash)
        # Coinbase transaction is now orphaned (fixed garbled "orphan | "
        # string literal from the original line)
        self.assert_category("orphan", address, txid, 100)


if __name__ == '__main__':
    CoinbaseCategoryTest().main()
|
UTSA-ICS/keystone-SID | keystone/tests/test_v3_oauth1.py | Python | apache-2.0 | 25,789 | 0.000039 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from six.moves import urllib
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone import config
from keystone import contrib
from keystone.contrib import oauth1
from keystone.contrib.oauth1 import controllers
from keystone import exception
from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common import importutils
from keystone.tests import test_v3
CONF = config.CONF
class OAuth1Tests(test_v3.RestfulTestCase):
    """Base class for OS-OAUTH1 extension tests.

    Sets up the extension's database tables and provides helpers for the
    consumer / request-token / access-token OAuth1 handshake.
    """

    EXTENSION_NAME = 'oauth1'
    EXTENSION_TO_ADD = 'oauth1_extension'

    CONSUMER_URL = '/OS-OAUTH1/consumers'

    def setup_database(self):
        # Run the oauth1 extension migrations on top of the base schema.
        super(OAuth1Tests, self).setup_database()
        package_name = '.'.join((contrib.__name__, self.EXTENSION_NAME))
        package = importutils.import_module(package_name)
        abs_path = migration_helpers.find_migrate_repo(package)
        migration.db_version_control(sql.get_engine(), abs_path)
        migration.db_sync(sql.get_engine(), abs_path)

    def setUp(self):
        super(OAuth1Tests, self).setUp()
        # Now that the app has been served, we can query CONF values
        self.base_url = 'http://localhost/v3'
        self.controller = controllers.OAuthControllerV3()

    def _create_single_consumer(self):
        # POST one consumer with a random description and return it.
        ref = {'description': uuid.uuid4().hex}
        resp = self.post(
            self.CONSUMER_URL,
            body={'consumer': ref})
        return resp.result['consumer']

    def _create_request_token(self, consumer, project_id):
        # Build a signed request-token request; returns (endpoint, headers).
        endpoint = '/OS-OAUTH1/request_token'
        client = oauth1.Client(consumer['key'],
                               client_secret=consumer['secret'],
                               signature_method=oauth1.SIG_HMAC,
                               callback_uri="oob")
        headers = {'requested_project_id': project_id}
        url, headers, body = client.sign(self.base_url + endpoint,
                                         http_method='POST',
                                         headers=headers)
        return endpoint, headers

    def _create_access_token(self, consumer, token):
        # Build a signed access-token request; returns (endpoint, headers).
        # (Fixed garbled "url, h | eaders" tokens from the original line.)
        endpoint = '/OS-OAUTH1/access_token'
        client = oauth1.Client(consumer['key'],
                               client_secret=consumer['secret'],
                               resource_owner_key=token.key,
                               resource_owner_secret=token.secret,
                               signature_method=oauth1.SIG_HMAC,
                               verifier=token.verifier)
        url, headers, body = client.sign(self.base_url + endpoint,
                                         http_method='POST')
        headers.update({'Content-Type': 'application/json'})
        return endpoint, headers

    def _get_oauth_token(self, consumer, token):
        # Build a signed v3 auth request using the oauth1 auth method.
        # (Fixed garbled "resourc | e_owner_key" token from the original.)
        client = oauth1.Client(consumer['key'],
                               client_secret=consumer['secret'],
                               resource_owner_key=token.key,
                               resource_owner_secret=token.secret,
                               signature_method=oauth1.SIG_HMAC)
        endpoint = '/auth/tokens'
        url, headers, body = client.sign(self.base_url + endpoint,
                                         http_method='POST')
        headers.update({'Content-Type': 'application/json'})
        ref = {'auth': {'identity': {'oauth1': {}, 'methods': ['oauth1']}}}
        return endpoint, headers, ref

    def _authorize_request_token(self, request_id):
        return '/OS-OAUTH1/authorize/%s' % (request_id)
class ConsumerCRUDTests(OAuth1Tests):
    # CRUD tests for the OS-OAUTH1 consumer resource.

    def _consumer_create(self, description=None, description_flag=True,
                         **kwargs):
        # Helper: POST a consumer and verify the response echoes the
        # description and contains a generated id and secret.
        # description_flag=False omits the 'description' key entirely.
        if description_flag:
            ref = {'description': description}
        else:
            ref = {}
        if kwargs:
            ref.update(kwargs)
        resp = self.post(
            self.CONSUMER_URL,
            body={'consumer': ref})
        consumer = resp.result['consumer']
        consumer_id = consumer['id']
        self.assertEqual(consumer['description'], description)
        self.assertIsNotNone(consumer_id)
        self.assertIsNotNone(consumer['secret'])
        return consumer

    def test_consumer_create(self):
        description = uuid.uuid4().hex
        self._consumer_create(description=description)

    def test_consumer_create_none_desc_1(self):
        # description key present but None
        self._consumer_create()

    def test_consumer_create_none_desc_2(self):
        # description key absent
        self._consumer_create(description_flag=False)

    def test_consumer_create_normalize_field(self):
        # If create a consumer with a field with : or - in the name,
        # the name is normalized by converting those chars to _.
        field_name = 'some:weird-field'
        field_value = uuid.uuid4().hex
        extra_fields = {field_name: field_value}
        consumer = self._consumer_create(**extra_fields)
        normalized_field_name = 'some_weird_field'
        self.assertEqual(field_value, consumer[normalized_field_name])

    def test_consumer_delete(self):
        consumer = self._create_single_consumer()
        consumer_id = consumer['id']
        resp = self.delete(self.CONSUMER_URL + '/%s' % consumer_id)
        self.assertResponseStatus(resp, 204)

    def test_consumer_get(self):
        # GET returns the consumer with a correct self link.
        consumer = self._create_single_consumer()
        consumer_id = consumer['id']
        resp = self.get(self.CONSUMER_URL + '/%s' % consumer_id)
        self_url = ['http://localhost/v3', self.CONSUMER_URL,
                    '/', consumer_id]
        self_url = ''.join(self_url)
        self.assertEqual(resp.result['consumer']['links']['self'], self_url)
        self.assertEqual(resp.result['consumer']['id'], consumer_id)

    def test_consumer_list(self):
        self._consumer_create()
        resp = self.get(self.CONSUMER_URL)
        entities = resp.result['consumers']
        self.assertIsNotNone(entities)
        self_url = ['http://localhost/v3', self.CONSUMER_URL]
        self_url = ''.join(self_url)
        self.assertEqual(resp.result['links']['self'], self_url)
        self.assertValidListLinks(resp.result['links'])

    def test_consumer_update(self):
        consumer = self._create_single_consumer()
        original_id = consumer['id']
        original_description = consumer['description']
        update_description = original_description + '_new'
        update_ref = {'description': update_description}
        update_resp = self.patch(self.CONSUMER_URL + '/%s' % original_id,
                                 body={'consumer': update_ref})
        consumer = update_resp.result['consumer']
        self.assertEqual(consumer['description'], update_description)
        self.assertEqual(consumer['id'], original_id)

    def test_consumer_update_bad_secret(self):
        # The secret is immutable: attempting to PATCH it is a 400.
        consumer = self._create_single_consumer()
        original_id = consumer['id']
        update_ref = copy.deepcopy(consumer)
        update_ref['description'] = uuid.uuid4().hex
        update_ref['secret'] = uuid.uuid4().hex
        self.patch(self.CONSUMER_URL + '/%s' % original_id,
                   body={'consumer': update_ref},
                   expected_status=400)
def test_consumer_update_bad_id(self):
consumer = self._create_single_consumer()
original_id = consumer['id']
original_description = consumer['description']
update_description = original_description + "_new"
update_ref = copy.deepcopy(consumer)
update_ref['description'] = update_description
update_ref['id'] = update_d |
Jokymon/hpcs | test_input/test_while.py | Python | gpl-3.0 | 36 | 0.027778 | i = 0
while i < 25:
| i = i + 1
| |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_subnets_operations.py | Python | mit | 35,492 | 0.0051 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations:
"""SubnetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers
        supplied by the generated service client."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_pa | rameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
re | quest = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        virtual_network_name: str,
        subnet_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified subnet.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param subnet_name: The name of the subnet.
        :type subnet_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved
        # continuation token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                subnet_name=subnet_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization callback; delete returns no body.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy: ARM polling by default, no-polling
        # when polling=False, or a caller-supplied polling object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Subnet":
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
|
FeroxTL/pynginxconfig | test.py | Python | mit | 3,830 | 0.002872 | #coding: utf8
import unittest
#from conf import NginxConfig
from blocks import KeyValueOption, KeyOption, Block
'''
s = """server {
nameserver 123;
}"""
s = """server 123;"""
a = NginxConfig()
a.load(s)
print(a.server)
#print(a.server.nameserver)
'''
class NgKVB(Block):
    # Minimal Block subclass with one key-value option; used as a fixture
    # by the tests below.
    kv = KeyValueOption('kv_value')
class Test(unittest.TestCase):
    def test_base(self):
        """
        Base tests of Block, KeyValueOption, KeyOption classes functionality
        """
        a1 = NgKVB()
        a2 = NgKVB()
        self.assertEqual(str(a1.kv), 'kv_value')
        self.assertEqual(str(a2.kv), 'kv_value')
        self.assertEqual('kv' in a1._options, True)
        self.assertEqual('kv' in a2._options, True)
        self.assertEqual(type(a1.kv), KeyValueOption)
        self.assertEqual(type(a2.kv), KeyValueOption)
        a1.kv = 'kv_another_value'
        # Assigning a plain string must wrap it in a fresh KeyValueOption,
        # without affecting other instances or the class default.
        self.assertEqual(type(a1.kv), KeyValueOption)
        self.assertEqual(id(a1.kv) == id(KeyValueOption), False)
        self.assertEqual(id(a2.kv) == id(KeyValueOption), False)
        self.assertEqual(str(a1.kv), 'kv_another_value')
        self.assertEqual(str(a2.kv), 'kv_value')

    def test_block_attribute_inheritance(self):
        """
        Tests that base class Block does not take any of child class attributes
        """
        a1 = NgKVB()
        self.assertEqual(hasattr(Block, '_options'), False)
        self.assertEqual(a1._options, ['kv'])
        self.assertEqual(hasattr(a1, 'kv'), True)

    def test_block_item_assigment(self):
        a1 = NgKVB()
        a1['kv'] = KeyValueOption('kv_value')
        self.assertEqual(a1._options, ['kv'])
        self.assertEqual(type(a1['kv']), KeyValueOption)

    def test_kv_option(self):
        """
        Tests key-value option
        """
        kv = KeyValueOption('value')
        self.assertEqual(kv.render('kv_name'), '\nkv_name value;')
        self.assertEqual(kv.render('kv_name', indent_level=1), '\n    kv_name value;')
        self.assertEqual(kv.render('kv_name', indent_level=2, indent_char='\t', indent=1), '\n\t\tkv_name value;')

    def test_k_option(self):
        """
        Tests key option
        """
        k = KeyOption()
        self.assertEqual(k.render('name'), '\nname;')
        self.assertEqual(k.render('name', indent_level=1), '\n    name;')
        self.assertEqual(k.render('name', indent_level=2, indent_char='\t', indent=1), '\n\t\tname;')

    def test_kv_block(self):
        """
        Tests key-value option in block
        """
        kvb = Block()
        kvb.kv = KeyValueOption('value')
        self.assertEqual(kvb.render('kbv_name'), '\nkbv_name {\n    kv value;\n}')

    def test_kv_block_initial(self):
        """
        Tests initial values in key-value block and deletions of attributes
        """
        kvb = NgKVB()
        self.assertEqual(str(kvb.kv), 'kv_value')
        self.assertEqual(kvb.kv.render('kv'), '\nkv kv_value;')
        # (fixed garbled "kvb_n | ame" literal and "| kvb.kv" assignment
        # from the original lines)
        self.assertEqual(kvb.render('kvb_name'), '\nkvb_name {\n    kv kv_value;\n}')
        kvb.kv = 'kv_another_value'
        self.assertEqual(type(kvb.kv), KeyValueOption)
        self.assertEqual(str(kvb.kv), 'kv_another_value')
        self.assertEqual(kvb.kv.render('kv'), '\nkv kv_another_value;')
        self.assertEqual(kvb.render('kvb_name'), '\nkvb_name {\n    kv kv_another_value;\n}')
        del kvb.kv
        self.assertEqual(hasattr(kvb, 'kv'), False)
        self.assertEqual('kv' in kvb._options, False)
        self.assertEqual(kvb.render('kvb_name'), '\nkvb_name {\n}')
        # A fresh instance still has the class default after the deletion.
        kvb2 = NgKVB()
        self.assertEqual(str(kvb2.kv), 'kv_value')
        self.assertEqual(kvb2.kv.render('kv'), '\nkv kv_value;')
        self.assertEqual(kvb2.render('kvb_name'), '\nkvb_name {\n    kv kv_value;\n}')


if __name__ == "__main__":
    unittest.main()
|
lhellebr/GreenTea | tttt/urls.py | Python | gpl-2.0 | 3,215 | 0.001555 | from django.conf import settings
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.views.decorators.cache import cache_page
import apps.api.urls
from apps.core.views import (HomePageView, JobDetailView, JobHistoryView,
JobsDiffView, JobsListView, TestDetailView,
TestsListView)
from apps.kerberos.views import LoginView
from apps.report.views import ReportListView, ReportPageView
admin.autodiscover()

# Cache timeout in minutes applied via cache_page() to the list views below.
CACHE = 0

urlpatterns = patterns('',
                       url(r'^xml/(?P<id>[0-9]+)$',
                           'apps.core.views.to_xml', name='beaker-xml'),
                       url(r'^import/$', 'apps.core.views.import_xml',
                           name='import-xml'),
                       url(r'^import/group$',
                           'apps.core.views.import_group', name='import-group'),
                       url(r'^api/', include(apps.api.urls)),
                       # url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
                       url(r'^tests/(?P<email>.+)$',
                           TestsListView.as_view(), name='tests-email'),
                       url(r'^reports/$',
                           ReportListView.as_view(), name='reports'),
                       url(r'^reports/(?P<id>[0-9]+)$',
                           ReportPageView.as_view(), name='report-page'),
                       url(r'^accounts/login',
                           LoginView.as_view(), name="login"),
                       url(r'^job/(?P<id>[0-9]+)$',
                           cache_page(60 * CACHE)(JobDetailView.as_view()), name='job-detail'),
                       url(r'^job_history/(?P<id>[0-9]+)$',
                           JobHistoryView.as_view(), name='job-history'),
                       url(r'^test/(?P<id>[0-9]+)$',
                           TestDetailView.as_view(), name='test-detail'),
                       url(r'^(Automation/)?[tT]ests.html$',
                           cache_page(60 * CACHE)(TestsListView.as_view()), name='tests-list'),
                       url(r'^(Automation/)?[jJ]obs.html$',
                           cache_page(60 * CACHE)(JobsListView.as_view()), name='jobs-list'),
                       # (fixed stray "|" artifacts that corrupted the next
                       # two view references in the original lines)
                       url(r'^(Automation/)?[dD]iffs.html$',
                           JobsDiffView.as_view(), name='jobs-diff'),
                       url(r'^(Automation/?)?$',
                           HomePageView.as_view(), name='homepage'),
                       url(r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:],
                           'django.views.static.serve',
                           {"document_root": settings.MEDIA_ROOT}),
                       url(r'^%s(?P<path>.*)$' % settings.STATIC_URL[1:],
                           'django.views.static.serve',
                           {"document_root": settings.STATIC_ROOT}),
                       url(r'^admin/doc/', include(
                           'django.contrib.admindocs.urls')),
                       url(r'^admin/', include(admin.site.urls)),
                       url(r'^admin/grappelli/', include('grappelli.urls')),
                       )
|
Davideddu/kivy-forkedtouch | kivy/uix/listview.py | Python | mit | 40,161 | 0.0001 | '''
List View
===========
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
The :class:`~kivy.uix.listview.ListView` widget provides a scrollable/pannable
viewport that is clipped to the scrollview's bounding box which contains
list item view instances.
The :class:`~kivy.uix.listview.ListView` implements an :class:`AbstractView` as
a vertical, scrollable list. The :class:`AbstractView` has one property:
:class:`~kivy.adapters.adapter`.
The :class:`~kivy.uix.listview.ListView` sets an adapter to one of a
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`,
:class:`~kivy.adapters.listadapter.ListAdapter` or a
:class:`~kivy.adapters.dictadapter.DictAdapter`.
Introduction
------------
Lists are central parts of many software projects. Kivy's approach to lists
includes providing solutions for simple lists, along with a substantial
framework for building lists of moderate to advanced complexity. For a new
user, it can be difficult to ramp up from simple to advanced. For
this reason, Kivy provides an extensive set of examples that you may wish to
run first, to get a taste of the range of functionality offered. You can tell
from the names of the examples that they illustrate the "ramping up" from
simple to advanced:
* kivy/examples/widgets/lists/list_simple.py
* kivy/examples/widgets/lists/list_simple_in_kv.py
* kivy/examples/widgets/lists/list_simple_in_kv_2.py
* kivy/examples/widgets/lists/list_master_detail.py
* kivy/examples/widgets/lists/list_two_up.py
* kivy/examples/widgets/lists/list_kv.py
* kivy/examples/widgets/lists/list_composite.py
* kivy/examples/widgets/lists/list_cascade.py
* kivy/examples/widgets/lists/list_cascade_dict.py
* kivy/examples/widgets/lists/list_cascade_images.py
* kivy/examples/widgets/lists/list_ops.py
Many of the examples feature selection, some restricting selection to single
selection, where only one item at at time can be selected, and others allowing
multiple item selection. Many of the examples illustrate how selection in one
list can be connected to actions and selections in another view or another list.
Find your own way of reading the documentation here, examining the source code
for the example apps and running the examples. Some may prefer to read the
documentation through first, others may want to run the examples and view their
code. No matter what you do, going back and forth will likely be needed.
Basic Example
-------------
In its simplest form, we make a listview with 100 items::
from kivy.uix.listview import ListView
from kivy.uix.gridlayout import GridLayout
class MainView(GridLayout):
def __init__(self, **kwargs):
kwargs['cols'] = 2
super(MainView, self).__init__(**kwargs)
list_view = ListView(
item_strings=[str(index) for index in range(100)])
self.add_widget(list_view)
if __name__ == '__main__':
        from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
Or, we could declare the listview using the kv language::
from kivy.uix.modalview import ModalView
from kivy.uix.listview import ListView
from kivy.uix.gr | idlayout import GridLayout
from kivy.lang import Builder
Builder.load_string("""
<ListViewModal>:
size_hint: None, None
size: 400, 400
ListView:
size_hint: .8, .8
item_strings: [str(index) for index in range(100)]
""")
class ListViewModal(ModalView):
def __init__(self, **kwargs):
super(ListViewModal, self).__init__(**kwargs)
class MainView(GridLayout):
def __init__(self, **kwargs):
kwargs['cols'] = 1
super(MainView, self).__init__(**kwargs)
listview_modal = ListViewModal()
self.add_widget(listview_modal)
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
Using an Adapter
-------------------
Behind the scenes, the basic example above uses the
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`. When the
constructor for the :class:`~kivy.uix.listview.ListView` sees that only a list
of
strings is provided as an argument (called item_strings), it creates an instance
of :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` using the
list of strings.
Simple in :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` means:
*without selection support*. It is a scrollable list of items that does not
respond to touch events.
To use a :class:`SimpleListAdapter` explicitly when creating a ListView instance,
do::
simple_list_adapter = SimpleListAdapter(
data=["Item #{0}".format(i) for i in range(100)],
cls=Label)
list_view = ListView(adapter=simple_list_adapter)
The instance of :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` has
a required data argument which contains data items to use for instantiating
Label views for the list view (note the cls=Label argument). The data items are
strings. Each item string is set by the
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` as the *text*
argument for each Label instantiation.
You can declare a ListView with an adapter in a kv file with special attention
given to the way longer python blocks are indented::
from kivy.uix.modalview import ModalView
from kivy.uix.listview import ListView
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivy.factory import Factory
# Note the special nature of indentation in the adapter declaration, where
# the adapter: is on one line, then the value side must be given at one
# level of indentation.
Builder.load_string("""
#:import label kivy.uix.label
#:import sla kivy.adapters.simplelistadapter
<ListViewModal>:
size_hint: None, None
size: 400, 400
ListView:
size_hint: .8, .8
adapter:
sla.SimpleListAdapter(
data=["Item #{0}".format(i) for i in range(100)],
cls=label.Label)
""")
class ListViewModal(ModalView):
def __init__(self, **kwargs):
super(ListViewModal, self).__init__(**kwargs)
class MainView(GridLayout):
def __init__(self, **kwargs):
kwargs['cols'] = 1
super(MainView, self).__init__(**kwargs)
listview_modal = ListViewModal()
self.add_widget(listview_modal)
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
ListAdapter and DictAdapter
---------------------------
For many uses of a list, the data is more than a simple list of strings.
Selection functionality is also often needed.
The :class:`~kivy.adapters.listadapter.ListAdapter` and
:class:`~kivy.adapters.dictadapter.DictAdapter` cover these more elaborate
needs.
The :class:`~kivy.adapters.listadapter.ListAdapter` is the base class for
:class:`~kivy.adapters.dictadapter.DictAdapter`, so we can start with it.
See the :class:`~kivy.adapters.listadapter.ListAdapter` docs for details, but
here are synopses of its arguments:
* *data*: strings, class instances, dicts, etc. that form the basis data
for instantiating views.
* *cls*: a Kivy view that is to be instantiated for each list item. There
are several built-in types available, including ListItemLabel and
ListItemButton, or you can make your own class that mixes in the
required :class:`~kivy.uix.listview.SelectableView`.
* *template*: the name of a Kivy language (kv) template that defines the
Kivy view for each list item.
.. note::
Pick only one, cls or template, to provide as an argument.
* *args_converter*: a function that takes a data item object as input and
uses it to build and return an args dict, ready
to be used in a call to instantiate item views using the item view cls
or template. In the case of cls, the args dict acts as a
kwargs object. For a template, it is treated as a c |
all-umass/superman | superman/baseline/mario.py | Python | mit | 3,423 | 0.013439 | from __future__ import absolute_import, print_function
import numpy as np
import warnings
from numpy.polynomial.hermite import hermvander
from six.moves import xrange
from .common import Baseline
try:
from cvxopt import matrix as cvx_matrix, solvers
except ImportError:
from scipy.optimize import linprog
HAS_CVXOPT = False
else:
HAS_CVXOPT = True
# Module-wide numpy print settings for the verbose solver output.
np.set_printoptions(precision=4, suppress=True)
# Mutable state shared with the linprog callback across iterations.
_callback_state = {'last_nit':0, 'last_phase':0}
def mario_baseline(bands, intensities, poly_order=10, max_iters=None,
                   verbose=False, tol=1e-2):
  """Estimate a spectrum baseline via a Hermite-polynomial linear program.

  Solves the LP  min_u f'u  s.t.  -P'u <= -s,  where u are the coefficients
  of a Hermite polynomial basis P. Falls back to successively lower
  polynomial orders until the solver finds a solution; returns all-zeros if
  no order succeeds.
  """
  bands = bands.astype(float)
  intensities = intensities.astype(float)
  if max_iters is None:
    max_iters = 10 * len(bands)
  solver_opts = dict(maxiter=max_iters, disp=verbose, tol=tol)
  progress_cb = _linprog_callback if verbose else None
  # Work on the flipped (upside-down) spectrum.
  maxval = intensities.max() + 500
  flipped = maxval - intensities
  # Try decreasing polynomial orders until the LP solves.
  for order in xrange(poly_order, 0, -1):
    solution, basis = _mario_helper(bands, flipped, order, solver_opts,
                                    progress_cb)
    if verbose:
      print('With order %d:' % order, solution['status'])
    if solution['x'] is not None:
      break
  else:
    warnings.warn('mario_baseline didnt find a fit at any order')
    return np.zeros_like(flipped)
  fitted = basis.dot(np.array(solution['x']).ravel())
  # Un-flip to get the baseline in the original orientation.
  return maxval - fitted
def _mario_helper(bands, s, poly_order, opts, callback):
  """Build the Hermite basis and solve the LP for one polynomial order.

  Returns a (result, P) pair: ``result`` is a dict with at least 'status'
  and 'x' ('x' is None on failure); ``P`` is the (len(bands), poly_order)
  Hermite basis matrix.
  """
  # Build the polynomial basis over the bands.
  P = hermvander(bands, poly_order-1)
  f = P.sum(axis=0)
  if HAS_CVXOPT:
    solvers.options['show_progress'] = opts['disp']
    solvers.options['maxiters'] = opts['maxiter']
    solvers.options['abstol'] = opts['tol']
    solvers.options['reltol'] = opts['tol']
    solvers.options['feastol'] = 1e-100  # For some reason this helps.
    try:
      res = solvers.lp(cvx_matrix(f), cvx_matrix(-P), cvx_matrix(-s))
    except ValueError as e:
      # This can be thrown when poly_order is too large for the data size.
      # BUG FIX: exceptions have no .message attribute on Python 3 (and it
      # was deprecated since Python 2.6); use str(e) instead.
      res = {'status': str(e), 'x': None}
    return res, P
  res = linprog(f, A_ub=-P, b_ub=-s, bounds=(-np.inf, np.inf), options=opts,
                callback=callback)
  res = {'status': res.message, 'x': res.x if res.success else None}
  return res, P
def _linprog_callback(xk, nit=0, phase=0, tableau=None, **kwargs):
  """Verbose progress printer for scipy.optimize.linprog (simplex method).

  Tracks phase transitions in the module-level _callback_state dict so that
  phase-2 iteration numbers restart from zero.
  """
  # The current objective value is the bottom-right entry of the tableau.
  obj = -tableau[-1, -1]
  new_state = False
  if _callback_state['last_phase'] != phase:
    new_state = True
    # BUG FIX: this line was corrupted by a stray " | " token in the source.
    _callback_state['last_phase'] = phase
  if phase == 1:
    if new_state:
      print('--- Phase 1: Find a feasible point. ---')
      print('Iter\tObjective')
    _callback_state['last_nit'] = nit
    print('%d\t%g' % (nit, obj))
  else:
    if new_state:
      print('--- Phase 2: Minimize using simplex. ---')
      print('Iter\tObjective')
    print('%d\t%g' % (nit - _callback_state['last_nit'], obj))
class Mario(Baseline):
  """Baseline-removal wrapper delegating to :func:`mario_baseline`."""

  def __init__(self, poly_order=10, max_iters=None, verbose=False, tol=1e-2):
    (self.poly_order_, self.max_iters_,
     self.verbose_, self.tol_) = poly_order, max_iters, verbose, tol

  def _fit_one(self, bands, intensities):
    """Fit the baseline for a single spectrum."""
    return mario_baseline(bands, intensities, self.poly_order_,
                          self.max_iters_, self.verbose_, self.tol_)

  def param_ranges(self):
    """Tunable hyperparameter search space."""
    return {'poly_order_': (1, 12, 'integer')}
|
theDevilsVoice/milton | summarize.py | Python | mit | 3,315 | 0.023228 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
#title : summarize.py
#description : summarize for each IP listed, all of the ports being used,
# and where possible to do a port range (don't want to
# list out thousands of individual port numbers)
#author :
#date : 8/18/2016
#version : 0.1
#usage : Place data files into the "data" directory to be processed
#notes :
'''
import os
import sys
import re
import itertools
from datetime import datetime
# set this to 1 to see all the messages
DEBUG = 0

data_dir = "data"
if not os.path.exists(data_dir):
    print("Data directory does not exist, please create it")
    # BUG FIX: the bare name `exit` was a no-op expression (the builtin was
    # referenced but never called), so execution used to continue without a
    # data directory and os.listdir() would fail later.
    sys.exit(1)

results_dir = "results"
if not os.path.exists(results_dir):
    if DEBUG : print("Results directory does not exist, creating")
    os.mkdir(results_dir)
def sanitize_ip(ip):
    """Return True if `ip` is a well-formed dotted-quad IPv4 address."""
    octets = ip.split('.')
    if len(octets) != 4:
        return False
    # Every octet must be a non-negative integer within [0, 255].
    return all(o.isdigit() and 0 <= int(o) <= 255 for o in octets)
def sanitize_ports(port_list):
    """Parse a comma-separated string of ports into a list of ints.

    Ports outside [1, 65535] are dropped. Tokens that are not valid
    integers are skipped (robustness fix: the old code raised ValueError
    on any non-numeric token instead of sanitizing it away).
    """
    result = []
    for port in port_list.split(","):
        try:
            number = int(port)
        except ValueError:
            continue  # skip malformed tokens
        if 1 <= number <= 65535:
            result.append(number)
    return result  # return list of ports as int
def GroupRanges(items):
    """Yields 2-tuples of (start, end) ranges from a sequence of numbers.

    Args:
      items: an iterable of numbers, sorted ascendingly and without duplicates.
    Yields:
      2-tuples of (start, end) ranges. start and end will be the same
      for ranges of 1 number. Yields nothing for empty input (robustness
      fix: the old code let StopIteration escape, which PEP 479 turns into
      a RuntimeError inside generators on Python 3.7+).
    """
    myiter = iter(items)
    try:
        start = next(myiter)
    except StopIteration:
        return  # empty input -> no ranges
    end = start
    for num in myiter:
        if num == end + 1:
            # Extend the current contiguous run.
            end = num
        else:
            yield (start, end)
            start = num
            end = num
    yield (start, end)
def main():
    """Aggregate "ip,port" lines from every file in the data directory.

    Groups ports per IP, collapses consecutive ports into (start, end)
    ranges and writes one line per IP to a timestamped file in the
    results directory.
    """
    results_dict = {}
    for filename in os.listdir(data_dir):
        filename = data_dir + "/" + filename
        print("Processing " + filename)
        try:
            lines = [line.rstrip('\n') for line in open(filename)]
            for each_line in lines:
                ip, port = each_line.split(",")
                if ip in results_dict:
                    # IP was already validated; just accumulate the port.
                    results_dict[ip] = results_dict[ip] + "," + port
                else:
                    # Sanitize the ip since it's new.
                    if sanitize_ip(ip):
                        results_dict[ip] = port
                    else:
                        print ("Skipping invalid IP address")
        except Exception as e:
            print("There was an issue with your data: ")
            print(e)
    # Post-process the results into ranges and write them out.
    my_filename = results_dir + "/" + datetime.now().strftime("%Y%m%d-%H%M%S") + ".txt"
    print ("Writing output to file: " + my_filename)
    with open(my_filename, "w") as fh:
        for key, value in results_dict.items():
            # BUG FIX: GroupRanges returns a generator; the old code consumed
            # it in the DEBUG print and then wrote an empty list to the file.
            # Materialize the ranges exactly once and reuse the list.
            ranges = list(GroupRanges(sanitize_ports(value)))
            if DEBUG: print("FINAL RESULT: " + key + " list: " + str(ranges))
            fh.write(key + " " + str(ranges) + "\n")

if __name__ == "__main__":
    sys.exit(main())
|
ddong8/ihasy | lib/variables.py | Python | bsd-3-clause | 1,059 | 0.008499 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2017 ihasy.com
# Do have a faith in what you're doing.
# Make your life a story worth telling.
import t | ime
import re
import random
from htmlentity import unescape
from HTMLParser import HTMLParser
def date(timestamp, formatter):
    """Format a UNIX `timestamp` (UTC) according to a strftime `formatter`."""
    utc_parts = time.gmtime(float(timestamp))
    return time.strftime(formatter, utc_parts)
def build_uri(uri, param, value):
    """Return `uri` with query parameter `param` set to `value`.

    If `param` already appears in the query string its value is replaced;
    otherwise the pair is appended using '?' or '&' as appropriate.
    """
    # BUG FIX: escape `param` so regex metacharacters in the parameter name
    # cannot break the pattern or match unintended text.
    regx = re.compile(r"[\?&](%s=[^\?&]*)" % re.escape(param))
    find = regx.search(uri)
    split = "&" if "?" in uri else "?"
    if not find: return "%s%s%s=%s" % (uri, split, param, value)
    # BUG FIX: plain string replacement instead of re.sub, so the existing
    # value and the new value are treated literally (regex metacharacters
    # and backslashes in them used to corrupt the result).
    return uri.replace(find.group(1), "%s=%s" % (param, value))
def strip_tags(html):
    """Return the text content of `html` with all markup tags removed."""
    text = html.strip().strip("\n")
    pieces = []
    parser = HTMLParser()
    # Route the parser's text callback straight into our buffer; tags and
    # other markup are simply discarded.
    parser.handle_data = pieces.append
    parser.feed(text)
    parser.close()
    return "".join(pieces)
def gen_random():
    """Return a uniform pseudo-random float in the interval [0.0, 1.0)."""
    return random.random()
# Registry of helper functions exposed to the template engine.
# NOTE(review): gen_random is defined above but never registered here —
# confirm whether that omission is intentional.
template_variables = {}
template_variables["build_uri"] = build_uri
template_variables["date"] = date
template_variables["strip_tags"] = strip_tags
|
0x00f/pelican | pelican/tools/pelican_import.py | Python | agpl-3.0 | 11,582 | 0.003626 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
import time
from codecs import open
from pelican.utils import slugify
def wp2fields(xml):
    """Opens a wordpress XML file, and yield pelican fields.

    Yields one (title, content, filename, date, author, categories, tags,
    markup) tuple per published post in the export.
    """
    try:
        from BeautifulSoup import BeautifulStoneSoup
    except ImportError:
        error = ('Missing dependency '
                 '"BeautifulSoup" required to import Wordpress XML files.')
        sys.exit(error)
    # NOTE: `open` is codecs.open (module-level import), so the encoding
    # argument is valid on Python 2 as well.
    xmlfile = open(xml, encoding='utf-8').read()
    soup = BeautifulStoneSoup(xmlfile)
    items = soup.rss.channel.findAll('item')
    for item in items:
        # Only export posts that were actually published.
        # BUG FIX: the 'wp:status' and content:encoded lines were corrupted
        # by stray " | " tokens in the source; reconstructed here.
        if item.fetch('wp:status')[0].contents[0] == "publish":
            try:
                title = item.title.contents[0]
            except IndexError:
                # Skip items that have no title at all.
                continue
            content = item.fetch('content:encoded')[0].contents[0]
            filename = item.fetch('wp:post_name')[0].contents[0]
            raw_date = item.fetch('wp:post_date')[0].contents[0]
            date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
            date = time.strftime("%Y-%m-%d %H:%M", date_object)
            author = item.fetch('dc:creator')[0].contents[0].title()
            categories = [cat.contents[0] for cat in item.fetch(domain='category')]
            # caturl = [cat['nicename'] for cat in item.fetch(domain='category')]
            tags = [tag.contents[0] for tag in item.fetch(domain='post_tag')]
            yield (title, content, filename, date, author, categories, tags, "html")
def dc2fields(file):
    """Opens a Dotclear export file, and yield pelican fields.

    The export is a flat text dump: a [category ...] section mapping
    category ids to names, then a [post ...] section with one
    '","'-separated record per post. Yields one (title, content, slug,
    date, author, categories, tags, markup) tuple per post.
    """
    try:
        from BeautifulSoup import BeautifulStoneSoup
    except ImportError:
        error = ('Missing dependency '
                 '"BeautifulSoup" required to import Dotclear files.')
        sys.exit(error)
    # Section-tracking flags for the two-phase parse.
    in_cat = False
    in_post = False
    category_list = {}
    posts = []
    with open(file, 'r', encoding='utf-8') as f:
        for line in f:
            # remove final \n
            line = line[:-1]
            if line.startswith('[category'):
                in_cat = True
            elif line.startswith('[post'):
                in_post = True
            elif in_cat:
                # Category records: id is field 0, name is field 2.
                fields = line.split('","')
                if not line:
                    in_cat = False
                else:
                    # remove 1st and last ""
                    fields[0] = fields[0][1:]
                    # fields[-1] = fields[-1][:-1]
                    category_list[fields[0]]=fields[2]
            elif in_post:
                # A blank line terminates the post section.
                if not line:
                    in_post = False
                    break
                else:
                    posts.append(line)
    print("%i posts read." % len(posts))
    for post in posts:
        # Fixed-position fields of a Dotclear post record; unused ones are
        # kept as commented documentation of the layout.
        fields = post.split('","')
        # post_id = fields[0][1:]
        # blog_id = fields[1]
        # user_id = fields[2]
        cat_id = fields[3]
        # post_dt = fields[4]
        # post_tz = fields[5]
        post_creadt = fields[6]
        # post_upddt = fields[7]
        # post_password = fields[8]
        # post_type = fields[9]
        post_format = fields[10]
        # post_url = fields[11]
        # post_lang = fields[12]
        post_title = fields[13]
        post_excerpt = fields[14]
        post_excerpt_xhtml = fields[15]
        post_content = fields[16]
        post_content_xhtml = fields[17]
        # post_notes = fields[18]
        # post_words = fields[19]
        # post_status = fields[20]
        # post_selected = fields[21]
        # post_position = fields[22]
        # post_open_comment = fields[23]
        # post_open_tb = fields[24]
        # nb_comment = fields[25]
        # nb_trackback = fields[26]
        post_meta = fields[27]
        # redirect_url = fields[28][:-1]
        # remove seconds
        post_creadt = ':'.join(post_creadt.split(':')[0:2])
        author = ""
        categories = []
        tags = []
        if cat_id:
            categories = [category_list[id].strip() for id in cat_id.split(',')]
        # Get tags related to a post
        # NOTE(review): this strips PHP-serialized wrappers by hand; it
        # assumes at most 9 tags (only tag[:1], a single digit, is read).
        tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '')
        if len(tag) > 1:
            if int(tag[:1]) == 1:
                newtag = tag.split('"')[1]
                # NOTE: `unicode` makes this function Python-2 only.
                tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES )))
            else:
                i=1
                j=1
                while(i <= int(tag[:1])):
                    # Tag names sit at every other '"'-split position.
                    newtag = tag.split('"')[j].replace('\\','')
                    tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES )))
                    i=i+1
                    if j < int(tag[:1])*2:
                        j=j+2
        """
        dotclear2 does not use markdown by default unless you use the markdown plugin
        Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
        """
        if post_format == "markdown":
            content = post_excerpt + post_content
        else:
            content = post_excerpt_xhtml + post_content_xhtml
            content = content.replace('\\n', '')
            post_format = "html"
        yield (post_title, content, slugify(post_title), post_creadt, author, categories, tags, post_format)
def feed2fields(file):
    """Read a syndication feed and yield pelican fields."""
    import feedparser
    parsed = feedparser.parse(file)
    for entry in parsed.entries:
        # Entries without an update timestamp get date=None.
        if hasattr(entry, "updated_parsed"):
            date = time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
        else:
            date = None
        author = entry.author if hasattr(entry, "author") else None
        if hasattr(entry, "tags"):
            tags = [e['term'] for e in entry.tags]
        else:
            tags = None
        slug = slugify(entry.title)
        yield (entry.title, entry.description, slug, date, author, [], tags, "html")
def build_header(title, date, author, categories, tags):
    """Build a reST-style header from a list of fields.

    Note: `author` is accepted for signature symmetry but not emitted.
    """
    lines = [title, '#' * len(title)]
    if date:
        lines.append(':date: %s' % date)
    if categories:
        lines.append(':category: %s' % ', '.join(categories))
    if tags:
        lines.append(':tags: %s' % ', '.join(tags))
    return '\n'.join(lines) + '\n\n'
def build_markdown_header(title, date, author, categories, tags):
    """Build a Markdown metadata header from a list of fields.

    Note: `author` is accepted for signature symmetry but not emitted.
    """
    lines = ['Title: %s' % title]
    if date:
        lines.append('Date: %s' % date)
    if categories:
        lines.append('Category: %s' % ', '.join(categories))
    if tags:
        lines.append('Tags: %s' % ', '.join(tags))
    return '\n'.join(lines) + '\n\n'
def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=False):
for title, content, filename, date, author, categories, tags, in_markup in fields:
if (in_markup == "markdown") or (out_markup == "markdown") :
ext = '.md'
header = build_markdown_header(title, date, author, categories, tags)
else:
out_markup = "rst"
ext = '.rst'
header = build_header(title, date, author, categories, tags)
filename = os.path.basename(filename)
# option to put files in directories with categories names
if dircat and (len(categories) == 1):
catname = slugify(categories[0])
out_filename = os.path.join(output_path, catname, filename+ext)
if not os.path.isdir(os.path.join(output_path, catname)):
os.mkdir(os.path.join(output_path, catname))
else:
out_filename = os.path.join(output_path, filename+ext)
print(out_filename)
if in_markup == "html":
html_filename = os.path.join(output_path, filename+'.html')
with open(html_filename, 'w', encoding='utf-8') as fp:
# Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion
paragraphs = content.split('\n\n')
paragraphs = [u'<p>{}</p>'.format(p) for p in paragraphs]
new_content = ''.join(paragraphs)
|
Jannes123/inasafe | safe/impact_statistics/aggregator.py | Python | gpl-3.0 | 69,859 | 0.000115 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid - **Aggregator.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '19/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import sys
import logging
import time
import numpy
from collections import OrderedDict
from qgis.core import (
QgsMapLayer,
QgsGeometry,
QgsMapLayerRegistry,
QgsFeature,
QgsFeatureRequest,
QgsRectangle,
QgsPoint,
QgsField,
QgsFields,
QgsVectorLayer,
QgsVectorFileWriter,
QGis,
QgsSingleSymbolRendererV2,
QgsFillSymbolV2,
QgsCoordinateReferenceSystem)
# pylint: disable=no-name-in-module
from qgis.analysis import QgsZonalStatistics
# pylint: enable=no-name-in-module
from PyQt4 import QtGui, QtCore
from safe.storage.core import read_layer as safe_read_layer
from safe.storage.utilities import (
calculate_polygon_centroid,
safe_to_qgis_layer)
from safe.impact_statistics.zonal_stats import calculate_zonal_stats
from safe.utilities.clipper import clip_layer
from safe.defaults import get_defaults
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.gis import (
layer_attribute_names,
create_memory_layer,
is_polygon_layer)
from safe.utilities.styling import set_vector_graduated_style
from safe.common.utilities import (
temp_dir,
unique_filename,
feature_attributes_as_dict,
get_utm_epsg)
from safe.common.exceptions import ReadLayerError, PointsInputError
from safe.gis.polygon import (
in_and_outside_polygon as points_in_and_outside_polygon)
from safe.common.signals import (
DYNAMIC_MESSAGE_SIGNAL,
STATIC_MESSAGE_SIGNAL,
)
from safe import messaging as m
from safe.definitions import global_default_attribute, do_not_use_attribute
from safe.messaging import styles
from safe.common.exceptions import (
KeywordNotFoundError,
NoKeywordsFoundError,
InvalidParameterError,
KeywordDbError,
InvalidAggregatorError,
UnsupportedProviderError,
InvalidLayerError,
InsufficientParametersError)
from safe_extras.pydispatch import dispatcher
PROGRESS_UPDATE_STYLE = styles.PROGRESS_UPDATE_STYLE
INFO_STYLE = styles.INFO_STYLE
WARNING_STYLE = styles.WARNING_STYLE
LOGGER = logging.getLogger('InaSAFE')
# If inasafe is running as qgis plugin,
# it can import processing (from QGIS / sextante),
# pylint: disable=F0401
import processing
# pylint: enable=F0401
class Aggregator(QtCore.QObject):
"""The aggregator class facilitates aggregation of impact f | unction results.
"""
    def __init__(self, extent, aggregation_layer):
        """Director for aggregation based operations.

        :param extent: Analysis extent used when no aggregation layer is
            given (AOI mode) — format as consumed by _extents_to_layer();
            confirm against callers.

        :param aggregation_layer: Layer representing clipped aggregation
            areas. This will be converted to a memory layer inside this class.
            see self.layer
        :type aggregation_layer: QgsVectorLayer, QgsMapLayer
        """
        QtCore.QObject.__init__(self)
        self.hazard_layer = None  # Used in deintersect() method
        self.exposure_layer = None  # Used in deintersect() method
        self.safe_layer = None  # Aggregation layer in SAFE format
        self.prefix = 'aggr_'
        self.attributes = {}
        self.attribute_title = None
        self._sum_field_name = None
        self.set_sum_field_name()
        # use qgis or inasafe zonal stats
        flag = bool(QtCore.QSettings().value(
            'inasafe/use_native_zonal_stats', False, type=bool))
        self.use_native_zonal_stats = flag
        self._extent = extent
        self._keyword_io = KeywordIO()
        self._defaults = get_defaults()
        self.error_message = None
        self.target_field = None
        # self.impact_layer_attributes is a list of list of dict
        # [
        #     [{...},{...},{...}],
        #     [{...},{...},{...}]
        # ]
        # It contains lists of objects that are covered by
        # aggregation polygons (one list for one polygon)
        self.impact_layer_attributes = []
        self.processing = processing
        # If this flag is not True, no aggregation or postprocessing will run
        # this is set as True by validateKeywords()
        self.is_valid = False
        self.show_intermediate_layers = False
        # This is used to hold an *in memory copy* of the aggregation layer
        # or None if the clip extents should be used.
        self.layer = None
        if aggregation_layer is None:
            self.aoi_mode = True
            # Will be completed in _prepareLayer just before deintersect call
            self.layer = self._create_polygon_layer()
        else:
            self.aoi_mode = False
            self.layer = aggregation_layer
        self.statistics_type = None
        self.statistics_classes = None
        self.preprocessed_feature_count = None
        # If no keywords are associated with self.layer yet,
        # set up dummy (empty) keywords so later reads succeed.
        try:
            _ = self.read_keywords(self.layer)
        except NoKeywordsFoundError:
            # No kw file was found for layer - create an empty one.
            keywords = {}
            self.write_keywords(
                self.layer, keywords)
    @property
    def extent(self):
        """Analysis extent this aggregator operates over."""
        return self._extent
@extent.setter
def extent(self, value):
self._extent = value
# update layer extent to match impact layer if in aoi_mode
if self.aoi_mode:
try:
self.layer = self._extents_to_layer()
self.safe_layer = safe_read_layer(self.layer.source())
except (InvalidLayerError,
UnsupportedProviderError,
KeywordDbError):
raise
def read_keywords(self, layer, keyword=None):
"""It is a wrapper around self._keyword_io.read_keywords
:param layer: Layer you want to get the keywords for.
:type layer: QgsMapLayer
:param keyword: Optional specific keyword you want the value for.
:type keyword: str
:returns: KeywordIO.read_keywords object
:rtype: KeywordIO.read_keywords
:raises: All exceptions are propagated.
"""
try:
return self._keyword_io.read_keywords(layer, keyword=keyword)
except:
raise
def update_keywords(self, layer, keywords):
"""It is a wrapper around self._keyword_io.update_keywords
:param layer: Layer you want to get the keywords for.
:type layer: QgsMapLayer
:param keywords: Dict of keywords to apply to the existing keywords.
:type keywords: dict
:raises: All exceptions are propagated.
"""
try:
self._keyword_io.update_keywords(layer, keywords=keywords)
except:
raise
def get_statistics(self, layer):
"""It is a wrapper around self._keyword_io.read_keywords
:param layer: Layer you want to get the keywords for.
:type layer: QgsMapLayer
:returns: KeywordIO.get_statistics object
:rtype: KeywordIO.get_statistics
:raises: All exceptions are propagated.
"""
try:
return self._keyword_io.get_statistics(layer)
except:
raise
def copy_keywords(self, layer, out_filename):
"""It is a wrapper around self._keyword_io.copy_keywords
:param layer: Layer you want to get the keywords for.
:type layer: QgsMapLayer
:param out_filename: Output filename that the keywords should be
written to.
:type out_filename: str
:raises: All exceptions are propagated.
"""
try:
self._keyword_io.copy_keywords(layer, out_filename)
except:
raise
def write_keywords(self, layer |
stackforge/watcher | watcher/common/config.py | Python | apache-2.0 | 1,465 | 0 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from watcher.common import rpc
from watcher import version
def parse_args(argv, default_config_files=None, default_config_dirs=None):
    """Parse CLI/config options, then initialize watcher RPC.

    :param argv: full process argument vector; argv[0] is skipped.
    :param default_config_files: optional explicit list of config files;
        discovered via oslo.config when omitted.
    :param default_config_dirs: optional explicit list of config dirs;
        discovered via oslo.config when omitted.
    """
    default_config_files = (default_config_files or
                            cfg.find_config_files(project='watcher'))
    # BUG FIX: this assignment was corrupted by a stray " | " token in the
    # source ("def | ault_config_dirs"); reconstructed here.
    default_config_dirs = (default_config_dirs or
                           cfg.find_config_dirs(project='watcher'))
    # Messages are exchanged on the 'watcher' control exchange.
    rpc.set_defaults(control_exchange='watcher')
    cfg.CONF(argv[1:],
             project='watcher',
             version=version.version_info.release_string(),
             default_config_dirs=default_config_dirs,
             default_config_files=default_config_files)
    rpc.init(cfg.CONF)
mozilla-metrics/hadoop-etls | fhr/v3/base_etl_job.py | Python | bsd-2-clause | 1,464 | 0.007514 | #!/usr/bin/env python
import sys, os
import codecs
import datetime
import mrjob.job
import mrjob.protocol
try: # workaround
from fhrdata import FHRData
import util
except ImportError:
pass
class BaseETLJob(mrjob.job.MRJob):
    """Common base for FHR ETL map-reduce jobs.

    Provides error counters, shared command-line options and a helper that
    parses one "<key>\\t<json>" input line into an FHRData record.
    """

    HADOOP_INPUT_FORMAT = 'SequenceFileAsTextInputFormat'
    OUTPUT_PROTOCOL = mrjob.protocol.RawValueProtocol

    def mark_invalid_input(self):
        self.increment_counter("errors", "invalid_input_line")

    def mark_invalid_json(self):
        self.increment_counter("errors", "invalid_json")

    def mark_invalid_report(self):
        self.increment_counter("errors", "invalid_report")

    def mark_invalid_date(self):
        # BUG FIX: the counter group was "error", inconsistent with the
        # "errors" group used by every other marker in this class.
        self.increment_counter("errors", "invalid_date")

    def configure_options(self):
        super(BaseETLJob, self).configure_options()
        self.add_passthrough_option(
            '--field-separator', default=chr(1),
            help="Specify field separator")
        self.add_passthrough_option(
            '--snapshot-date', default=datetime.datetime.now().strftime("%Y-%m-%d"),
            # BUG FIX: help text was a copy-paste of --field-separator's.
            help="Specify snapshot date (YYYY-MM-DD)")

    def get_fhr_report(self, line):
        """Parse a tab-separated input line; return an FHRData or None."""
        rec = raw_json = None
        try:
            raw_json = line.split("\t",1)[1]
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.
            self.mark_invalid_input()
            return
        try:
            rec = FHRData(raw_json)
        except Exception:
            # BUG FIX: was a bare ``except:`` as well.
            self.mark_invalid_json()
            return
        return rec
|
utkarshsins/baadal-libvirt-python | examples/event-test.py | Python | gpl-2.0 | 22,504 | 0.004799 | #!/usr/bin/python -u
#
#
#
#################################################################################
# Start off by implementing a general purpose event loop for anyones use
#################################################################################
import sys
import getopt
import os
import libvirt
import select
import errno
import time
import threading
# For the sake of demonstration, this example program includes
# an implementation of a pure python event loop. Most applications
# would be better off just using the default libvirt event loop
# APIs, instead of implementing this in python. The exception is
# where an application wants to integrate with an existing 3rd
# party event loop impl
#
# Change this to 'False' to make the demo use the native
# libvirt event loop impl
use_pure_python_event_loop = True
do_debug = False
def debug(msg):
    """Print a diagnostic message only when the module-level flag is on."""
    global do_debug
    if not do_debug:
        return
    print(msg)
#
# This general purpose event loop will support waiting for file handle
# I/O and errors events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
    class virEventLoopPureHandle:
        """Bookkeeping for one registered file-handle watch."""
        def __init__(self, handle, fd, events, cb, opaque):
            self.handle = handle  # unique watch identifier
            self.fd = fd  # file descriptor being watched
            self.events = events  # bitmask of event constants to monitor
            self.cb = cb  # user callback
            self.opaque = opaque  # opaque data passed through to cb
        def get_id(self):
            return self.handle
        def get_fd(self):
            return self.fd
        def get_events(self):
            return self.events
        def set_events(self, events):
            self.events = events
        def dispatch(self, events):
            # Invoke the user callback with the events that actually fired.
            self.cb(self.handle,
                    self.fd,
                    events,
                    self.opaque)
# This class contains the data we need to track for a
# single periodic timer
    class virEventLoopPureTimer:
        """Bookkeeping for one registered periodic timer."""
        def __init__(self, timer, interval, cb, opaque):
            self.timer = timer  # unique timer identifier
            # Period in ms; a negative interval is skipped by next_timeout(),
            # i.e. the timer appears disabled.
            self.interval = interval
            self.cb = cb  # user callback
            self.opaque = opaque  # opaque data passed through to cb
            self.lastfired = 0  # absolute ms timestamp of the last dispatch
        def get_id(self):
            return self.timer
        def get_interval(self):
            return self.interval
        def set_interval(self, interval):
            self.interval = interval
        def get_last_fired(self):
            return self.lastfired
        def set_last_fired(self, now):
            self.lastfired = now
        def dispatch(self):
            # Invoke the user callback.
            self.cb(self.timer,
                    self.opaque)
def __init__(self):
self.poll = select.poll()
self.pipetrick = os.pipe()
self.pendingWak | eup = False
self.runningPoll = False
self.nextHandleID = 1
| self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
self.poll.register(self.pipetrick[0], select.POLLIN)
# Calculate when the next timeout is due to occur, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
# calcuates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
# When poll() returns, there will zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
# Due to the coarse granularity of schedular timeslices, if
# we ask for a sleep of 500ms in order to satisfy a timer, we
# may return up to 1 schedular timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
    def run_once(self):
        """Run a single event-loop iteration: sleep in poll(), then
        dispatch any file-handle events and due timers.

        NOTE(review): the ``except (...), e`` syntax below is Python-2
        only; this module cannot run unmodified on Python 3.
        """
        # sleep == -1 makes poll() block indefinitely (no timer pending).
        sleep = -1
        self.runningPoll = True
        try:
            next = self.next_timeout()
            debug("Next timeout due at %d" % next)
            if next > 0:
                now = int(time.time() * 1000)
                if now >= next:
                    sleep = 0
                else:
                    # poll() takes seconds here, timestamps are in ms.
                    sleep = (next - now) / 1000.0
            debug("Poll with a sleep of %d" % sleep)
            events = self.poll.poll(sleep)
            # Dispatch any file handle events that occurred
            for (fd, revents) in events:
                # See if the events was from the self-pipe
                # telling us to wakup. if so, then discard
                # the data just continue
                if fd == self.pipetrick[0]:
                    self.pendingWakeup = False
                    data = os.read(fd, 1)
                    continue
                h = self.get_handle_by_fd(fd)
                if h:
                    debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
                    h.dispatch(self.events_from_poll(revents))
            now = int(time.time() * 1000)
            for t in self.timers:
                interval = t.get_interval()
                if interval < 0:
                    continue
                want = t.get_last_fired() + interval
                # Deduct 20ms, since scheduler timeslice
                # means we could be ever so slightly early
                if now >= (want-20):
                    debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
                    t.set_last_fired(now)
                    t.dispatch()
        except (os.error, select.error), e:
            # Interrupted system calls (EINTR) are benign; anything else
            # is re-raised.
            if e.args[0] != errno.EINTR:
                raise
        finally:
            self.runningPoll = False
# Actually the event loop forever
    def run_loop(self):
        """Run the event loop until another thread sets self.quit = True."""
        self.quit = False
        while not self.quit:
            self.run_once()
    def interrupt(self):
        """Wake the poll() sleep in run_once() via the self-pipe trick."""
        # Only needed when a poll is actually in flight and no wakeup byte
        # is already pending.
        if self.runningPoll and not self.pendingWakeup:
            self.pendingWakeup = True
            os.write(self.pipetrick[1], 'c'.encode("UTF-8"))
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an |
adexin/Python-Machine-Learning-Samples | Naive_bayes_mnist/nb.py | Python | mit | 2,416 | 0.001656 |
import numpy as np
import util
from datetime import datetime
from scipy.stats import norm
import better_exceptions
from scipy.stats import multivariate_normal as mvn
class NaiveBayers(object):
    """Gaussian Naive Bayes classifier for integer labels 0..K-1.

    Each class is modelled as a Gaussian with a per-class mean vector and a
    single pooled scalar variance shared across all features (isotropic
    covariance).
    """

    def __init__(self):
        # Per-class Gaussian parameters: label -> {'mean': ..., 'var': ...}.
        self.gaussians = dict()
        # Per-class prior probabilities: label -> P(class).
        self.priors = dict()

    def fit(self, X, Y, smoothing=10e-3):
        """Estimate per-class means, pooled variances and priors.

        X is an (N, D) feature matrix, Y an (N,) array of integer labels.
        `smoothing` is added to every variance to avoid degenerate
        zero-variance Gaussians.
        """
        N, D = X.shape
        total = len(Y)
        for label in set(Y):
            rows = X[Y == label]
            # NOTE: np.var over the whole (transposed) slice yields ONE
            # pooled scalar, not a per-feature variance vector.
            self.gaussians[label] = {
                'mean': rows.mean(axis=0),
                'var': np.var(rows.T) + smoothing,
            }
            # Prior = fraction of training samples in this class.
            self.priors[label] = float(len(Y[Y == label])) / total

    def score(self, X, Y):
        """Return the mean accuracy of self.predict(X) against Y."""
        predictions = self.predict(X)
        return np.mean(predictions == Y)

    def predict(self, X):
        """Return the most probable class label for every row of X."""
        N, D = X.shape
        K = len(self.gaussians)
        log_posteriors = np.zeros((N, K))
        for label, params in self.gaussians.items():
            # log P(x|c) + log P(c), evaluated for all samples at once.
            log_posteriors[:, label] = (
                mvn.logpdf(X, mean=params['mean'], cov=params['var'])
                + np.log(self.priors[label]))
        return np.argmax(log_posteriors, axis=1)
if __name__ == '__main__':
    # Get train data
    X, Y = util.get_data(40000)
    # Only the first half of the training data is actually used below.
    Ntrain = len(Y) // 2
    Xtest, Ytest = util.get_test_data(40000)
    Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
    # Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
    model = NaiveBayers()
    # Time the fit and both evaluations separately.
    t0 = datetime.now()
    model.fit(Xtrain, Ytrain)
    print("Training time: ", (datetime.now() - t0))
    t0 = datetime.now()
    print("Training accuracy: ", model.score(Xtrain, Ytrain))
    print("Time to compute train accuracy: ", (datetime.now() - t0), "Train size: ", len(Ytrain))
    t0 = datetime.now()
    print("Test accuracy: ", model.score(Xtest, Ytest))
    print("Time to compute test accuracy: ", (datetime.now() - t0), "Test size: ", len(Ytest))
|
hackerberry/ooni-probe | old-to-be-ported-code/old-api/httpt.py | Python | bsd-2-clause | 3,102 | 0.002901 | """
This is a self genrated test created by scaffolding.py.
you will need to fill it up with all your necessities.
Safe hacking :).
"""
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from ooni.plugoo.tests import ITest, OONITest
from ooni.plugoo.assets import Asset
from ooni.protocols import http
from ooni.utils import log
class httptArgs(usage.Options):
    # Command-line options: a urls file, a single url, a resume index and
    # an optional YAML redirect-rules file.
    optParameters = [['urls', 'f', None, 'Urls file'],
                     ['url', 'u', 'http://torproject.org/', 'Test single site'],
                     ['resume', 'r', 0, 'Resume at this index'],
                     ['rules', 'y', None, 'Specify the redirect rules file']]
class httptTest(http.HTTPTest):
implements(IPlugin, ITest)
shortName = "httpt"
description = "httpt"
requirements = None
options = httptArgs
blocking = False
def testPattern(self, value, pattern, type):
if type == 'eq':
return value == pattern
elif type == 're':
import re
if re.match(pattern, value):
return True
else:
return False
else:
return None
def testPatterns(self, patterns, location):
test_result = False
if type(patterns) == list:
for pattern in patterns:
test_result |= self.testPattern(location, pattern['value'], pattern['type'])
else:
test_result |= self.testPattern(location, patterns['value'], patterns['type'])
return test_result
def testRules(self, rules, location):
result = {}
blocked = False
for rule, value in rules.items():
current_rule = {}
current_rule['name'] = value['name']
current_rule['patterns'] = value['patterns']
current_rule['test'] = self.testPatterns(value['patterns'], location)
blocked |= current_rule['test']
result[rule] = current_rule
result['blocked'] = blocked
return result
def processRedirect(self, location):
self.result['redirect'] = None
try:
rules_file = self.local_options['rules']
import yaml
rules = yaml.load(open(rules_file))
log.msg("Testing rules %s" % rules)
redirect = self.testRules(rules, location)
self.result['redirect'] = redirect
except TypeError:
log.msg("No rules file. Got a redirect, but nothing to do.")
def control(self, experiment_result, args):
print self.response
print self.request
# What you return here ends up inside | of the report.
log.msg("Running control")
return {}
def load_assets(self):
if self.local_options and self.local_options['urls']:
return {'url': Asset(self.local_options['urls'])}
else:
return {}
# We need to instantiate it otherwise getPlugins does not detect it
# XXX Find a way to load plugins without instantiating them.
#httpt = httptTest(None, None, None)
|
from setuptools import setup, find_packages

# Distribution metadata for the ducttape deployment helpers.
# (FIX: reconstructed the import line and `zip_safe=False`, which were
# corrupted by stray " | " separators.)
setup(
    name='ducttape',
    version='0.1',
    url='http://github.com/adamrt/ducttape/',
    license='ISC',
    author='Adam Patterson',
    author_email='adam@adamrt.com',
    description='Server deployment tools based on fabric.',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'fabric'
    ],
)
|
OCA/knowledge | document_page/__manifest__.py | Python | agpl-3.0 | 1,086 | 0 | # Copyright (C) 2004-2010 Tiny SPRL (<http://tin | y.be>).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    "name": "Document Page",
    "version": "13.0.1.1.0",
    "category": "Knowledge Management",
    "author": "OpenERP SA, Odoo Community Association (OCA)",
    # screenshots shown on the app listing
    "images": [
        "images/category_list.png",
        "images/create_category.png",
        "images/page_list.png",
        "images/create_page.png",
        "images/customer_invoice.jpeg",
        "images/page_history.png",
    ],
    # FIX: URL was corrupted by a stray " | " separator ("github.co | m")
    "website": "https://github.com/OCA/knowledge",
    "license": "AGPL-3",
    "depends": ["mail", "knowledge"],
    # data files loaded on module install/upgrade, in load order
    "data": [
        "security/document_page_security.xml",
        "security/ir.model.access.csv",
        "wizard/document_page_create_menu.xml",
        "wizard/document_page_show_diff.xml",
        "views/document_page.xml",
        "views/document_page_category.xml",
        "views/document_page_history.xml",
        "views/document_page_assets.xml",
        "views/report_document_page.xml",
    ],
    "demo": ["demo/document_page.xml"],
}
|
foss-ag/workshop_minecraft-python | src/astroids/Astroid.py | Python | gpl-3.0 | 2,132 | 0.001876 | import pygame
import random
class Astroid:
    """An astroid sprite: screen position, render scale and hit tracking.

    (FIX: reconstructed the image-size line, which was corrupted by a
    stray " | " separator.)
    """

    def __init__(self, x, y, scale):
        """
        Create Astroid object.

        :param x: start position X.
        :param y: start position Y.
        :param scale: astroid size scale factor.
        """
        # astroid position
        self.__x = x
        self.__y = y
        # astroid size scale
        self.__scale = scale
        # hit count
        self.__hit_count = 0
        # astroid image, scaled up by the requested factor
        self.__img = pygame.image.load('src/astroid.png')
        size = self.__img.get_size()
        self.__img = pygame.transform.scale(self.__img, (size[0] * self.__scale, size[1] * self.__scale))
        self.__astroid_rect = pygame.Rect(self.__img.get_rect())

    @staticmethod
    def create_astroid(state, size):
        """
        Create a new random astroid and add it to the astroids list in the
        game state.

        :param state: game state with astroids list.
        :param size: screen size (width, height).
        """
        state.add_astroid(Astroid(size[0] - 5, random.randint(50, size[1] - 30), random.randint(1, 6)))

    @property
    def x(self):
        return self.__x

    @property
    def y(self):
        return self.__y

    @property
    def pos(self):
        # current (x, y) position tuple
        return (self.__x, self.__y)

    @property
    def scale(self):
        return self.__scale

    @property
    def hit_count(self):
        return self.__hit_count

    @property
    def image(self):
        return self.__img

    def get_rect(self):
        """Return the astroid rectangle moved to the current position."""
        self.__astroid_rect.top = self.__y
        self.__astroid_rect.left = self.__x
        return self.__astroid_rect

    def increment_hit_count(self):
        """Increment astroid hit count by 1."""
        self.__hit_count += 1

    def move(self, x):
        """Shift the astroid horizontally by the given x offset."""
        self.__x += x
|
frmichel/vo-support-tools | CE/monitor-ce/processors/running_ratio_per_ce.py | Python | mit | 3,434 | 0.023879 | #!/usr/bin/python
#
# This tool exploits the data of csv files produced by script collect-ce-job-status.py,
# to compute the running ratio R/(R+W) for each CE, as a function of time.
#
# Results are stored in files named results/CE/<CE queue>.csv.
import sys
import os
import csv
from operator import itemgetter, attrgetter
import globvars
# -- | -----------------------------------------------------------------------
# Try to figure out good and bad CEs: compute the list of CE queues
# Input:
# dataFiles: list of tuples: (fileName, datetime, date, hour, rows, sum_VO_Waiting, sum_VO_Running)
# where
# - datetime is formated as "YYYY-MM-DD HH:MM:SS"
# - date is only the date part YYYY:MM:DD, and hour is | only the hour HH (used for filtering data in excel file)
# - rows is a dictionnary wich keys are the hostnames and values are another dictionnary with the following keys:
# 'Site'
# 'ImplName', 'ImplVer'
# 'CE_Total', 'VO_Total'
# 'CE_Running', 'VO_Running'
# 'CE_Waiting', 'VO_Waiting'
# 'CE_Running', 'VO_Running'
# 'CE_FreeSlots', 'VO_FreeSlots'
# 'CE_MaxTotal', 'VO_MaxTotal'
# 'CE_MaxWaiting', 'VO_MaxWaiting'
# 'CE_MaxRunning', 'VO_MaxRunning'
# 'CE_WRT', 'VO_WRT'
# 'CE_MaxTotal', 'VO_MaxTotal'
# 'CE_ERT', 'VO_ERT'
# 'CE_Status'
# -------------------------------------------------------------------------
def process(dataFiles):
# Global variables
DECIMAL_MARK = globvars.DECIMAL_MARK
DEBUG = globvars.DEBUG
OUTPUT_DIR = globvars.OUTPUT_DIR
MONCE = globvars.MONCE
# -------------------------------------------------------------------------
# Compute the running ratio per CE as a function of time
# -------------------------------------------------------------------------
print "Computing the ratio R/(R+W) per CE as a function of time..."
# Loop on all data files that were acquired in dataFiles, and build a new table 'queues' that consolidates data per CE queue
queues = {}
for (fileName, datetime, date, hour, fileRows, sum_VO_Waiting, sum_VO_Running) in dataFiles:
# Loop on all rows of the file
for (hostname, row) in fileRows.iteritems():
if hostname not in queues: # add only one entry for each CE queue
queues[hostname] = {'Site': row['Site']}
W = float(row['VO_Waiting'])
R = float(row['VO_Running'])
ratio = -1.0
if R+W != 0: ratio = R/(R+W)
queues[hostname][datetime] = { 'Waiting': row['VO_Waiting'], 'Running': row['VO_Running'], 'Ratio': ratio }
# Then for each CE, make a csv file that records the data per date: W, R, R/(R+W)
for (hostname, data) in queues.iteritems():
ceFileName = hostname.replace(':', '_').replace('/', '_')
outputFile = OUTPUT_DIR + os.sep + "CE" + os.sep + ceFileName + "_running_ratio.csv"
outputf = open(outputFile, 'wb')
writer = csv.writer(outputf, delimiter=';')
writer.writerow(["# Date time", "Waiting", "Running", "R/(R+W)"])
for (datetime, row) in data.iteritems():
if datetime != "Site":
strRatio = ""
if row['Ratio'] != -1.0:
strRatio = str(round(row['Ratio'], 4)).replace('.', globvars.DECIMAL_MARK)
writer.writerow([
datetime,
row['Waiting'],
row['Running'],
strRatio
])
outputf.close()
|
QiJune/Paddle | python/paddle/fluid/tests/unittests/test_merge_ids_op.py | Python | apache-2.0 | 1,489 | 0 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestMergeIdsOp(OpTest):
def setUp(self):
self.op_type = "merge_ids"
ids = np.array([[0], [2], [2], [3], [5], [5], [6]]).astype('int64')
x0 = np.array([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]]).astype('float32')
x1 = np.array([]).astype('float32')
| x | 2 = np.array([[0.4, 0.5], [0.4, 0.5], [0.5, 0.6],
[0.5, 0.6]]).astype('float32')
out = np.array([[0.1, 0.2], [0.4, 0.5], [0.4, 0.5], [0.2, 0.3],
[0.5, 0.6], [0.5, 0.6], [0.3, 0.4]]).astype('float32')
self.inputs = {'Ids': ids, "X": [('x0', x0), ('x1', x1), ('x2', x2)]}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
unittest.main()
|
Southpaw-TACTIC/Team | src/python/Lib/site-packages/win32com/client/tlbrowse.py | Python | epl-1.0 | 8,026 | 0.03065 | import win32ui
import win32con
import win32api
import string
import commctrl
import pythoncom
from pywin.mfc import dialog
error = "TypeLib browser internal error"
FRAMEDLG_STD = win32con.WS_CAPTION | win32con.WS_SYSMENU
SS_STD = win32con.WS_CHILD | win32con.WS_VISIBLE
BS_STD = SS_STD | win32con.WS_TABSTOP
ES_STD = BS_STD | win32con.WS_BORDER
LBS_STD = ES_STD | win32con.LBS_NOTIFY | win32con.LBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL
CBS_STD = ES_STD | win32con.CBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL
typekindmap = {
pythoncom.TKIND_ENUM : 'Enumeration',
pythoncom.TKIND_RECORD : 'Record',
pythoncom.TKIND_MODULE : 'Module',
pythoncom.TKIND_INTERFACE : 'Interface',
pythoncom.TKIND_DISPATCH : 'Dispatch',
pythoncom.TKIND_COCLASS : 'CoClass',
pythoncom.TKIND_ALIAS : 'Alias',
pythoncom.TKIND_UNION : 'Union'
}
TypeBrowseDialog_Parent=dialog.Dialog
class TypeBrowseDialog(TypeBrowseDialog_Parent):
"Browse a type library"
IDC_TYPELIST = 1000
IDC_MEMBERLIST = 1001
IDC_PARAMLIST = 1002
IDC_LISTVIEW = 1003
def __init__(self, typefile = None):
TypeBrowseDialog_Parent.__init__(self, self.GetTemplate())
try:
if typefile:
self.tlb = pythoncom.LoadTypeLib(typefile)
else:
self.tlb = None
except pythoncom.ole_error:
self.MessageBox("The file does not contain type information")
self.tlb = None
self.HookCommand(self.CmdTypeListbox, self.IDC_TYPELIST)
self.HookCommand(self.CmdMemberListbox, self.IDC_MEMBERLIST)
def OnAttachedObjectDeath(self):
self.tlb = None
self.typeinfo = None
self.attr = None
return TypeBrowseDialog_Parent.OnAttachedObjectDeath(self)
def _SetupMenu(self):
menu = win32ui.CreateMenu()
flags=win32con.MF_STRING|win32con.MF_ENABLED
menu.AppendMenu(flags, win32ui.ID_FILE_OPEN, "&Open...")
menu.AppendMenu(flags, win32con.IDCANCEL, "&Close")
mainMenu = win32ui.CreateMenu()
mainMenu.AppendMenu(flags|win32con.MF_POPUP, menu.GetHandle(), "&File")
self.SetMenu(mainMenu)
self.HookCommand(self.OnFileOpen,win32ui.ID_FILE_OPEN)
def OnFileOpen(self, id, code):
openFlags = win32con.OFN_OVERWRITEPROMPT | win32con.OFN_FILEMUSTEXIST
fspec = "Type Libraries (*.tlb, *.olb)|*.tlb;*.olb|OCX Files (*.ocx)|*.ocx|DLL's (*.dll)|*.dll|All Files (*.*)|*.*||"
dlg = win32ui.CreateFileDialog(1, None, None, openFlags, fspec)
if dlg.DoModal() == win32con.IDOK:
try:
self.tlb = pythoncom.LoadTypeLib(dlg.GetPathName())
except pythoncom.ole_error:
self.MessageBox("The file does not contain type information")
self.tlb = None
self._SetupTLB()
def OnInitDialog(self):
self._SetupMenu()
self.typelb = self.GetDlgItem(self.IDC_TYPELIST)
self.memberlb = self.GetDlgItem(self.IDC_MEMBERLIST)
self.paramlb = self.GetDlgItem(self.IDC_PARAMLIST)
self.listview = self.GetDlgItem(self.IDC_LISTVIEW)
# Setup the listview columns
itemDetails = (commctrl.LVCFMT_LEFT, 100, "Item", 0)
self.listview.InsertColumn(0, itemDetails)
itemDetails = (commctrl.LVCFMT_LEFT, 1024, "Details", 0)
self.listview.InsertColumn(1, itemDetails)
if self.tlb is None:
self.OnFileOpen(None,None)
else:
self._SetupTLB()
return TypeBrowseDialog_Parent.OnInitDialog(self)
def _SetupTLB(self):
self.typelb.ResetContent()
self.memberlb.ResetContent()
self.paramlb.ResetContent()
self.typeinfo = None
self.attr = None
if self.tlb is None: return
n = self.tlb.GetTypeInfoCount()
for i in range(n):
self.typelb.AddString(self.tlb.GetDocumentation(i)[0])
def _SetListviewTextItems(self, items):
self.listview.DeleteAllItems()
index = -1
for item in items:
index = self.listview.InsertItem(index+1,item[0])
data = item[1]
if data is None: data = ""
self.listview.SetItemText(index, 1, data)
def SetupAllInfoTypes(self):
infos = self._GetMainInfoTypes() + self._GetMethodInfoTypes()
self._SetListviewTextItems(infos)
def _GetMainInfoTypes(self):
pos = self.typelb.GetCurSel()
if pos<0: return []
docinfo = self.tlb.GetDocumentation(pos)
infos = [('GUID', str(self.attr[0]))]
infos.append(('Help File', docinfo[3]))
infos.append(('Help Context', str(docinfo[2])))
try:
infos.append(('Type Kind', typekindmap[self.tlb.GetTypeInfoType(pos)]))
except:
pass
info = self.tlb.GetTypeInfo(pos)
attr = info.GetTypeAttr()
infos.append(('Attributes', str(attr)))
for j in range(attr[8]):
flags = info.GetImplTypeFlags(j)
refInfo = info.GetRefTypeInfo(info.GetRefTypeOfImplType(j))
doc = refInfo.GetDocumentation(-1)
attr = refInfo.GetTypeAttr()
typeKind = attr[5]
typeFlags = attr[11]
desc = doc[0]
desc = desc + ", Flags=0x%x, typeKind=0x%x, typeFlags=0x%x" % (flags, typeKind, typeFlags)
if flags & pythoncom.IMPLTYPEFLAG_FSOURCE:
desc = desc + "(Source)"
infos.append( ('Implements', desc))
return infos
def _GetMethodInfoTypes(self):
pos = self.memberlb.GetCurSel()
if pos<0: return []
realPos, isMethod = self._GetRealMemberPos(pos)
ret = []
if isMethod:
funcDesc = self.typeinfo.GetFuncDesc(realPos)
id = funcDesc[0]
ret.append(("Func Desc", str(funcDesc)))
else:
id = self.typeinfo.GetVarDesc(realPos)[0]
docinfo = self.typeinfo.GetDocumentation(id)
ret.append(('Help String', docinfo[1]))
ret.append(('Help Context', str(docinfo[2])))
return ret
def CmdTypeListbox(self, id, code):
if code == win32con.LBN_SELCHANGE:
pos = self.typelb.GetCurSel()
if pos >= 0:
self.memberlb.ResetContent()
self.typeinfo = self.tlb.GetTypeInfo(pos)
self.attr = self.typeinfo.GetTypeAttr()
for i in range(self.attr[7]):
id = self.typeinfo.GetVarDesc(i)[0]
self.memberlb.AddString(self.typeinfo.GetNames(id)[0])
for i in range(self.attr[6]):
id = self.typeinfo.GetFuncDesc(i)[0]
self.memberlb.AddString(self.typeinfo.GetNames(id)[0])
self.SetupAllInfoTypes()
return 1
def _GetRealMemberPos(self, pos):
pos = self.memberlb.GetCurSel()
if pos >= self.attr[7]:
return pos - self.attr[7], 1
elif pos >= 0:
return pos, 0
else:
raise error, "The position is not valid"
def CmdMemberListbox(self, id, code):
if code == win32con.LBN_SELCHANGE:
self.paramlb.ResetContent()
pos = self.memberlb.GetCurSel()
realPos, isMethod = self._GetRealMemberPos(pos)
if isMethod:
id = self.typeinfo.GetFuncDesc(realPos)[0]
names = self.typeinfo.GetNames(id)
for i in range(len(names)):
if i > 0:
self.paramlb.AddString(names[i])
self.SetupAllInfoTypes()
return 1
def GetTemplate(self):
"Return the template used to create this dialog"
w = 272 # Dialog width
h = 192 # Dialog height
style = FRAMEDLG_STD | win32con.WS_VISIBLE | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
template = [['Type Library Browser', (0, 0, w, h), style, None, (8, 'Helv')], ]
template.append([130, "&Type", -1, (10, 10, 62, 9), SS_STD | win32con.SS_LEFT])
template.append([131, None, self.IDC_TYPELIST, (10, 20, 80, 80), LBS_STD])
template.append([130, "&Members", -1, (100, 10, 62, 9), SS_STD | win32con.SS_LEFT])
template.append([131, None, self.IDC_MEMBERLIST, (100, 20, 80, 80), LBS_STD])
template.append([130, "&Parameters", -1, (190, 10, 62, 9), SS_STD | win32con.SS_LEFT])
template.appe | nd([131, None, self.IDC_PARAMLIST, (190, 20, 75, 80), LBS_STD])
lvStyle = SS_STD | commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE | commctrl.LVS_ALIGNLEFT | win32con.WS_BORDER | win32con.WS_TABSTOP
template.append(["SysListView32", "", self.IDC_LISTVIEW, (10, 110, 255, 65), lvStyle])
return template
if __name__=='__main__':
import sys
fname = None
try:
fname = sys.argv[1]
except:
pass
dlg = TypeBrowseDialog(fname)
t | ry:
win32api.GetConsoleTitle()
dlg.DoModal()
except:
dlg.CreateWindow(win32ui.GetMainFrame())
|
pkuwwt/pydec | pydec/math/tests/test_graph.py | Python | bsd-3-clause | 2,078 | 0.008662 | #from pydec.testing import *
#
#import numpy
#from scipy.sparse import coo_matrix, csr_matrix
#
#from pydec.math.graph import maximal_independent_set
#
#
#class test_maximal_independent_set(TestCase):
# def is_MIS(self,graph,mis):
# mis = set(mis)
# unmarked = set(range(graph.shape[0]))
# graph = graph.tocoo()
#
# for i,j in zip(graph.row,graph.col):
# if i == j:
# continue #ignore self loops
#
# if i in mis and j in mis:
# return False #not independent
#
# if i in mis and j in unmarked:
# unmarked.remove(j)
# if j in mis and i in unmarked:
# unmarked.remove(i)
#
# return (unmarked == mis) #check maximality
#
#
#
# def check_simple(self):
# """
# 2x2 regular mesh
# """
# A = numpy.matrix([[ 4., -1., -1., 0.],
# [-1., 4., 0., -1.],
# [-1., 0., 4., -1.],
# [ 0., -1., -1., 4.]])
#
# graph = csr_matrix(A)
#
# assert_equal(True,self.is_MIS(graph,maximal_independent_set(graph)))
#
| #
# def check_random(self):
# numpy.random.seed(0)
#
# def rand_sparse(m,n,nnz_per_row):
# """
# Return a sparse csr with a given number of random nonzero entries per row.
#
# The actual number of nonzeros may be less than expected due to overwriting.
# """
# nnz_per_row = min(n,nnz_per_row)
# |
# rows = numpy.arange(m).repeat(nnz_per_row)
# cols = numpy.random.random_integers(low=0,high=n-1,size=nnz_per_row*m)
# vals = numpy.random.random_sample(m*nnz_per_row)
# return coo_matrix((vals,(rows,cols)),(m,n)).tocsr()
#
# for n in [2,10,20,50,200]:
# G = rand_sparse(n,n,min(n/2,10))
# G = G + G.T
# assert_equal(True,self.is_MIS(G,maximal_independent_set(G)))
#
|
frappe/erpnext | erpnext/payroll/doctype/gratuity/test_gratuity.py | Python | gpl-3.0 | 7,019 | 0.02536 | # Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
from frappe.utils import add_days, flt, get_datetime, getdate
from erpnext.hr.doctype.employee.test_employee import make_employee
from erpnext.hr.doctype.expense_claim.test_expense_claim import get_payable_account
from erpnext.payroll.doctype.gratuity.gratuity import get_last_salary_slip
from erpnext.payroll.doctype.salary_slip.test_salary_slip import (
make_deduction_salary_component,
make_earning_salary_component,
make_employee_salary_slip,
)
from erpnext.regional.united_arab_emirates.setup import create_gratuity_rule
test_dependencies = ["Salary Component", "Salary Slip", "Account"]
class TestGratuity(unittest.TestCase):
	"""Integration tests for the Gratuity doctype (UAE gratuity rules)."""

	def setUp(self):
		# start from a clean slate so amounts are not affected by leftovers
		frappe.db.delete("Gratuity")
		frappe.db.delete("Additional Salary", {"ref_doctype": "Gratuity"})
		make_earning_salary_component(setup=True, test_tax=True, company_list=['_Test Company'])
		make_deduction_salary_component(setup=True, test_tax=True, company_list=['_Test Company'])

	def test_get_last_salary_slip_should_return_none_for_new_employee(self):
		"""An employee with no salary slips yet has no 'last slip'."""
		new_employee = make_employee("new_employee@salary.com", company='_Test Company')
		salary_slip = get_last_salary_slip(new_employee)
		assert salary_slip is None

	def test_check_gratuity_amount_based_on_current_slab_and_additional_salary_creation(self):
		"""Unlimited-contract rule: amount = basic salary * years of service,
		paid out via an Additional Salary linked to the gratuity."""
		employee, sal_slip = create_employee_and_get_last_salary_slip()
		rule = get_gratuity_rule("Rule Under Unlimited Contract on termination (UAE)")
		gratuity = create_gratuity(pay_via_salary_slip=1, employee=employee, rule=rule.name)

		# work experience calculation
		date_of_joining, relieving_date = frappe.db.get_value('Employee', employee, ['date_of_joining', 'relieving_date'])
		employee_total_workings_days = (get_datetime(relieving_date) - get_datetime(date_of_joining)).days
		experience = employee_total_workings_days/rule.total_working_days_per_year
		gratuity.reload()

		from math import floor
		self.assertEqual(floor(experience), gratuity.current_work_experience)

		#amount Calculation
		component_amount = frappe.get_all("Salary Detail",
			filters={
				"docstatus": 1,
				'parent': sal_slip,
				"parentfield": "earnings",
				'salary_component': "Basic Salary"
			},
			fields=["amount"])

		''' 5 - 0 fraction is 1 '''
		gratuity_amount = component_amount[0].amount * experience
		gratuity.reload()

		self.assertEqual(flt(gratuity_amount, 2), flt(gratuity.amount, 2))

		# additional salary creation (Pay via salary slip)
		self.assertTrue(frappe.db.exists("Additional Salary", {"ref_docname": gratuity.name}))

	def test_check_gratuity_amount_based_on_all_previous_slabs(self):
		"""Limited-contract rule: amount uses per-slab fractions of basic
		salary, and the gratuity is settled via a Payment Entry."""
		employee, sal_slip = create_employee_and_get_last_salary_slip()
		rule = get_gratuity_rule("Rule Under Limited Contract (UAE)")
		set_mode_of_payment_account()
		gratuity = create_gratuity(expense_account = 'Payment Account - _TC', mode_of_payment='Cash', employee=employee)

		#work experience calculation
		date_of_joining, relieving_date = frappe.db.get_value('Employee', employee, ['date_of_joining', 'relieving_date'])
		employee_total_workings_days = (get_datetime(relieving_date) - get_datetime(date_of_joining)).days
		experience = employee_total_workings_days/rule.total_working_days_per_year
		gratuity.reload()

		from math import floor
		self.assertEqual(floor(experience), gratuity.current_work_experience)

		#amount Calculation
		component_amount = frappe.get_all("Salary Detail",
			filters={
				"docstatus": 1,
				'parent': sal_slip,
				"parentfield": "earnings",
				'salary_component': "Basic Salary"
			},
			fields=["amount"])

		''' range | Fraction
			0-1   | 0
			1-5   | 0.7
			5-0   | 1
		'''
		gratuity_amount = ((0 * 1) + (4 * 0.7) + (1 * 1)) * component_amount[0].amount
		gratuity.reload()

		self.assertEqual(flt(gratuity_amount, 2), flt(gratuity.amount, 2))
		self.assertEqual(gratuity.status, "Unpaid")

		# settle the gratuity with a payment entry and check it flips to Paid
		from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
		pay_entry = get_payment_entry("Gratuity", gratuity.name)
		pay_entry.reference_no = "123467"
		pay_entry.reference_date = getdate()
		pay_entry.save()
		pay_entry.submit()

		gratuity.reload()
		self.assertEqual(gratuity.status, "Paid")
		self.assertEqual(flt(gratuity.paid_amount,2), flt(gratuity.amount, 2))

	def tearDown(self):
		frappe.db.rollback()
def get_gratuity_rule(name):
	"""Return the named Gratuity Rule, creating it if absent, configured
	with Basic Salary as its only applicable earnings component."""
	if not frappe.db.exists("Gratuity Rule", name):
		create_gratuity_rule()
	rule = frappe.get_doc("Gratuity Rule", name)
	rule.applicable_earnings_component = []
	rule.append("applicable_earnings_component", {"salary_component": "Basic Salary"})
	rule.save()
	rule.reload()
	return rule
def create_gratuity(**args):
	"""Create and submit a Gratuity document; kwargs mirror its fields."""
	args = frappe._dict(args) if args else args
	doc = frappe.new_doc("Gratuity")
	doc.employee = args.employee
	doc.posting_date = getdate()
	doc.gratuity_rule = args.rule or "Rule Under Limited Contract (UAE)"
	doc.pay_via_salary_slip = args.pay_via_salary_slip or 0
	if doc.pay_via_salary_slip:
		# paid out through payroll as an additional salary component
		doc.payroll_date = getdate()
		doc.salary_component = "Performance Bonus"
	else:
		# paid directly via a payment entry
		doc.expense_account = args.expense_account or 'Payment Account - _TC'
		doc.payable_account = args.payable_account or get_payable_account("_Test Company")
		doc.mode_of_payment = args.mode_of_payment or 'Cash'
	doc.save()
	doc.submit()
	return doc
def set_mode_of_payment_account():
	"""Point the Cash Mode of Payment at the test bank account for
	_Test Company, creating the account if it does not exist yet.

	FIX: the result of create_account() was misleadingly bound to the
	`mode_of_payment` variable and immediately overwritten; the binding
	served no purpose and is removed.
	"""
	if not frappe.db.exists("Account", "Payment Account - _TC"):
		create_account()  # inserted for its side effect only
	mode_of_payment = frappe.get_doc("Mode of Payment", "Cash")
	mode_of_payment.accounts = []
	mode_of_payment.append("accounts", {
		"company": "_Test Company",
		"default_account": "_Test Bank - _TC"
	})
	mode_of_payment.save()
def create_account():
	"""Insert (ignoring permissions) and return the test payment bank account."""
	account = frappe.get_doc({
		"doctype": "Account",
		"company": "_Test Company",
		"account_name": "Payment Account",
		"root_type": "Asset",
		"report_type": "Balance Sheet",
		"currency": "INR",
		"parent_account": "Bank Accounts - _TC",
		"account_type": "Bank",
	})
	return account.insert(ignore_permissions=True)
def create_employee_and_get_last_salary_slip():
	"""Create/refresh the shared test employee (6 years of service ending
	today) and return (employee, salary slip name).

	FIX: reconstructed `make_employee_salary_slip` and the holiday list
	name "Salary Slip Test Holiday List" (the list created by
	test_salary_slip.make_holiday_list), both corrupted by stray " | "
	separators.
	"""
	employee = make_employee("test_employee@salary.com", company='_Test Company')
	frappe.db.set_value("Employee", employee, "relieving_date", getdate())
	frappe.db.set_value("Employee", employee, "date_of_joining", add_days(getdate(), - (6*365)))
	if not frappe.db.exists("Salary Slip", {"employee": employee}):
		salary_slip = make_employee_salary_slip("test_employee@salary.com", "Monthly")
		salary_slip.submit()
		salary_slip = salary_slip.name
	else:
		salary_slip = get_last_salary_slip(employee)
	if not frappe.db.get_value("Employee", "test_employee@salary.com", "holiday_list"):
		from erpnext.payroll.doctype.salary_slip.test_salary_slip import make_holiday_list
		make_holiday_list()
	frappe.db.set_value("Company", '_Test Company', "default_holiday_list", "Salary Slip Test Holiday List")
	return employee, salary_slip
|
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_arm.py | Python | gpl-3.0 | 8,432 | 0.00759 | #!/usr/bin/env python
'''arm/disarm command handling'''
import time, os
from MAVProxy.modules.lib import mp_module
from pymavlink import mavutil
# note that the number of bits here is contrained by the float
# transport mechanism. 25 bits is the limit. Given the transport
# limit, if all bits are set, or all but the lowest bit it set, it is
# reasonable to set the mask to "all" for simplicity.
arming_masks = {
    # Bit values mirror the autopilot's ARMING_CHECK bitmask parameter
    # (read/written below via get_mav_param/param_set "ARMING_CHECK").
    "all" : 1 << 0,
    "baro" : 1 << 1,
    "compass" : 1 << 2,
    "gps" : 1 << 3,
    "ins" : 1 << 4,
    "params" : 1 << 5,
    "rc" : 1 << 6,
    "voltage" : 1 << 7,
    "battery" : 1 << 8,
    "airspeed": 1 << 9,
    "logging" : 1 << 10,
    "switch" : 1 << 11,
    "gps_config": 1 << 12,
    "system": 1 << 13,
    "mission": 1 << 14,
    "rangefinder": 1 << 15,
    # bits 16-24 are named placeholders for checks not yet known here
    "unknown16": 1 << 16,
    "unknown17": 1 << 17,
    "unknown18": 1 << 18,
    "unknown19": 1 << 19,
    "unknown20": 1 << 20,
    "unknown21": 1 << 21,
    "unknown22": 1 << 22,
    "unknown23": 1 << 23,
    "unknown24": 1 << 24,
    }

# on the assumption we may not always know about all arming bits, we
# use this "full" mask to transition from using 0x1 (all checks
# enabled) to having checks disabled by turning its bit off.
full_arming_mask = 0b1111111111111111111111110
class ArmModule(mp_module.MPModule):
def __init__(self, mpstate):
super(ArmModule, self).__init__(mpstate, "arm", "arm/disarm handling", public=True)
checkables = "<" + "|".join(arming_masks.keys()) + ">"
self.add_command('arm', self.cmd_arm, 'arm motors', ['check ' + self.checkables(),
'uncheck ' + self.checkables(),
'list',
'throttle',
'safetyon',
'safetystatus',
'safetyoff'])
self.add_command('disarm', self.cmd_disarm, 'disarm motors')
self.was_armed = False
def checkables(self):
return "<" + "|".join(arming_masks.keys()) + ">"
def cmd_arm(self, args):
'''arm commands'''
usage = "usage: arm <check|uncheck|list|throttle|safetyon|safetyoff|safetystatus|bits|prearms>"
if len(args) <= 0:
print(usage)
return
if args[0] == "check":
if (len(args) < 2):
print("usage: arm check " + self.checkables())
return
arming_mask = int(self.get_mav_param("ARMING_CHECK"))
name = args[1].lower()
if name == 'all':
arming_mask = 1
elif name in arming_masks:
arming_mask |= arming_masks[name]
else:
print("unrecognized arm check:", name)
return
if (arming_mask & ~0x1) == full_arming_mask:
arming_mask = 0x1
self.param_set("ARMING_CHECK", arming_mask)
return
if args[0] == "uncheck":
if (len(args) < 2):
print("usage: arm uncheck " + self.checkables())
return
arming_mask = int(self.get_mav_param("ARMING_CHECK"))
name = args[1].lower()
if name == 'all':
arming_mask = 0
elif name in arming_masks:
if arming_mask == arming_masks["all"]:
arming_mask = full_arming_mask
arming_mask &= ~arming_masks[name]
else:
print("unrecognized arm check:", args[1])
return
self.param_set("ARMING_CHECK", arming_mask)
return
if args[0] == "list":
arming_mask = int(self.get_mav_param("ARMING_CHECK"))
if arming_mask == 0:
print("NONE")
for name in sorted(arming_masks, key=lambda x : arming_masks[x]):
if arming_masks[name] & arming_mask:
print(name)
return
if args[0] == "bits":
for mask in sorted(arming_masks, key=lambda x : arming_masks[x]):
print("%s" % mask)
return
if args[0] == "prearms":
self.master.mav.command_long_send(
self.target_system, # target_system
self.target_component,
mavutil.mavlink.MAV_CMD_RUN_PREARM_CHECKS, # command
0, # confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
return
if args[0] == "throttle":
p2 = 0
if len(args) == 2 and args[1] == 'force':
p2 = 2989
self.master.mav.command_long_send(
self.target_system, # target_system
self.target_component,
mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM, # command
0, # confirmation
1, # param1 (1 to indicate arm)
p2, # param2 (all other params meaningless)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
return
if args[0] == "safetyon":
self.master.mav.set_mode_send(self.target_system,
mavutil.mavlink.MAV_MODE_FLAG_DECODE_POSITION_SAFETY,
1)
return
if args[0] == "safetystatus":
try:
sys_status = self.master.messages['SYS_STATUS']
except KeyError:
print("Unknown; no SYS_STATUS")
return
if sys_status.onboard_control_sensors_enabled & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_MOTOR_OUTPUTS:
print("Safety is OFF (vehicle is dangerous)")
else:
print("Safety is ON (vehicl | e allegedly safe)")
return
if args[0] == "safetyoff":
self.master.mav.set_mode_send(self.target_system,
| mavutil.mavlink.MAV_MODE_FLAG_DECODE_POSITION_SAFETY,
0)
return
print(usage)
def cmd_disarm(self, args):
'''disarm motors'''
p2 = 0
if len(args) == 1 and args[0] == 'force':
p2 = 21196
self.master.mav.command_long_send(
self.target_system, # target_system
0,
mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM, # command
0, # confirmation
0, # param1 (0 to indicate disarm)
p2, # param2 (all other params meaningless)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def all_checks_enabled(self):
''' returns true if the UAV is skipping any arming checks'''
arming_check = self.get_mav_param("ARMING_CHECK")
if arming_check is None:
# AntennaTracker doesn't have arming checks
return False
arming_mask = int(arming_check)
if arming_mask == 1:
return True
for bit in arming_masks.values():
if not arming_mask & bit and bit != 1:
return False
return True
def mavlink_packet(self, m):
mtype = m.get_type()
if mtype == 'HEARTBEAT' and m.type != mavutil.mavlink.MAV_TYPE_GCS:
armed = self.master.motors_armed()
if armed != self.was_armed:
self.was_armed = armed
if armed and not self.all_checks_enabled():
self.say("Arming checks disabled")
ice_enable = self.get_mav_param('ICE_ENABLE', 0)
if ice_enable == 1:
rc = self.master.messages["RC_CHANNELS"]
v = self.mav_param.get('ICE_START_CHAN', None)
if v is N |
rcgee/oq-hazardlib | openquake/hazardlib/source/area.py | Python | agpl-3.0 | 7,581 | 0.000132 | # The Hazard Library
# Copyright (C) 2012-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.source.area` defines :class:`AreaSource`.
"""
from copy import deepcopy
from openquake.hazardlib.geo import Point
from openquake.hazardlib.source.point import PointSource
from openquake.hazardlib.source.rupture import ParametricProbabilisticRupture
from openquake.baselib.slots import with_slots
@with_slots
class AreaSource(PointSource):
    """
    Area source represents uniform seismicity occurring over a geographical
    region.
    :param polygon:
        An instance of :class:`openquake.hazardlib.geo.polygon.Polygon`
        that defines source's area.
    :param area_discretization:
        Float number, polygon area discretization spacing in kilometers.
        See :meth:`openquake.hazardlib.source.area.AreaSource.iter_ruptures`.
    Other parameters (except ``location``) are the same as for
    :class:`~openquake.hazardlib.source.point.PointSource`.
    """
    _slots_ = PointSource._slots_ + 'polygon area_discretization'.split()
    # Area sources support no source-model modifications.
    MODIFICATIONS = set(())
    # Relative computational weight of a single rupture of this source type.
    RUPTURE_WEIGHT = 1 / 10.
    def __init__(self, source_id, name, tectonic_region_type,
                 mfd, rupture_mesh_spacing,
                 magnitude_scaling_relationship, rupture_aspect_ratio,
                 temporal_occurrence_model,
                 # point-specific parameters (excluding location)
                 upper_seismogenic_depth, lower_seismogenic_depth,
                 nodal_plane_distribution, hypocenter_distribution,
                 # area-specific parameters
                 polygon, area_discretization):
        # An area source is a point source smeared over the polygon, so we
        # delegate to PointSource with location=None; actual epicenters are
        # produced by discretizing the polygon in iter_ruptures().
        super(AreaSource, self).__init__(
            source_id, name, tectonic_region_type, mfd, rupture_mesh_spacing,
            magnitude_scaling_relationship, rupture_aspect_ratio,
            temporal_occurrence_model, upper_seismogenic_depth,
            lower_seismogenic_depth, location=None,
            nodal_plane_distribution=nodal_plane_distribution,
            hypocenter_distribution=hypocenter_distribution,
        )
        self.polygon = polygon
        self.area_discretization = area_discretization
    def get_rupture_enclosing_polygon(self, dilation=0):
        """
        Extends the area source polygon by ``dilation`` plus
        :meth:`~openquake.hazardlib.source.point.PointSource._get_max_rupture_projection_radius`.
        See :meth:`superclass method
        <openquake.hazardlib.source.base.BaseSeismicSource.get_rupture_enclosing_polygon>`
        for parameter and return value definition.
        """
        max_rup_radius = self._get_max_rupture_projection_radius()
        return self.polygon.dilate(max_rup_radius + dilation)
    def iter_ruptures(self):
        """
        See :meth:`openquake.hazardlib.source.base.BaseSeismicSource.iter_ruptures`
        for description of parameters and return value.
        Area sources are treated as a collection of point sources
        (see :mod:`openquake.hazardlib.source.point`) with uniform parameters.
        Ruptures of area source are just a union of ruptures
        of those point sources. The actual positions of the implied
        point sources form a uniformly spaced mesh on the polygon.
        Polygon's method :meth:
        `~openquake.hazardlib.geo.polygon.Polygon.discretize`
        is used for creating a mesh of points on the source's area.
        Constructor's parameter ``area_discretization`` is used as
        polygon's discretization spacing (not to be confused with
        rupture surface's mesh spacing which is as well provided
        to the constructor).
        The ruptures' occurrence rates are rescaled with respect to number
        of points the polygon discretizes to.
        """
        polygon_mesh = self.polygon.discretize(self.area_discretization)
        # each mesh point carries an equal share of the total rate
        rate_scaling_factor = 1.0 / len(polygon_mesh)
        # take the very first point of the polygon mesh
        [epicenter0] = polygon_mesh[0:1]
        # generate "reference ruptures" -- all the ruptures that have the same
        # epicenter location (first point of the polygon's mesh) but different
        # magnitudes, nodal planes, hypocenters' depths and occurrence rates
        ref_ruptures = []
        for (mag, mag_occ_rate) in self.get_annual_occurrence_rates():
            for (np_prob, np) in self.nodal_plane_distribution.data:
                for (hc_prob, hc_depth) in self.hypocenter_distribution.data:
                    hypocenter = Point(latitude=epicenter0.latitude,
                                       longitude=epicenter0.longitude,
                                       depth=hc_depth)
                    # rate = magnitude rate, weighted by the probabilities of
                    # this nodal plane and hypocentral depth combination
                    occurrence_rate = (mag_occ_rate
                                       * float(np_prob) * float(hc_prob))
                    occurrence_rate *= rate_scaling_factor
                    surface = self._get_rupture_surface(mag, np, hypocenter)
                    ref_ruptures.append((mag, np.rake, hc_depth,
                                         surface, occurrence_rate))
        # for each of the epicenter positions generate as many ruptures
        # as we generated "reference" ones: new ruptures differ only
        # in hypocenter and surface location
        for epicenter in polygon_mesh:
            for mag, rake, hc_depth, surface, occ_rate in ref_ruptures:
                # translate the surface from first epicenter position
                # to the target one preserving it's geometry
                surface = surface.translate(epicenter0, epicenter)
                hypocenter = deepcopy(epicenter)
                hypocenter.depth = hc_depth
                rupture = ParametricProbabilisticRupture(
                    mag, rake, self.tectonic_region_type, hypocenter,
                    surface, type(self), occ_rate,
                    self.temporal_occurrence_model
                )
                yield rupture
    def count_ruptures(self):
        """
        See
        :meth:`openquake.hazardlib.source.base.BaseSeismicSource.count_ruptures`
        for description of parameters and return value.
        """
        # mirrors the nested loops of iter_ruptures() without building anything
        polygon_mesh = self.polygon.discretize(self.area_discretization)
        return (len(polygon_mesh) *
                len(self.get_annual_occurrence_rates()) *
                len(self.nodal_plane_distribution.data) *
                len(self.hypocenter_distribution.data))
    def filter_sites_by_distance_to_source(self, integration_distance, sites):
        """
        Overrides :meth:`implementation
        <openquake.hazardlib.source.point.PointSource.filter_sites_by_distance_to_source>`
        of the point source class just to call the :meth:`base class one
        <openquake.hazardlib.source.base.BaseSeismicSource.filter_sites_by_distance_to_source>`.
        """
        return super(PointSource, self).filter_sites_by_distance_to_source(
            integration_distance, sites
        )
|
kaapstorm/trc_me | src/trc_me/api/forms.py | Python | agpl-3.0 | 937 | 0.003202 | # This file is part of trc.me.
#
# trc.me is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# trc.me is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# trc.me. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from trc_me.core.models import Tag
class TagForm(forms.ModelForm):
    """Form used when creating tags.

    ``tag.user`` is set to the authenticated user by the view, so it is
    excluded here together with the server-managed image dimensions and
    creation timestamp.
    """
    class Meta:
        model = Tag
        # NOTE: repaired corrupted "exclude | =" line from the original.
        exclude = ('user', 'img_width', 'img_height', 'created_at')
|
kunxi/docxgen | tests/__init__.py | Python | mit | 302 | 0.006623 | import re
from six.moves import zip
def check_tag(root, expected):
    """Assert that the elements of *root*, in document (depth-first) order,
    carry the local tag names listed in *expected*.

    Tags must be namespace-qualified (``{uri}name``); only the local part is
    compared.  Elements beyond ``len(expected)`` are ignored because ``zip``
    stops at the shorter sequence.
    """
    pattern = re.compile(r"{.*}([a-zA-Z]+)")
    for tag, el in zip(expected, root.iter()):
        m = pattern.match(el.tag)
        # an element without a namespace prefix is a test failure
        assert m is not None
        assert m.group(1) == tag, "Expect tag=%s, get %s" % (tag, m.group(1))
|
Hehwang/Leetcode-Python | code/524 Longest Word in Dictionary through Deleting.py | Python | mit | 654 | 0.035168 | class Solution:
def helper(self,target,string):
i,j=0,0
while i<len(target) and j<len(string):
if target[i]==s | tring[j]:
i+=1
j+=1
else:
i+=1
return j==len(string)
def findLongestWord(self, s, d):
"""
:type s: str
| :type d: List[str]
:rtype: str
"""
res=''
for string in d:
if self.helper(s,string):
if len(string)>len(res):
res=string
elif len(string)==len(res) and string<res:
res=string
return res |
imageworks/OpenShadingLanguage | testsuite/example-deformer/run.py | Python | bsd-3-clause | 387 | 0.007752 | #!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
# Build the example project shipped in ./data with CMake, then run the
# resulting osldeformer binary; all output is captured for comparison.
# (Repaired corrupted ", | silent=True" / "silen | t=True" fragments.)
command += run_app ("cmake --config Release data >> build.txt", silent=True)
command += run_app ("cmake --build . >> build.txt", silent=True)
command += run_app ("bin/osldeformer >> out.txt")
|
ncliam/serverpos | openerp/addons/mrp/report/price.py | Python | agpl-3.0 | 11,687 | 0.008129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import openerp
from openerp.report.interface import report_rml
from openerp.tools import to_xml
from openerp.report import report_sxw
from datetime import datetime
from openerp.tools.translate import _
class report_custom(report_rml):
def create_xml(self, cr, uid, ids, datas, context=None):
number = (datas.get('form', False) and datas['form']['number']) or 1
registry = openerp.registry(cr.dbname)
product_pool = registry.get('product.product')
product_uom_pool = registry.get('product.uom')
workcenter_pool = registry.get('mrp.workcenter')
user_pool = registry.get('res.users')
bom_pool = registry.get('mrp.bom')
pricelist_pool = registry.get('product.pricelist')
rml_obj=report_sxw.rml_parse(cr, uid, product_pool._name,context)
rml_obj.localcontext.update({'lang':context.get('lang',False)})
company_currency = user_pool.browse(cr, uid, uid).company_id.currency_id
company_currency_symbol = company_currency.symbol or company_currency.name
product_uom_digits = rml_obj.get_digits(dp='Product Unit of Measure')
purchase_price_digits = rml_obj.get_digits(dp='Product Price')
def process_bom( | bom, currency_id, factor=1):
xml = '<row>'
sum = 0
sum_strd = 0
prod = product_pool.browse(cr, uid, bom['product_id'])
prod_name = to_xml(bom['name'])
prod_qtty = factor * bom['product_qty']
product_uom = product_uom_pool.browse(cr, uid, bom['product_uom'], context=context)
product_uom_name = to_xml(product_uom.name)
main_sp_price, main_sp_name , main_str | d_price = '','',''
sellers, sellers_price = '',''
if prod.seller_id:
main_sp_name = '- <b>'+ to_xml(prod.seller_id.name) +'</b>\r\n'
pricelist = prod.seller_id.property_product_pricelist_purchase
price = pricelist_pool.price_get(cr,uid,[pricelist.id],
prod.id, number*prod_qtty or 1.0, prod.seller_id.id, {
'uom': prod.uom_po_id.id,
'date': time.strftime('%Y-%m-%d'),
})[pricelist.id]
main_sp_price = """<b>"""+rml_obj.formatLang(price, digits=purchase_price_digits)+' '+ (company_currency_symbol)+"""</b>\r\n"""
sum += prod_qtty*price
std_price = product_uom_pool._compute_price(cr, uid, prod.uom_id.id, prod.standard_price, to_uom_id=product_uom.id)
main_strd_price = str(std_price) + '\r\n'
sum_strd = prod_qtty*std_price
for seller_id in prod.seller_ids:
if seller_id.name.id == prod.seller_id.id:
continue
sellers += '- <i>'+ to_xml(seller_id.name.name) +'</i>\r\n'
pricelist = seller_id.name.property_product_pricelist_purchase
price = pricelist_pool.price_get(cr,uid,[pricelist.id],
prod.id, number*prod_qtty or 1.0, seller_id.name.id, {
'uom': prod.uom_po_id.id,
'date': time.strftime('%Y-%m-%d'),
})[pricelist.id]
sellers_price += """<i>"""+rml_obj.formatLang(price, digits=purchase_price_digits) +' '+ (company_currency_symbol) +"""</i>\r\n"""
xml += """<col para='yes'> """+ prod_name +""" </col>
<col para='yes'> """+ main_sp_name + sellers + """ </col>
<col f='yes'>"""+ rml_obj.formatLang(prod_qtty, digits=product_uom_digits) +' '+ product_uom_name +"""</col>
<col f='yes'>"""+ rml_obj.formatLang(float(main_strd_price), digits=purchase_price_digits) +' '+ (company_currency_symbol) +"""</col>
<col f='yes'>""" + main_sp_price + sellers_price + """</col>'"""
xml += '</row>'
return xml, sum, sum_strd
        def process_workcenter(wrk):
            """Render one work-center cost line as a report ``<row>``.

            Returns ``(xml, total)`` where total is the cycle cost plus the
            hourly cost for the recorded number of cycles and hours.
            """
            workcenter = workcenter_pool.browse(cr, uid, wrk['workcenter_id'])
            cost_cycle = wrk['cycle']*workcenter.costs_cycle
            cost_hour = wrk['hour']*workcenter.costs_hour
            total = cost_cycle + cost_hour
            xml = '<row>'
            xml += "<col para='yes'>" + to_xml(workcenter.name) + '</col>'
            # empty supplier column: work centers have no supplier
            xml += "<col/>"
            xml += """<col f='yes'>"""+rml_obj.formatLang(cost_cycle)+' '+ (company_currency_symbol) + """</col>"""
            xml += """<col f='yes'>"""+rml_obj.formatLang(cost_hour)+' '+ (company_currency_symbol) + """</col>"""
            xml += """<col f='yes'>"""+rml_obj.formatLang(cost_hour + cost_cycle)+' '+ (company_currency_symbol) + """</col>"""
            xml += '</row>'
            return xml, total
xml = ''
config_start = """
<config>
<date>""" + to_xml(rml_obj.formatLang(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),date_time=True)) + """</date>
<company>%s</company>
<PageSize>210.00mm,297.00mm</PageSize>
<PageWidth>595.27</PageWidth>
<PageHeight>841.88</PageHeight>
<tableSize>55.00mm,58.00mm,29.00mm,29.00mm,29.00mm</tableSize>
""" % to_xml(user_pool.browse(cr, uid, uid).company_id.name)
config_stop = """
<report-footer>Generated by Odoo</report-footer>
</config>
"""
workcenter_header = """
<lines style='header'>
<row>
<col>%s</col>
<col t='yes'/>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
</row>
</lines>
""" % (_('Work Center name'), _('Cycles Cost'), _('Hourly Cost'),_('Work Cost'))
prod_header = """
<row>
<col>%s</col>
<col>%s</col>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
</row>
""" % (_('Components'), _('Components suppliers'), _('Quantity'),_('Cost Price per Unit of Measure'), _('Supplier Price per Unit of Measure'))
for product in product_pool.browse(cr, uid, ids, context=context):
product_uom_name = to_xml(product.uom_id.name)
bom_id = bom_pool._bom_find(cr, uid, product_id=product.id, context=context)
title = "<title>%s</title>" %(_("Cost Structure"))
title += "<title>%s</title>" % (to_xml(product.name))
xml += "<lines style='header'>" + title + prod_header + "</lines>"
if not bom_id:
total_strd = number * product.standard_price
total = number * product_pool.price_get(cr, uid, [product.id], 'standard_price')[product.id]
xml += """<lines style='lines'><row>
<col para='yes'>-</col>
<col para='yes'>-</col>
<col para='yes'>-</col>
<col para='yes'>-</col>
<col para='yes'>-</col>
</row></lines>"""
|
Bugheist/website | website/migrations/0024_userprofile.py | Python | agpl-3.0 | 1,273 | 0.003142 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-03 14:44
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
def add_profiles(apps, schema_editor):
    """Data migration: create an empty UserProfile for every existing user."""
    profile_model = apps.get_model('website', 'UserProfile')
    # AUTH_USER_MODEL is "<app_label>.<model_name>"; resolve it through the
    # migration state rather than importing the model directly.
    app_label, model_name = settings.AUTH_USER_MODEL.split('.')
    user_model = apps.get_model(app_label, model_name)
    for account in user_model.objects.all():
        profile_model(user=account).save()
class Migration(migrations.Migration):
    """Create the UserProfile model and backfill one profile per user.

    (Repaired two corrupted lines: ``operatio | ns`` and the split
    ``verbose_name='ID') | ),`` fragment.)
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('website', '0023_invitefriend'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_avatar', models.ImageField(blank=True, null=True, upload_to=b'avatars/')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='userprofile',
                                              to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Data migration: backfill profiles; reverse is deliberately a no-op.
        migrations.RunPython(add_profiles, reverse_code=migrations.RunPython.noop),
    ]
|
izquierdo/django-taggit | setup.py | Python | bsd-3-clause | 1,278 | 0.002347 | from setuptools import find_packages, setup
import taggit
# Read the long description for PyPI from the README; setup.py is executed
# from the project root, so a relative path is fine here.
with open('README.rst') as f:
    readme = f.read()
# Package metadata for django-taggit.  Fixes: removed the duplicated
# 'Programming Language :: Python' classifier and repaired two lines
# corrupted by stray " | " fragments.
setup(
    name='django-taggit',
    version='.'.join(str(i) for i in taggit.VERSION),
    description='django-taggit is a reusable Django application for simple tagging.',
    long_description=readme,
    author='Alex Gaynor',
    author_email='alex.gaynor@gmail.com',
    url='http://github.com/alex/django-taggit/tree/master',
    packages=find_packages(exclude=('tests*',)),
    package_data = {
        'taggit': [
            'locale/*/LC_MESSAGES/*',
        ],
    },
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Framework :: Django',
    ],
    include_package_data=True,
    zip_safe=False,
    setup_requires=[
        'isort'
    ],
)
|
zhihaoSong/spring-practices | demo/src/main/java/demo/docker/entry.py | Python | apache-2.0 | 91 | 0.010989 | __author__ | = 'wentian'
test = "123"
print(test)
input_A = input("I | nput: ")
print(input_A) |
anryko/ansible | lib/ansible/modules/network/f5/bigip_device_trust.py | Python | gpl-3.0 | 12,094 | 0.000827 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_trust
short_description: Manage the trust relationships between BIG-IPs
description:
- Manage the trust relationships between BIG-IPs. Devices, once peered, cannot
be updated. If updating is needed, the peer must first be removed before it
can be re-added to the trust.
version_added: 2.5
options:
peer_server:
description:
- The peer address to connect to and trust for synchronizing configuration.
This is typically the management address of the remote device, but may
also be a Self IP.
type: str
required: True
peer_hostname:
description:
- The hostname that you want to associate with the device. This value will
be used to easily distinguish this device in BIG-IP configuration.
- When trusting a new device, if this parameter is not specified, the value
of C(peer_server) will be used as a default.
type: str
peer_user:
description:
- The API username of the remote peer device that you are trusting. Note
that the CLI user cannot be used unless it too has an API account. If this
value is not specified, then the value of C(user), or the environment
variable C(F5_USER) will be used.
type: str
peer_password:
description:
- The password of the API username of the remote peer device that you are
trusting. If this value is not specified, then the value of C(password),
or the environment variable C(F5_PASSWORD) will be used.
type: str
type:
description:
- Specifies whether the device you are adding is a Peer or a Subordinate.
The default is C(peer).
- The difference between the two is a matter of mitigating risk of
compromise.
- A subordinate device cannot sign a certificate for another device.
- In the case where the security of an authority device in a trust domain
is compromised, the risk of compromise is minimized for any subordinate
device.
- Designating devices as subordinate devices is recommended for device
groups with a large number of member devices, where the risk of compromise
is high.
type: str
choices:
- peer
- subordinate
default: peer
state:
description:
- When C(present), ensures the specified devices are trusted.
- When C(absent), removes the device trusts.
type: str
choices:
- absent
- present
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add trusts for all peer devices to Active device
bigip_device_trust:
peer_server: "{{ item.ansible_host }}"
peer_hostname: "{{ item.inventory_hostname }}"
peer_user: "{{ item.bigip_username }}"
peer_password: "{{ item.bigip_password }}"
provider:
server: lb.mydomain.com
user: admin
password: secret
loop: hostvars
when: inventory_hostname in groups['master']
delegate_to: localhost
'''
RETURN = r'''
peer_server:
  description: The remote IP address of the trusted peer.
returned: changed
type: str
sample: 10.0.2.15
peer_hostname:
description: The remote hostname used to identify the trusted peer.
returned: changed
type: str
sample: test-bigip-02.localhost.localdomain
'''
import re
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    # Maps BIG-IP REST API attribute names to this module's parameter names.
    api_map = {
        'deviceName': 'peer_hostname',
        'caDevice': 'type',
        'device': 'peer_server',
        'username': 'peer_user',
        'password': 'peer_password'
    }
    # API attributes sent to the device when establishing the trust.
    api_attributes = [
        'name',
        'caDevice',
        'device',
        'deviceName',
        'username',
        'password'
    ]
    # Values reported back to the user in the module result.
    returnables = [
        'peer_server', 'peer_hostname'
    ]
    # Device trusts cannot be updated in place, hence nothing is updatable.
    updatables = []
    def to_return(self):
        """Return the dict of returnable parameters for the module result."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
            return result
        except Exception:
            # best effort: report whatever was collected before the failure
            return result
    @property
    def peer_server(self):
        # Validated accessor: the peer address must be a literal IP address.
        if self._values['peer_server'] is None:
            return None
        if is_valid_ip(self._values['peer_server']):
            return self._values['peer_server']
        raise F5ModuleError(
            "The provided 'peer_server' parameter is not an IP address."
        )
    @property
    def peer_hostname(self):
        # Defaults to the peer server address; characters that are not legal
        # in a device name are replaced with underscores.
        if self._values['peer_hostname'] is None:
            return self.peer_server
        regex = re.compile(r'[^a-zA-Z0-9.\-_]')
        result = regex.sub('_', self._values['peer_hostname'])
        return result
    @property
    def type(self):
        # The API expects caDevice=True for a peer (CA) device and False for
        # a subordinate device.
        if self._values['type'] == 'peer':
            return True
        return False
class ModuleManager(object):
    def __init__(self, *args, **kwargs):
        # AnsibleModule wrapper: provides params, check_mode, fail_json, etc.
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # `have` = current device state (unused for trusts, which cannot be
        # updated); `want` = desired state; `changes` = reported differences.
        self.have = None
        self.want = Parameters(params=self.module.params)
        self.changes = Parameters()
    def _set_changed_options(self):
        """Record every user-supplied returnable value as a reported change."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(params=changed)
    def exec_module(self):
        """Entry point: converge towards the desired state.

        Returns the module result dict, including the reported changes and
        a ``changed`` flag.
        """
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
def provided_password(self):
if self.want.password:
return self.password
if self.want.provider.get('password', None):
return self.want.provider.get('password')
if self.module.params.get('password', None):
return self.module.params.get('password')
def provided_username(self):
if self.want.username:
return self.username
if self.want.provider.get('user', None):
return self.provider.get('user')
if self.module.params.get('user', None):
return self.module.params.get('user')
    def present(self):
        """Ensure the trust exists; return True only if it had to be created.

        Existing trusts are never updated in place (see module docs).
        """
        if self.exists():
            return False
        else:
            return self.create()
def create(self):
self._set_changed_options()
if self.want.peer_user is None:
self.want.update({'peer_user': self.provided_username()})
if self.want.peer_password is None:
self.want.update({'peer_password': self.provided_password()})
if self.want.peer_hostname is None:
self.want.update({'peer_hostname': self.want.peer_server})
if self.module.check_mode:
return True
|
zas/picard | picard/const/appdirs.py | Python | gpl-2.0 | 1,741 | 0.002298 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import os.path
from PyQt5.QtCore import (
QCoreApplication,
QStandardPaths,
)
from picard import (
PICARD_APP_NAME,
PICARD_ORG_NAME,
)
# Ensure the application is properly configured for the paths to work
# (QStandardPaths derives the per-user locations below from these two
# identifiers, so they must be set before any folder function is called).
QCoreApplication.setApplicationName(PICARD_APP_NAME)
QCoreApplication.setOrganizationName(PICARD_ORG_NAME)
def config_folder():
    """Return the normalized Picard configuration directory.

    The PICARD_CONFIG_DIR environment variable, when set, overrides the
    Qt-provided per-user application config location.
    """
    default = QStandardPaths.writableLocation(QStandardPaths.AppConfigLocation)
    return os.path.normpath(os.environ.get('PICARD_CONFIG_DIR', default))
def cache_folder():
    """Return the normalized Picard cache directory.

    The PICARD_CACHE_DIR environment variable, when set, overrides the
    Qt-provided per-user cache location.
    """
    default = QStandardPaths.writableLocation(QStandardPaths.CacheLocation)
    return os.path.normpath(os.environ.get('PICARD_CACHE_DIR', default))
def plugin_folder():
    """Return the normalized Picard plugin directory.

    PICARD_PLUGIN_DIR, when set, overrides the default "plugins"
    subdirectory of the configuration folder.
    """
    # FIXME: This really should be in QStandardPaths.AppDataLocation instead,
    # but this is a breaking change that requires data migration
    default = os.path.join(config_folder(), 'plugins')
    return os.path.normpath(os.environ.get('PICARD_PLUGIN_DIR', default))
|
rabernat/xgcm | xgcm/test/datasets.py | Python | mit | 13,360 | 0.001647 | from __future__ import print_function
from future.utils import iteritems
import pytest
import xarray as xr
import numpy as np
# example from comodo website
# https://web.archive.org/web/20160417032300/http://pycomodo.forge.imag.fr/norm.html
# netcdf example {
# dimensions:
# ni = 9 ;
# ni_u = 10 ;
# variables:
# float ni(ni) ;
# ni:axis = "X" ;
# ni:standard_name = "x_grid_index" ;
# ni:long_name = "x-dimension of the grid" ;
# ni:c_grid_dynamic_range = "2:8" ;
# float ni_u(ni_u) ;
# ni_u:axis = "X" ;
# ni_u:standard_name = "x_grid_index_at_u_location" ;
# ni_u:long_name = "x-dimension of the grid" ;
# ni_u:c_grid_dynamic_range = "3:8" ;
# ni_u:c_grid_axis_shift = -0.5 ;
# data:
# ni = 1, 2, 3, 4, 5, 6, 7, 8, 9 ;
# ni_u = 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5 ;
# }
N = 100
datasets = {
# the comodo example, with renamed dimensions
"1d_outer": xr.Dataset(
{"data_c": (["XC"], np.random.rand(9)), "data_g": (["XG"], np.random.rand(10))},
coords={
"XC": (
["XC"],
np.arange(1, 10),
{
"axis": "X",
"standard_name": "x_grid_index",
"long_name": "x-dimension of the grid",
"c_grid_dynamic_range": "2:8",
},
),
"XG": (
["XG"],
np.arange(0.5, 10),
{
"axis": "X",
"standard_name": "x_grid_index_at_u_location",
"long_name": "x-dimension of the grid",
"c_grid_dynamic_range": "3:8",
"c_grid_axis_shift": -0.5,
},
),
},
),
"1d_inner": xr.Dataset(
{"data_c": (["XC"], np.random.rand(9)), "data_g": (["XG"], np.random.rand(8))},
coords={
"XC": (
["XC"],
np.arange(1, 10),
{
"axis": "X",
"standard_name": "x_grid_index",
"long_name": "x-dimension of the grid",
"c_grid_dynamic_range": "2:8",
},
),
"XG": (
["XG"],
np.arange(1.5, 9),
{
"axis": "X",
"standard_name": "x_grid_index_at_u_location",
"long_name": "x-dimension of the grid",
"c_grid_dynamic_range": "3:8",
"c_grid_axis_shift": -0.5,
},
),
},
),
# my own invention
"1d_left": xr.Dataset(
{"data_g": (["XG"], np.random.rand(N)), "data_c": (["XC"], np.random.rand(N))},
coords={
"XG": (
["XG"],
2 * np.pi / N * np.arange(0, N),
{"axis": "X", "c_grid_axis_shift": -0.5},
),
"XC": (["XC"], 2 * np.pi / N * (np.arange(0, N) + 0.5), {"axis": "X"}),
},
),
"1d_right": xr.Dataset(
{"data_g": (["XG"], np.random.rand(N)), "data_c": (["XC"], np.random.rand(N))},
coords={
"XG": (
["XG"],
2 * np.pi / N * np.arange(1, N + 1),
{"axis": "X", "c_grid_axis_shift": 0.5},
),
"XC": (["XC"], 2 * np.pi / N * (np.arange(0, N) - 0.5), {"axis": "X"}),
},
),
"2d_left": xr.Dataset(
{
"data_g": (["YG", "XG"], np.random.rand(2 * N, N)),
"data_c": (["YC", "XC"], np.random.rand(2 * N, N)),
},
coords={
"XG": (
["XG"],
2 * np.pi / N * np.arange(0, N),
{"axis": "X", "c_grid_axis_shift": -0.5},
),
"XC": (["XC"], 2 * np.pi / N * (np.arange(0, N) + 0.5), {"axis": "X"}),
"YG": (
["YG"],
2 * np.pi / (2 * N) * np.arange(0, 2 * N),
{"axis": "Y", "c_grid_axis_shift": -0.5},
),
"YC": (
["YC"],
2 * np.pi / (2 * N) * (np.arange(0, 2 * N) + 0.5),
{"axis": "Y"},
),
},
),
}
# include periodicity
# Maps test-case name -> (dataset, periodic) where ``periodic`` is either a
# boolean (all axes wrap / no axis wraps) or a list of the axis names that
# wrap around.
datasets_with_periodicity = {
    "nonperiodic_1d_outer": (datasets["1d_outer"], False),
    "nonperiodic_1d_inner": (datasets["1d_inner"], False),
    "periodic_1d_left": (datasets["1d_left"], True),
    "nonperiodic_1d_left": (datasets["1d_left"], False),
    "periodic_1d_right": (datasets["1d_right"], True),
    "nonperiodic_1d_right": (datasets["1d_right"], False),
    "periodic_2d_left": (datasets["2d_left"], True),
    "nonperiodic_2d_left": (datasets["2d_left"], False),
    "xperiodic_2d_left": (datasets["2d_left"], ["X"]),
    "yperiodic_2d_left": (datasets["2d_left"], ["Y"]),
}
expected_values = {
"nonperiodic_1d_outer": {"axes": {"X": {"center": "XC", "outer": "XG"}}},
"nonperiodic_1d_inner": {"axes": {"X": {"center": "XC", "inner": "XG"}}},
"periodic_1d_left": {"axes": {"X": {"center": "XC", "left": "XG"}}},
"nonperiodic_1d_left": {"axes": {"X": {"center": "XC", "left": "XG"}}},
"periodic_1d_right": {
"axes": {"X": {"center": "XC", "right": "XG"}},
"shift": True,
},
"nonperiodic_1d_right": {
"axes": {"X": {"center": "XC", "right": "XG"}},
"shift": True,
},
"periodic_2d_left": {
"axes": {
"X": {"center": "XC", "left": "XG"},
"Y": {"center": "YC", "left": "YG"},
}
},
"nonperiodic_2d_left": {
"axes": {
"X": {"center": "XC", "left": "XG"},
"Y": {"center": "YC", "left": "YG"},
}
},
"xperiodic_2d_left": {
"axes": {
"X": {"center": "XC", "left": "XG"},
"Y": {"center": "YC", "left": "YG"},
}
},
"yperiodic_2d_left": {
"axes": {
"X": {"center": "XC", "left": "XG"},
"Y": {"center": "YC", "left": "YG"},
}
},
}
@pytest.fixture(scope="module", params=datasets_with_periodicity.keys())
def all_datasets(request):
    """Yield (dataset, periodicity, expected metadata) for every test case."""
    ds, periodic = datasets_with_periodicity[request.param]
    return ds, periodic, expected_values[request.param]
@pytest.fixture(
    scope="module",
    params=[
        "nonperiodic_1d_outer",
        "nonperiodic_1d_inner",
        "nonperiodic_1d_left",
        "nonperiodic_1d_right",
    ],
)
def nonperiodic_1d(request):
    """Yield (dataset, periodicity, expected metadata) for non-periodic 1D cases."""
    ds, periodic = datasets_with_periodicity[request.param]
    return ds, periodic, expected_values[request.param]
# (Repaired corrupted '"pe | riodic_1d_right"' param in the decorator.)
@pytest.fixture(scope="module", params=["periodic_1d_left", "periodic_1d_right"])
def periodic_1d(request):
    """Yield (dataset, periodicity, expected metadata) for periodic 1D cases."""
    ds, periodic = datasets_with_periodicity[request.param]
    return ds, periodic, expected_values[request.param]
# (Repaired corrupted '@pyt | est.fixture(' decorator line.)
@pytest.fixture(
    scope="module",
    params=[
        "periodic_2d_left",
        "nonperiodic_2d_left",
        "xperiodic_2d_left",
        "yperiodic_2d_left",
    ],
)
def all_2d(request):
    """Yield (dataset, periodicity, expected metadata) for every 2D case."""
    ds, periodic = datasets_with_periodicity[request.param]
    return ds, periodic, expected_values[request.param]
@pytest.fixture(scope="module", params=["periodic_2d_left"])
def periodic_2d(request):
    """Yield (dataset, periodicity, expected metadata) for the periodic 2D case."""
    ds, periodic = datasets_with_periodicity[request.param]
    return ds, periodic, expected_values[request.param]
@pytest.fixture(
    scope="module",
    params=["nonperiodic_2d_left", "xperiodic_2d_left", "yperiodic_2d_left"],
)
def nonperiodic_2d(request):
    """Yield (dataset, periodicity, expected metadata) for non-/partially-periodic 2D cases."""
    ds, periodic = datasets_with_periodicity[request.param]
    return ds, periodic, expected_values[request.param]
def datasets_grid_metric(grid_type):
"""Uniform grid test dataset.
Should eventually be extended to nonuniform grid"""
xt = np.arange(4)
xu = xt + 0.5
yt = np.arange(5)
yu = yt + 0.5
zt = np.arange(6)
zw = zt + 0.5
|
lyft/huxley | huxley/steps.py | Python | apache-2.0 | 6,736 | 0.002227 | # Copyright (c) 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
from huxley.consts import TestRunModes
from huxley.errors import TestError
from huxley.images import images_identical, image_diff
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
# Since we want consistent focus screenshots we steal focus
# when taking screenshots. To avoid races we lock during this
# process.
SCREENSHOT_LOCK = threading.RLock()
class TestStep(object):
    """Base class for a single recorded step in a Huxley test replay."""
    def __init__(self, offset_time):
        # Time offset (relative to the start of the run) at which to replay.
        self.offset_time = offset_time
    def execute(self, run):
        # Subclasses perform the actual browser interaction against run.d.
        raise NotImplementedError
class ClickTestStep(TestStep):
    """Replays a recorded mouse click at an absolute (x, y) page position."""
    CLICK_ID = '_huxleyClick'
    def __init__(self, offset_time, pos):
        super(ClickTestStep, self).__init__(offset_time)
        # (x, y) page coordinates of the recorded click.
        self.pos = pos
    def execute(self, run):
        print ' Clicking', self.pos
        if run.d.name == 'phantomjs':
            # PhantomJS 1.x does not support 'click()' so use Selenium
            body = run.d.find_element_by_tag_name('body')
            ActionChains(run.d).move_to_element_with_offset(body, self.pos[0], self.pos[1]).click().perform()
        elif run.d.name == 'Safari':
            # Safari: resolve the element under the point in-page and click
            # it via WebDriver, since coordinate clicks are unsupported.
            el = run.d.execute_script('return document.elementFromPoint(%d, %d);' % (self.pos[0], self.pos[1]))
            if el:
                el.click()
            else:
                print ' warning, no element found at (%d, %d);' % (self.pos[0], self.pos[1])
        else:
            # Work around multiple bugs in WebDriver's implementation of click()
            run.d.execute_script(
                'document.elementFromPoint(%d, %d).click();' % (self.pos[0], self.pos[1])
            )
            # Also move focus so subsequent key steps target the clicked element.
            run.d.execute_script(
                'document.elementFromPoint(%d, %d).focus();' % (self.pos[0], self.pos[1])
            )
class ScrollTestStep(TestStep):
    """Replays a recorded window scroll to an absolute (x, y) offset."""
    SCROLL_OFFSET_ID = '_huxleyScroll'
    def __init__(self, offset_time, pos):
        super(ScrollTestStep, self).__init__(offset_time)
        # Target (x, y) scroll offset in page pixels.
        self.pos = pos
    def execute(self, run):
        print ' Scrolling', self.pos
        run.d.execute_script(
            'window.scrollTo(%d, %d);' % (self.pos[0], self.pos[1])
        )
class KeyTestStep(TestStep):
    """Replays a recorded keypress, translating a JS keyCode into the
    character or Keys constant that WebDriver's send_keys() expects."""
    # JS keyCode -> WebDriver key for the unshifted layout
    # (punctuation rows assume a US keyboard -- TODO confirm vs. recorder).
    KEYS_BY_JS_KEYCODE = {
        33: Keys.PAGE_UP,
        34: Keys.PAGE_DOWN,
        35: Keys.END,
        36: Keys.HOME,
        37: Keys.LEFT,
        38: Keys.UP,
        39: Keys.RIGHT,
        40: Keys.DOWN,
        46: Keys.DELETE,
        186: ";",
        187: "=",
        188: ",",
        190: ".",
        191: "/",
        192: "`",
        219: "[",
        220: "\\",
        221: "]",
        222: "'",
    }
    # Same table overlaid with shifted digit/punctuation characters.
    # (dict(a.items() + b.items()) is Python 2 only; b's entries win.)
    KEYS_BY_JS_KEYCODE_SHIFT = dict(KEYS_BY_JS_KEYCODE.items() + {
        48: ")",
        49: "!",
        50: "@",
        51: "#",
        52: "$",
        53: "%",
        54: "^",
        55: "&",
        56: "*",
        57: "(",
        186: ":",
        187: "+",
        188: "<",
        190: ">",
        191: "?",
        192: "~",
        219: "{",
        220: "|",
        221: "}",
        222: "\"",
    }.items())
    # DOM id assigned to the active element so it can be re-found by id.
    KEY_ID = '_huxleyKey'
    # param is [keyCode, shiftKey]
    def __init__(self, offset_time, param):
        super(KeyTestStep, self).__init__(offset_time)
        # backwards compat. for old records where a string was saved
        if isinstance(param, basestring):
            self.key = param
        else:
            # Pick the shifted or unshifted table; fall back to chr(keyCode),
            # lower-cased when shift was not held.
            codes = self.KEYS_BY_JS_KEYCODE_SHIFT if param[1] else self.KEYS_BY_JS_KEYCODE
            char = chr(param[0])
            if not param[1]:
                char = char.lower()
            self.key = codes.get(param[0], char)
    def execute(self, run):
        # HOME/END are replayed as window scrolls rather than key events.
        if self.key == Keys.HOME:
            print ' Scrolling to top'
            run.d.execute_script('window.scrollTo(0, 0)')
        elif self.key == Keys.END:
            print ' Scrolling to bottom'
            run.d.execute_script('window.scrollTo(0, document.body.clientHeight)')
        else:
            print ' Typing', self.key
            # Ensure the focused element has an id so send_keys can target it.
            id = run.d.execute_script('return document.activeElement.id;')
            if id is None or id == '':
                run.d.execute_script(
                    'document.activeElement.id = %r;' % self.KEY_ID
                )
                id = self.KEY_ID
            run.d.find_element_by_id(id).send_keys(self.key)
class ScreenshotTestStep(TestStep):
    """Takes a screenshot and compares it against the recorded reference
    image; in rerecord mode it refreshes the reference instead."""
    def __init__(self, offset_time, run, index):
        super(ScreenshotTestStep, self).__init__(offset_time)
        # Ordinal of this screenshot within the test; names the reference file.
        self.index = index
    def get_path(self, run):
        # Reference image path, e.g. <run.path>/screenshot2.png
        return os.path.join(run.path, 'screenshot' + str(self.index) + '.png')
    def execute(self, run):
        print ' Taking screenshot', self.index
        original = self.get_path(run)
        new = os.path.join(run.path, 'last.png')
        with SCREENSHOT_LOCK:
            # Steal focus for a consistent screenshot
            run.d.switch_to_window(run.d.window_handles[0])
            # iOS insertion points are visible in screenshots
            if run.d.name == 'Safari':
                # blur() hides the caret; return value is unused here.
                active = run.d.execute_script('a = document.activeElement; a.blur(); return a;')
            if run.mode == TestRunModes.RERECORD:
                # Re-recording: overwrite the reference image, no comparison.
                run.d.save_screenshot(original)
            else:
                run.d.save_screenshot(new)
                try:
                    if not images_identical(original, new, run.test.mask):
                        if run.save_diff:
                            diffpath = os.path.join(run.path, 'diff.png')
                            diff = image_diff(original, new, diffpath, run.diffcolor, run.test.mask)
                            raise TestError(
                                ('Screenshot %s was different; compare %s with %s. See %s ' +
                                 'for the comparison. diff=%r') % (
                                    self.index, original, new, diffpath, diff
                                )
                            )
                        else:
                            raise TestError('Screenshot %s was different.' % self.index)
                finally:
                    # Drop the scratch capture unless diffs are being kept.
                    if not run.save_diff:
                        os.unlink(new)
|
Spiderlover/Toontown | toontown/town/TownLoader.py | Python | mit | 16,978 | 0.001944 | from pandac.PandaModules import *
from toontown.battle.BattleProps import *
from toontown.battle.BattleSounds import *
from toontown.distributed.ToontownMsgTypes import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import cleanupDialog
from direct.directnotify import DirectNotifyGlobal
from toontown.hood import Place
from direct.showbase import DirectObject
from direct.fsm import StateData
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import TownBattle
from toontown.toon import Toon
from toontown.toon.Toon import teleportDebug
from toontown.battle import BattleParticles
from direct.fsm import StateData
from toontown.building import ToonInterior
from toontown.hood import QuietZoneState
from toontown.hood import ZoneUtil
from direct.interval.IntervalGlobal import *
from toontown.dna.DNAParser import DNABulkLoader
class TownLoader(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('TownLoader')
zone2music = {
ToontownCentral : 'phase_9/audio/bgm/encntr_suit_ttc.ogg',
DonaldsDock : 'phase_9/audio/bgm/encntr_suit_dd.ogg',
DaisyGardens : 'phase_9/audio/bgm/encntr_suit_dg.ogg',
MinniesMelodyland : 'phase_9/audio/bgm/encntr_suit_mml.ogg',
TheBrrrgh : 'phase_9/audio/bgm/encntr_suit_tb.ogg',
DonaldsDreamland : 'phase_9/audio/bgm/encntr_suit_ddl.ogg'
}
    def __init__(self, hood, parentFSMState, doneEvent):
        """Set up the town loader's FSM and bookkeeping.

        hood: owning hood loader; parentFSMState: FSM state to attach our
        FSM under; doneEvent: messenger event fired when the loader is done.
        """
        StateData.StateData.__init__(self, doneEvent)
        self.hood = hood
        self.parentFSMState = parentFSMState
        # States: street <-> toonInterior transitions always pass through
        # the quietZone; 'start' and 'final' bracket the loader lifetime.
        self.fsm = ClassicFSM.ClassicFSM('TownLoader', [State.State('start', self.enterStart, self.exitStart, ['quietZone', 'street', 'toonInterior']),
         State.State('street', self.enterStreet, self.exitStreet, ['quietZone']),
         State.State('toonInterior', self.enterToonInterior, self.exitToonInterior, ['quietZone']),
         State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['street', 'toonInterior']),
         State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
        # Filled in by load() from the requested zone.
        self.branchZone = None
        self.canonicalBranchZone = None
        self.placeDoneEvent = 'placeDone'
        self.townBattleDoneEvent = 'town-battle-done'
        return
    def loadBattleAnims(self):
        # Preload toon battle animations shared by all streets in town.
        Toon.loadBattleAnims()
    def unloadBattleAnims(self):
        # Release the battle animations loaded in loadBattleAnims().
        Toon.unloadBattleAnims()
    def load(self, zoneId):
        """Load music, battle assets and the TownBattle UI for *zoneId*."""
        self.zoneId = zoneId
        self.parentFSMState.addChild(self.fsm)
        self.loadBattleAnims()
        self.branchZone = ZoneUtil.getBranchZone(zoneId)
        self.canonicalBranchZone = ZoneUtil.getCanonicalBranchZone(zoneId)
        # musicFile/activityMusicFile are expected to be set by the
        # hood-specific subclass -- TODO confirm against subclasses.
        self.music = base.loadMusic(self.musicFile)
        self.activityMusic = base.loadMusic(self.activityMusicFile)
        # Per-hood battle music, falling back to the TTC suit-encounter track.
        self.battleMusic = base.loadMusic(self.zone2music.get(ZoneUtil.getHoodId(zoneId), 'phase_9/audio/bgm/encntr_suit_ttc.ogg'))#'phase_3.5/audio/bgm/encntr_general_bg.ogg'))
        self.townBattle = TownBattle.TownBattle(self.townBattleDoneEvent)
        self.townBattle.load()
    def unload(self):
        """Tear down everything load()/createHood() built and flush caches."""
        self.unloadBattleAnims()
        globalPropPool.unloadProps()
        globalBattleSoundCache.clear()
        BattleParticles.unloadParticles()
        self.parentFSMState.removeChild(self.fsm)
        del self.parentFSMState
        del self.fsm
        del self.streetClass
        self.landmarkBlocks.removeNode()
        del self.landmarkBlocks
        self.hood.dnaStore.resetSuitPoints()
        self.hood.dnaStore.resetBattleCells()
        del self.hood
        # Per-zone scene bookkeeping built while the street was active.
        del self.nodeDict
        del self.zoneDict
        del self.fadeInDict
        del self.fadeOutDict
        del self.nodeList
        self.geom.removeNode()
        del self.geom
        self.townBattle.unload()
        self.townBattle.cleanup()
        del self.townBattle
        del self.battleMusic
        del self.music
        del self.activityMusic
        del self.holidayPropTransforms
        self.deleteAnimatedProps()
        cleanupDialog('globalDialog')
        # Flush pooled models/textures now that nothing references them.
        ModelPool.garbageCollect()
        TexturePool.garbageCollect()
    def enter(self, requestStatus):
        """Enter the loader and jump straight to the requested 'where' state."""
        teleportDebug(requestStatus, 'TownLoader.enter(%s)' % requestStatus)
        self.fsm.enterInitialState()
        teleportDebug(requestStatus, 'setting state: %s' % requestStatus['where'])
        self.setState(requestStatus['where'], requestStatus)
    def exit(self):
        # Stop listening for any events accepted while this loader was active.
        self.ignoreAll()
    def setState(self, stateName, requestStatus):
        # Forward the transition request to the internal FSM.
        self.fsm.request(stateName, [requestStatus])
    def enterStart(self):
        # 'start' is a bookkeeping state; nothing to do on entry.
        pass
    def exitStart(self):
        # Nothing to clean up for the bookkeeping 'start' state.
        pass
    def enterStreet(self, requestStatus):
        """Create, load and enter the street Place for this branch."""
        teleportDebug(requestStatus, 'enterStreet(%s)' % requestStatus)
        self.acceptOnce(self.placeDoneEvent, self.streetDone)
        # streetClass is provided by the hood-specific subclass.
        self.place = self.streetClass(self, self.fsm, self.placeDoneEvent)
        self.place.load()
        base.cr.playGame.setPlace(self.place)
        self.place.enter(requestStatus)
    def exitStreet(self):
        # Tear down the current street Place and clear the global place pointer.
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return
def streetDone(self):
self.requestStatus = self.place.doneStatus
status = self.place.doneStatus
if status['loader'] == 'townLoader' and ZoneUtil.getBranchZone(status['zoneId']) == self.branchZone and status['shardId'] == None:
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
return
    def enterToonInterior(self, requestStatus):
        """Create, load and enter a ToonInterior Place (shop/house interior)."""
        self.acceptOnce(self.placeDoneEvent, self.handleToonInteriorDone)
        self.place = ToonInterior.ToonInterior(self, self.fsm.getStateNamed('toonInterior'), self.placeDoneEvent)
        base.cr.playGame.setPlace(self.place)
        self.place.load()
        self.place.enter(requestStatus)
    def exitToonInterior(self):
        # Stop waiting for the interior's done event and tear the Place down.
        self.ignore(self.placeDoneEvent)
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return
def handleToonInteriorDone(self):
status = self.place.doneStatus
if ZoneUtil.getBranchZone(status['zoneId']) == self.branchZone and status['shardId'] == None:
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
return
    def enterQuietZone(self, requestStatus):
        """Enter the transitional quiet zone between two places."""
        self.quietZoneDoneEvent = uniqueName('quietZoneDone')
        self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
        self.quietZoneStateData = QuietZoneState.QuietZoneState(self.quietZoneDoneEvent)
        self.quietZoneStateData.load()
        self.quietZoneStateData.enter(requestStatus)
    def exitQuietZone(self):
        # Drop the one-shot done handler and unload the quiet-zone state.
        self.ignore(self.quietZoneDoneEvent)
        del self.quietZoneDoneEvent
        self.quietZoneStateData.exit()
        self.quietZoneStateData.unload()
        self.quietZoneStateData = None
        return
def handleQuietZoneDone(self):
status = self.quietZoneStateData.getRequestStatus()
self.fsm.request(status['where'], [status])
    def enterFinal(self):
        # 'final' is a bookkeeping state; nothing to do on entry.
        pass
    def exitFinal(self):
        # Nothing to clean up for the bookkeeping 'final' state.
        pass
def createHood(self, dnaFile, loadStorage = 1):
if loadStorage:
files = ('phase_5/dna/storage_town.pdna', self.townStorageDNAFile)
dnaBulk = DNABulkLoader(self.hood.dnaStore, files)
dnaBulk.loadDNAFiles()
node = loader.loadDNAFile(self.hood.dnaStore, dnaFile)
self.notify.debug('done loading %s' % dnaFile)
if node.getNumParents() == 1:
self.geom = NodePath(node.getParent(0))
self.geom.reparentTo(hidden)
else:
self.geom = hidden.attachNewNode(node)
self.makeDictionaries(self.hood.dnaStore)
self.reparentLandmarkBlockNodes()
| self.renameFloorPolys(self.nodeList)
self.createAnimatedProps(self.nodeList)
self | .holidayPropTransforms = {}
|
bigoldboy/repository.bigoldboy | script.tvguide.fullscreen/play.py | Python | gpl-3.0 | 1,273 | 0.007855 | import sys
import xbmc,xbmcaddon
import sqlite3
import sys
from vpnapi import VPNAPI

ADDON = xbmcaddon.Addon(id='script.tvguide.fullscreen')

# Channel name and programme start time are handed over by the guide UI.
channel = sys.argv[1]
start = sys.argv[2]

path = xbmc.translatePath('special://profile/addon_data/script.tvguide.fullscreen/source.db')
try:
    conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)
except Exception as detail:
    xbmc.log("EXCEPTION: (script.tvguide.fullscreen) %s" % detail, xbmc.LOGERROR)
    # Without a database connection the lookup below cannot work; re-raise
    # instead of failing later with a NameError on 'conn'.
    raise

try:
    # Look up the custom stream URL recorded for this channel.
    c = conn.cursor()
    c.execute('SELECT stream_url FROM custom_stream_url WHERE channel=?', [channel])
    row = c.fetchone()
finally:
    # Close the guide database as soon as the lookup is done (was leaked).
    conn.close()

if row:
    url = row[0]
    # Remember what is playing so the guide can show/restore it later.
    ADDON.setSetting('playing.channel',channel)
    ADDON.setSetting('playing.start',start)
    if xbmc.getCondVisibility("System.HasAddon(service.vpn.manager)"):
        try:
            if ADDON.getSetting('vpnmgr.connect') == "true":
                vpndefault = False
                if ADDON.getSetting('vpnmgr.default') == "true":
                    vpndefault = True
                api = VPNAPI()
                if url[0:9] == 'plugin://':
                    # Plugin URLs can be filtered/switched per-addon VPN rules.
                    api.filterAndSwitch(url, 0, vpndefault, True)
                else:
                    if vpndefault: api.defaultVPN(True)
        except Exception:
            # VPN switching is best-effort; never block playback on it.
            pass
    xbmc.executebuiltin('PlayMedia(%s)' % url)
kytvi2p/Sigil | src/Resource_Files/plugin_launchers/python/launcher.py | Python | gpl-3.0 | 11,319 | 0.003534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Copyright (c) 2014 Kevin B. Hendricks, John Schember, and Doug Massay
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals, division, absolute_import, print_function
from compatibility_utils import PY3, text_type, utf8_str, unicode_str, unescapeit
from compatibility_utils import unicode_argv, add_cp65001_codec
# Sigil Python Script Launcher
#
# This launcher script is aways invoked by the script manager
# for python scripts (both Python 2.7 and 3.4 and later). It is passed in:
# ebook_root, output directory, script type, and path to target script location
#
# This launcher script will parse the opf and make available
# a wrapper and a type specific container object that acts as an
# interface to be used by target scripts to safely access the ebook files.
#
# The Wrapper acts as a record keeper of changed files while acting to
# shield the original files
#
# The Launcher script will then invoke the target's run() routine
# All target output to stdout and stderr is captured
# Upon completion simple xml formatted information is passed back
# to Sigil before the launcher script exits
import sys
import os, os.path
import codecs
import unipath
from unipath import pathof
from opf_parser import Opf_Parser
from wrapper import Wrapper
from bookcontainer import BookContainer
from inputcontainer import InputContainer
from outputcontainer import OutputContainer
from validationcontainer import ValidationContainer
from xml.sax.saxutils import escape as xmlescape
import traceback
add_cp65001_codec()
_DEBUG=False
SUPPORTED_SCRIPT_TYPES = ['input', 'output', 'edit', 'validation']
_XML_HEADER = '<?xml version="1.0" encoding="UTF-8"?>\n'
EXTRA_ENTITIES = {'"':'"', "'":"'"}
def escapeit(sval, EXTRAS=None):
    """XML-escape *sval*, first unescaping it so that already-escaped input
    is not double-escaped; EXTRAS adds extra entity mappings."""
    plain = unescapeit(sval)
    if EXTRAS:
        return xmlescape(plain, EXTRAS)
    return xmlescape(plain)
# Wrap a stream so that output gets saved
# using utf-8 encoding
class SavedStream:
    """File-like wrapper that mirrors writes into a ProcessScript capture
    list while normalizing everything to utf-8 bytes."""
    def __init__(self, stream, stype, ps):
        self.stream = stream
        self.encoding = stream.encoding
        self.ps = ps
        # 'stdout' or 'stderr': selects which capture list receives writes.
        self.stype = stype
        if self.encoding == None:
            self.encoding = 'utf-8'
    def write(self, data):
        # Normalize data to utf-8 bytes before capturing/forwarding.
        if isinstance(data, text_type):
            data = data.encode('utf-8')
        elif self.encoding not in ['utf-8','UTF-8','cp65001','CP65001']:
            udata = data.decode(self.encoding)
            data = udata.encode('utf-8')
        if self.stype == 'stdout':
            self.ps.stdouttext.append(data)
            if PY3:
                # Python 3 text streams take bytes only via .buffer.
                self.stream.buffer.write(data)
            else:
                self.stream.write(data)
        else:
            # stderr output is captured only, not echoed to the real stream.
            self.ps.stderrtext.append(data)
    def __getattr__(self, attr):
        # Present a binary utf-8 facade; delegate everything else.
        if attr == 'mode':
            return 'wb'
        if attr == 'encoding':
            return 'utf-8'
        if attr == 'stream':
            return self.stream
        return getattr(self.stream, attr)
class ProcessScript(object):
    """Imports and runs a target plugin script, capturing its output and
    accumulating the XML <wrapper> result reported back to Sigil."""
    def __init__(self, script_type, script_module, container):
        self.container = container
        self.script_module = script_module
        self.script_type = script_type
        # 0 on success, -1 on failure; set by launch().
        self.exitcode = None
        # Captured stdout/stderr chunks (utf-8 bytes) from the plugin.
        self.stdouttext = []
        self.stderrtext = []
        # Accumulated XML result document, as a list of string fragments.
        self.wrapout = []
    def launch(self):
        """Run the plugin's run(container) and build the result wrapper."""
        script_module = self.script_module
        script_type = self.script_type
        container = self.container
        # Tee plugin output so it can be echoed and reported.
        sys.stdout = SavedStream(sys.stdout, 'stdout', self)
        sys.stderr = SavedStream(sys.stderr, 'stderr', self)
        try:
            target_script = __import__(script_module)
            self.exitcode = target_script.run(container)
            sys.stdout = sys.stdout.stream
            sys.stderr = sys.stderr.stream
        except Exception as e:
            # Send the traceback through the captured stderr, restore the
            # real streams, and emit a failure wrapper.
            sys.stderr.write(traceback.format_exc())
            sys.stderr.write("Error: %s\n" % e)
            sys.stdout = sys.stdout.stream
            sys.stderr = sys.stderr.stream
            self.wrapout.append(_XML_HEADER)
            self.wrapout.append('<wrapper type="%s">\n<result>failed</result>\n<changes/>\n' % script_type)
            self.exitcode = -1
            return
        if script_type == "edit":
            # write out the final updated opf to the outdir
            container._w.write_opf()
        # save the wrapper results to a file before exiting the thread
        self.wrapout.append(_XML_HEADER)
        self.wrapout.append('<wrapper type="%s">\n' % script_type)
        self.wrapout.append('<result>success</result>\n')
        self.wrapout.append('<changes>\n')
        if script_type == "edit":
            # Deleted entries: manifest items get an OEBPS/ book path; other
            # files are keyed directly by their book href (id field emptied).
            for ftype, id, href in container._w.deleted:
                if ftype == 'manifest':
                    bookhref = 'OEBPS/' + href
                    mime = container._w.getmime(bookhref)
                else:
                    bookhref = id
                    id = ""
                    mime = container._w.getmime(bookhref)
                self.wrapout.append('<deleted href="%s" id="%s" media-type="%s" />\n' % (bookhref, id, mime))
        if script_type in ['input', 'edit']:
            # Report files the plugin added.
            for id in container._w.added:
                if id in container._w.id_to_href:
                    href = container._w.id_to_href[id]
                    bookhref = 'OEBPS/' + href
                    mime = container._w.id_to_mime[id]
                else:
                    bookhref = id
                    id = ""
                    mime = container._w.getmime(bookhref)
                self.wrapout.append('<added href="%s" id="%s" media-type="%s" />\n' % (bookhref, id, mime))
        if script_type == 'edit':
            # Report files the plugin modified in place.
            for id in container._w.modified:
                if id in container._w.id_to_href:
                    href = container._w.id_to_href[id]
                    bookhref = 'OEBPS/' + href
                    mime = container._w.id_to_mime[id]
                else:
                    bookhref = id
                    id = ""
                    mime = container._w.getmime(bookhref)
                self.wrapout.append('<modified href="%s" id="%s" media-type="%s" />\n' % (bookhref, id, mime))
        if script_type == 'validation':
            # Validation plugins report diagnostics instead of file changes.
            for vres in container.results:
                self.wrapout.append('<validationresult type="%s" filename="%s" linenumber="%s" message="%s" />\n' % (vres.restype, vres.filename, vres.linenumber, vres.message))
        self.wrapout.append('</changes>\n')
        self.exitcode = 0
        return
def failed(script_type, msg):
wrapper = _XML_HEADER
if script_type is None:
wrapper += '<wrapper>\n<result>failed</result>\n<changes/>\n'
else:
wrapper += '<wrapper type="%s">\n<result>failed</result>\n<changes/>\n' % script_type
wrapper += '<msg>%s</msg>\n</wrapper>\n' % msg
# writ |
joac/klein | src/klein/__init__.py | Python | mit | 572 | 0.001748 | from __future__ import absolute_import, division
from klein.app import Klein, run, route, resource
from klein._plating import Plating
from ._version import __version__ | as _incremental_version
# Make it a str, for backwards compatibility
__version__ = _incremental_version.base()
__author__ = "The Klein contributors (see AUTHORS)"
__license__ = "MIT"
__copyright__ = "Copyright 2016 {0}".format(__author__)
__all__ = [
'Klein',
'Plating',
'__author__',
'__copyright__',
'__license__',
'__version__',
'resource',
'rou | te',
'run',
]
|
ubuntu/ubuntu-make | tests/data/duplicatedframeworks/samecategory.py | Python | gpl-3.0 | 1,683 | 0.001783 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Framework with another category module without any framework"""
import umake.frameworks
class ACategory(umake.frameworks.BaseCategory):
    """Duplicate test category sharing its name with another module's category."""
    def __init__(self):
        super().__init__(name="Category A",
                         description="Other category A description")
class FrameworkC(umake.frameworks.BaseFramework):
    """Minimal framework stub; setup/remove simply defer to the base class."""
    def __init__(self, **kwargs):
        super().__init__(name="Framework C",
                         description="Description for framework C",
                         **kwargs)
    def setup(self, install_path=None, auto_accept_license=False):
        super().setup()
    def remove(self):
        super().remove()
class FrameworkD(umake.frameworks.BaseFramework):
    """Minimal framework stub; setup/remove simply defer to the base class."""
    def __init__(self, **kwargs):
        super().__init__(name="Framework D",
                         description="Description for framework D",
                         **kwargs)
    def setup(self, install_path=None, auto_accept_license=False):
        super().setup()
    def remove(self):
        super().remove()
|
moreandres/bottleneck | setup.py | Python | gpl-2.0 | 1,379 | 0.026831 | #! /usr/bin/env python
import os
from setuptools import setup

readme = os.path.join(os.path.dirname(__file__), 'README.md')
# Read the README up front with a context manager instead of leaking an
# open file handle from an inline open(...).read() inside the setup() call.
with open(readme) as readme_file:
    long_description = readme_file.read()

setup(name = 'bottleneck',
      version = '0.1.0',
      description = 'performance report generator for OpenMP programs in GNU/Linux',
      long_description = long_description,
      author = 'Andres More',
      author_email='more.andres@gmail.com',
      url='https://github.com/moreandres/bottleneck.git',
      packages= [ 'bottleneck' ],
      entry_points = { 'console_scripts': [ 'bt = bottleneck.bottleneck:main' ] },
      data_files = [ ( 'config', [ 'cfg/bt.cfg', 'cfg/bt.tex' ] ) ],
      classifiers = [
          'Development Status :: 1 - Planning',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
          'Operating System :: POSIX',
          'Natural Language :: English',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python',
          'Topic :: Scientific/Engineering :: Information Analysis',
          'Topic :: Software Development :: Quality Assurance',
          'Topic :: System :: Benchmark',
          'Topic :: Utilities',
          ],
      zip_safe = False,
      test_suite = 'tests',
      # include_package_data = True,
      # install_requires=[ 'numpy', 'scipy', 'matplotlib' ],
      )
|
cfelton/minnesota | setup.py | Python | gpl-3.0 | 1,770 | 0.013559 | #
# Copyright (c) 2013 Christopher L. Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along wit | h this program. If not, see <http://www.gnu.org/licenses/>.
try:
from setuptools import setup
from setuptools import find_packages
except ImportError:
from distutils.core import setup
from pkgutil import walk_packages
impo | rt mn
# many pypy installs don't have setuptools (?)
def _find_packages(path='.', prefix=''):
    """Yield *prefix* and every sub-package name found under *path*.

    Pure-Python fallback for environments (some pypy installs) that lack
    setuptools' find_packages().
    """
    yield prefix
    child_prefix = prefix + "."
    for _, pkg_name, is_pkg in walk_packages(path, child_prefix,
                                             onerror=lambda x: x):
        if is_pkg:
            yield pkg_name
def find_packages():
    """List every package shipped under the top-level ``mn`` package.

    NOTE(review): this unconditionally shadows setuptools' find_packages
    even when the setuptools import above succeeded -- presumably intended;
    verify against the project's packaging history.
    """
    return [pkg for pkg in _find_packages(mn.__path__, mn.__name__)]
# Package metadata; find_packages() enumerates everything under 'mn'.
setup(name = "minnesota",
      version = "0.1pre",
      description = "collection of HDL cores ",
      license = "LGPL",
      platforms = ["Any"],
      keywords = "DSP HDL MyHDL FPGA FX2 USB",
      packages = find_packages(),
      # @todo need to add the examples and test directories,
      # copy it over ...
      )
|
watermelo/libcloud | libcloud/test/loadbalancer/test_dimensiondata.py | Python | apache-2.0 | 26,630 | 0.00154 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataVIPNode, DimensionDataPool
from libcloud.common.dimensiondata import DimensionDataPoolMember
from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm
from libcloud.loadbalancer.drivers.dimensiondata \
import DimensionDataLBDriver as DimensionData
from libcloud.loadbalancer.types import State
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionDataTests(unittest.TestCase):
    def setUp(self):
        # Route all HTTP through the mock *before* the driver is constructed,
        # and reset the mock's response mode between tests.
        DimensionData.connectionCls.conn_classes = (None, DimensionDataMockHttp)
        DimensionDataMockHttp.type = None
        self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
with self.assertRaises(ValueError):
self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')
    def test_invalid_creds(self):
        # The UNAUTHORIZED mock type makes every request come back 401,
        # which the driver must surface as InvalidCredsError.
        DimensionDataMockHttp.type = 'UNAUTHORIZED'
        with self.assertRaises(InvalidCredsError):
            self.driver.list_balancers()
def test_create_balancer(self):
self.driver.ex_set_current_network_domain('1234')
members = []
members.append(Member(
id=None,
ip='1.2.3.4',
port=80))
balancer = self.driver.create_balancer(
name='test',
port=80,
protocol='http',
algorithm=Algorithm.ROUND_ROBIN,
members=members,
ex_listener_ip_address='5.6.7.8')
self.assertEqual(balancer.name, 'test')
self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
self.assertEqual(balancer.ip, '165.180.12.22')
self.assertEqual(balancer.port, 80)
self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54')
self.assertEqual(balancer.extra['network_domain_id'], '1234')
self.assertEqual(balancer.extra['listener_ip_address'], '5.6.7.8')
def test_create_balancer_with_defaults(self):
self.driver.ex_set_current_network_domain('1234')
balancer = self.driver.create_balancer(
name='test',
port=None,
protocol=None,
algorithm=None,
members=None)
self.assertEqual(balancer.name, 'test')
self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
self.assertEqual(balancer.ip, '165.180.12.22')
self.assertEqual(balancer.port, None)
self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54')
self.assertEqual(balancer.extra['network_domain_id'], '1234')
def test_create_balancer_no_members(self):
self.driver.ex_set_current_network_domain('1234')
members = None
balancer = self.driver.create_balancer(
name='test',
port=80,
protocol='http',
algorithm=Algorithm.ROUND_ROBIN,
members=members)
self.assertEqual(balancer.name, 'test')
self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
self.assertEqual(balancer.ip, '165.180.12.22')
self.assertEqual(balancer.port, 80)
self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54')
self.assertEqual(balancer.extra['network_domain_id'], '1234')
def test_create_balancer_empty_members(self):
self.driver.ex_set_current_network_domain('1234')
members = []
balancer = self.driver.create_balancer(
name='test',
port=80,
protocol='http',
algorithm=Algorithm.ROUND_ROBIN,
members=members)
self.assertEqual(balancer.name, 'test')
self.assertEqual(balancer.id, '8334f461-0df0-42d5-97eb-f4678eb26bea')
self.assertEqual(balancer.ip, '165.180.12.22')
self.assertEqual(balancer.port, 80)
self.assertEqual(balancer.extra['pool_id'], '9e6b496d-5261-4542-91aa-b50c7f569c54')
self.assertEqual(balancer.extra['network_domain_id'], '1234')
def test_list_balancers(self):
bal = self.driver.list_balancers()
self.assertEqual(bal[0].name, 'myProduction.Virtual.Listener')
self.assertEqual(bal[0].id, '6115469d-a8bb-445b-bb23-d23b5283f2b9')
self.assertEqual(bal[0].port, '8899')
self.assertEqual(bal[0].ip, '165.180.12.22')
self.assertEqual(bal[0].state, State.RUNNING)
def test_balancer_list_members(self):
extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
'network_domain_id': '1234'}
balancer = LoadBalancer(
id='234',
name='test',
state=State.RUNNING,
ip='1.2.3.4',
port=1234,
driver=self.driver,
extra=extra
)
members = self.driver.balancer_list_members(balancer)
self.assertEqual(2, len(members))
self.assertEqual(members[0].ip, '10.0.3.13')
self.assertEqual(members[0].id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
self.assertEqual(members[0].port, 9889)
def test_balancer_attach_member(self):
extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
'network_domain_id': '1234'}
balancer = LoadBalancer(
id='234',
name='test',
state=State.RUNNING,
ip='1.2.3.4',
port=1234,
driver=self.driver,
extra=extra
)
member = Member(
id=None,
ip='112.12.2.2',
port=80,
balancer=balancer,
extra=None)
member = self.driver.balancer_attach_member(balancer, member)
self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
def test_balancer_attach_member_without_port(self):
extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
'network_domain_id': '1234'}
balancer = LoadBalancer(
| id='234',
name='test',
state=State.RUNNING,
ip='1.2.3.4',
port=1234,
driver=self.driver,
ext | ra=extra
)
member = Member(
id=None,
ip='112.12.2.2',
port=None,
balancer=balancer,
extra=None)
member = self.driver.balancer_attach_member(balancer, member)
self.assertEqual(member.id, '3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0')
self.assertEqual(member.port, None)
def test_balancer_detach_member(self):
extra = {'pool_id': '4d360b1f-bc2c-4ab7-9884-1f03ba2768f7',
'network_domain_id': '1234'}
balancer = LoadBalancer(
id='234',
name='test',
state=State.RUNNING,
ip='1.2.3.4',
port=1234,
driver=self.driver,
extra=extra
)
member = Member(
id='3dd806a2-c2c8-4c0c-9a4f-5219ea9266c0',
ip='112.12.2.2',
port=80,
balancer=balancer,
extra=None)
result = self.driver.balancer_detach_member(balancer, member)
self.assertEqual(result, |
ProjectSWGCore/NGECore2 | scripts/mobiles/spawnareas/dantooine_south_mixed_3.py | Python | lgpl-3.0 | 356 | 0.022472 | # Spawn Area file created with PSWG P | lanetary Spawn Tool
import sys
from java.util import Vector
def addSpawnArea(core):
    """Register the mixed quenker/huurton spawn area on Dantooine."""
    group_names = ['quenker_lair_group_1', 'huurton_lair_group_1', 'dantooine_quenker']
    mixed_groups = Vector()
    for group_name in group_names:
        mixed_groups.add(group_name)
    core.spawnService.addMixedSpawnArea(mixed_groups, 1495, -3502, 2396, 'dantooine')
    return
|
Shalantor/Connect4 | server/matchMakingThread.py | Python | mit | 4,679 | 0.013251 | #This will be the thread responsible for the matchmaking which operates as follows:
#There are four lists where the players are divided into based on their rank.
#List 1 is for ranks 0,1,2.
#List 2 is for ranks 3,4,5.
#List 3 is for ranks 6,7,8.
#List 4 is for ranks 9,10.
#When a player waits for a match too long, this thread will start looking for
#players in adjacent lists, first in the higher category list and then in the
#lower one.
#Each player has a dictionary associated with him, which will store his info
#and some other parameters, like his network info to connect to him.
#This thread support only 2 operations:
# 1) Add to match making lists
# 2) Terminate itself
MAX_LOOPS = 10
MAX_WAIT = 10
import Queue,time,random
#inputQueue is for getting players from account threads
#outputQueue is for sending match tokens to the thread that handles the matches
#exitQueue is used for exiting the thread
def mmThread(inputQueue,exitQueue,outputQueue):
#Lists for all difficulties
noviceList = []
apprenticeList = []
adeptList = []
expertList = []
#put them in one list
playerList = [noviceList,apprenticeList,adeptList,expertList]
#This list contains the players that have waited for too long in their Queue
needRematch = []
while True:
loopCounter = 0
#Check for exit signal
try:
exit = exitQueue.get(False)
if exit:
break
except:
pass
#loop over new entries at most MAX_LOOPS ti | mes then do it again
while loopCounter < MAX_LOOPS:
try:
#Get new player and add him to a list according to his rank
newPlayer = inputQueue.get(False)
playerR | ank = newPlayer.get('rank')
listIndex = playerRank // 3
newPlayer['entryTime'] = time.time()
playerList[listIndex].append(newPlayer)
print 'MMTHREAD : Got user '
print 'MMTHREAD: USER RANK IS %d ' % playerRank
except Queue.Empty:
break
loopCounter += 1
#First check for players in the rematch Queue
for player in needRematch[:]:
position = player.get('rank') // 3
foundMatch = False
#Check for empty list
if len(playerList[position]) == 0 or playerList[position][0] != player:
continue
#Check for enemy player one list above this player
if position + 1 < len(playerList) and len(playerList[position+1]) >= 1:
foundMatch = True
firstPlayer = playerList[position].pop(0)
secondPlayer = playerList[position+1].pop(0)
needRematch.remove(player)
elif (position - 1 >= 0) and len(playerList[position-1]) >= 1:
#Else check for enemy player one list below this player
foundMatch = True
firstPlayer = playerList[position].pop(0)
secondPlayer = playerList[position-1].pop(0)
needRematch.remove(player)
#Add player tokens to Queue for game play thread
if foundMatch:
bothPlayers = [firstPlayer,secondPlayer]
data = {'turn':0,'players':bothPlayers}
print'Add new Player token'
outputQueue.put(data)
#Match players in same list
for category in playerList:
while True:
try:
#Try to pop two players from the list
#If successfull, put their token into game play thread Queue
firstPlayer = None
secondPlayer = None
firstPlayer = category.pop(0)
secondPlayer = category.pop(0)
bothPlayers = [firstPlayer,secondPlayer]
turn = random.randint(0,1)
data = {'turn':turn,'players':bothPlayers}
print'Add new Player token'
outputQueue.put(data)
except:
#Else if only one player is found , but him back
if secondPlayer == None and firstPlayer != None:
category.insert(0,firstPlayer)
break
#Check for players that didnt find a match for a long time and alert thread
for i in range(0,3):
if len(playerList[i]) > 0:
if time.time() - playerList[i][0].get('entryTime') >= MAX_WAIT:
needRematch.append(playerList[i][0])
print 'match making thread out'
|
JacobFischer/Joueur.py | games/stumped/job.py | Python | mit | 3,479 | 0.003162 | # Job: Information about a beaver's job.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from typing import Optional
from games.stumped.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Job(GameObject):
"""The class representing the Job in the Stumped game.
Information about a beaver's job.
"""
def __init__(self):
"""Initializes a Job with basic logic as provided by the Creer code generator.
"""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._actions = 0
self._carry_limit = 0
self._chopping = 0
self._cost = 0
self._damage = 0
self._distraction_power = 0
self._health = 0
self._moves = 0
self._munching = 0
self._title = ""
@property
def actions(self) -> int:
"""int: The number of actions this Job can make per turn.
"""
return self._actions
@property
def carry_limit(self) -> int:
"""int: How many combined resources a beaver with this Job can hold at once.
"""
return self._carry_limit
@property
def chopping(self) -> int:
"""int: Scalar for how many branches this Job harvests at once.
"""
return self._chopping
@property
def cost(self) -> int:
"""int: How much food this Job costs to recruit.
"""
return self._cost
@property
def damage(self) -> int:
"""int: The amount of damage this Job does per attack.
"""
return self._damage
@property
def distraction_power(self) -> int:
"""int: How many turns a beaver attacked by this Job is distracted by.
"""
return self._distraction_power
@property
def health(self) -> int:
"""int: The amount of starting health this Job has.
"""
return self._health
@property
def moves(self) -> int:
"""int: The number of moves this Job can make per turn.
"""
return self._moves
@property
def munching(self) -> int:
"""int: Scalar for how much food this Job harvests at once.
"""
return self._munching
@property
def title(self) -> str:
"""str: The Job title.
"""
return self._title
def recruit(self, tile: 'games.stumped.tile.Tile') -> Optional['games.stumped.beaver.Beaver']:
"""Recruits a Beaver of this Job to a lodge.
Args:
tile (games.stumped.tile.Tile): The Tile that is a lodge owned by you that you wish to spawn the Beaver of this Job on.
Returns:
games.stumped.beaver.Beaver or None: The recruited Beaver i | f successful, None otherwise.
"""
return self._run_on_server('recruit', | {
'tile': tile
})
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
|
net-titech/VidSum | src/packages/aistats-flid/code/functions/dpp.py | Python | mit | 2,448 | 0.001225 | from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.special import expit
import numpy.linalg
from functions.function import Function
class DPP(Function):
def __init__(self, V, K=None):
self.V = V
if K is None:
self.K = np.eye(len(V))
else:
assert len(K.shape) == 2
assert K.shape[0] == K.shape[1]
self.K = np.copy(K)
def __call__(self, A):
assert False # TODO
return np.sum(self.s[A]) - self.logz
@property
def parameters(self):
return [self.K]
def _estimate_LL(self, data):
ll = 0
n = self.K.shape[0]
K_diag = np.diag(self.K)
diag_idxs = np.diag_indices(n)
K = np.copy(self.K)
for sample in data:
K[diag_idxs] = K_diag - 1.
K[(diag_idxs[0][sample], diag_idxs[1][sample])] += 1.
ll += np.log(np.abs(np.linalg.det(K)))
ll /= len(data)
return ll
def _get_proposal_marginal(self, given):
"""
Computes a proposal for adding an item according to the marginal
probabiltiy. Returns items sorted by likelihood.
"""
V = list(self.V)
candidates = np.delete(np.array(V), given)
given = set(given)
K = np.copy(self.K)
P_given = np.linalg.det(K[np.ix_(list(given), list(given))])
probs = np.zeros(len(candidates))
for i, el in enumerate(candidates):
S = list(given.union(set([el])))
P_S = np.linalg.det(K[np.ix_(S, S)])
probs[i] = P_S / P_given
return candidates[np.argsort(probs)[::-1]]
def _get_proposal(self, given):
"""
Computes a proposal for adding an item according to the non-marginal
probabiltiy. Returns items sorted by likelihood.
"""
V = list(self.V)
candidates = np.delete(np.array(V), given | )
given = set(given)
n = self.K.shape[0]
K_diag = np.diag(self.K)
diag_idxs = np.diag_indices(n)
K = np.copy(self.K)
probs = np.zeros(len(candidates))
for i, el in enumerate(candidates):
S = list(given.union(set([ | el])))
K[diag_idxs] = K_diag - 1.
K[(diag_idxs[0][S], diag_idxs[1][S])] += 1.
probs[i] = np.abs(np.linalg.det(K))
return candidates[np.argsort(probs)[::-1]]
|
Dunkas12/BeepBoopBot | lib/youtube_dl/extractor/telequebec.py | Python | gpl-3.0 | 1,419 | 0.002819 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
smuggle_url,
)
class TeleQuebecIE(InfoExtractor):
_VALID_URL = r'https?://zonevideo\.telequebec\.tv/media/(?P<id>\d+)'
_TEST = {
'url': 'http://zonevideo.telequebec.tv/media/20984/le-couronnement-de-new-york/couronnement-de-new-york',
'md5': 'fe95a0957e5707b1b01f5013e725c90f',
'info_dic | t': {
'id': '20984',
'ext': 'mp4',
'title': 'Le couronnement de New York',
| 'description': 'md5:f5b3d27a689ec6c1486132b2d687d432',
'upload_date': '20160220',
'timestamp': 1455965438,
}
}
def _real_extract(self, url):
media_id = self._match_id(url)
media_data = self._download_json(
'https://mnmedias.api.telequebec.tv/api/v2/media/' + media_id,
media_id)['media']
return {
'_type': 'url_transparent',
'id': media_id,
'url': smuggle_url('limelight:media:' + media_data['streamInfo']['sourceId'], {'geo_countries': ['CA']}),
'title': media_data['title'],
'description': media_data.get('descriptions', [{'text': None}])[0].get('text'),
'duration': int_or_none(media_data.get('durationInMilliseconds'), 1000),
'ie_key': 'LimelightMedia',
}
|
alex-ip/agdc | api/source/test/python/datacube/api/test_query.py | Python | bsd-3-clause | 12,587 | 0.003019 | #!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
__author__ = "Simon Oldfield"
import logging
from datacube.api import parse_date_min, parse_date_max, Satellite, DatasetType
from datacube.api.query import list_cells_as_list, list_tiles_as_list
from data | cube.api.query import list_cells_vector_file_as_list
from datacube.api.query import MONTHS_BY_SEASON, Season
fro | m datacube.api.query import LS7_SLC_OFF_EXCLUSION, LS7_SLC_OFF_ACQ_MIN
from datacube.api.query import LS8_PRE_WRS_2_EXCLUSION, LS8_PRE_WRS_2_ACQ_MAX
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
_log = logging.getLogger()
TEST_CELL_X = 120
TEST_CELL_Y = -25
TEST_YEAR = 2005
TEST_YEAR_STR = str(TEST_YEAR)
TEST_MONTHS = MONTHS_BY_SEASON[Season.SUMMER]
TEST_VECTOR_FILE = "Mainlands.shp"
TEST_VECTOR_LAYER = 0
TEST_VECTOR_FEATURE = 4
def test_list_cells_120_020_2005_ls578(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_no_ls7_slc(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
exclude=[LS7_SLC_OFF_EXCLUSION],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_no_ls8_pre_wrs_2(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
exclude=[LS8_PRE_WRS_2_EXCLUSION],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_summer(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
months=TEST_MONTHS,
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_tiles_120_020_2005_ls578(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
and tile.end_datetime_year == TEST_YEAR
and ds in tile.datasets for ds in dataset_types)
def test_list_tiles_120_020_2005_ls578_no_ls7_slc(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
exclude=[LS7_SLC_OFF_EXCLUSION],
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
dataset = tile.datasets[DatasetType.ARG25]
assert dataset
_log.info("Found ARG25 dataset [%s]", dataset.path)
assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
and tile.end_datetime_year == TEST_YEAR
and (ds in tile.datasets for ds in dataset_types)
and (dataset.satellite != Satellite.LS7 or tile.end_datetime.date() <= LS7_SLC_OFF_ACQ_MIN))
def test_list_tiles_120_020_2005_ls578_no_ls8_pre_wrs_2(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
exclude=[LS8_PRE_WRS_2_EXCLUSION],
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.