column      type            range
repo_name   stringlengths   5 .. 100
path        stringlengths   4 .. 231
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           6 .. 947k
score       float64         0 .. 0.34
prefix      stringlengths   0 .. 8.16k
middle      stringlengths   3 .. 512
suffix      stringlengths   0 .. 8.17k
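The prefix / middle / suffix columns suggest each row stores a source file split for fill-in-the-middle style completion. A minimal sketch of putting one row back together, assuming rows are exposed as plain dicts with exactly the fields listed above; the loading step and the sample values below are illustrative, not taken from the data:

def reassemble(row):
    # Join the three text columns back into one source string;
    # the metadata columns are passed through unchanged.
    return {
        "repo_name": row["repo_name"],
        "path": row["path"],
        "language": row["language"],
        "license": row["license"],
        "size": row["size"],
        "score": row["score"],
        "content": row["prefix"] + row["middle"] + row["suffix"],
    }

# Illustrative row, not taken from the dataset:
sample = reassemble({
    "repo_name": "example/repo", "path": "pkg/mod.py", "language": "Python",
    "license": "mit", "size": 42, "score": 0.0,
    "prefix": "def add(a, b):\n    ", "middle": "return a + b", "suffix": "\n",
})
print(sample["content"])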
Chilledheart/chromium
tools/perf/page_sets/alexa1-10000.py
Python
bsd-3-clause
1,340
0.005224
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import os

from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story

__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Generated on 2013-09-03 13:59:53.459117 by rmistry using
# create_page_set.py.
_TOP_10000_ALEXA_FILE = os.path.join(__location__, 'alexa1-10000-urls.json')


class Alexa1To10000Page(page_module.Page):

  def __init__(self, url, page_set):
    super(Alexa1To10000Page, self).__init__(
        url=url, page_set=page_set,
        shared_page_state_class=shared_page_state.SharedDesktopPageState)

  def RunPageInteractions(self, action_runner):
    with action_runner.CreateGestureInteraction('ScrollAction'):
      action_runner.ScrollPage()


class Alexa1To10000PageSet(story.StorySet):
  """ Top 1-10000 Alexa global.

  Generated on 2013-09-03 13:59:53.459117 by rmistry using
  create_page_set.py.
  """

  def __init__(self):
    super(Alexa1To10000PageSet, self).__init__()

    with open(_TOP_10000_ALEXA_FILE) as f:
      urls_list = json.load(f)
    for url in urls_list:
      self.AddStory(Alexa1To10000Page(url, self))
yola/hashcache
hashcache/hashcache.py
Python
mit
1,100
0.008182
from django.core.cache import cache as cache_impl
from django.utils.encoding import smart_str
import hashlib


class Hashcache(object):
    """
    Wrapper for django.core.cache.cache that hashes the keys to avoid key
    length errors. Maybe eventually it will do other cool things.

    You can optionally pass your own cache module to the initializer as long
    as it conforms to the get/set interface of the django cache module.

    >>> from yolango.util.hashcache import Hashcache
    >>> cache = Hashcache()
    >>> cache.set('my_key', 'hello, world!', 30)
    >>> cache.get('my_key')
    'hello, world!'
    """

    def __init__(self, cache=cache_impl):
        assert cache
        self.cache = cache

    def get(self, key):
        """Hash the key and retrieve it from the cache"""
        return self.cache.get(self._hashed(key))

    def set(self, key, *args):
        """Hash the key and set it in the cache"""
        return self.cache.set(self._hashed(key), *args)

    def _hashed(self, key):
        return hashlib.new("md5", smart_str(key)).hexdigest()
ic-labs/glamkit-sponsors
sponsors/__init__.py
Python
mit
52
0
default_app_config = '%s.apps.AppConfig' % __name__
Orav/kbengine
kbe/src/lib/python/Lib/test/test_importlib/namespace_pkgs/project3/parent/child/three.py
Python
lgpl-3.0
29
0
attr = 'parent child three'
SunPower/PVMismatch
pvmismatch/contrib/gen_coeffs/two_diode.py
Python
bsd-3-clause
11,016
0.002179
""" Two diode model equations. """ import numpy as np from pvmismatch.contrib.gen_coeffs import diode def fdidv(isat1, isat2, rs, rsh, ic, vc, vt): """ Derivative of IV curve and its derivatives w.r.t. Isat1, Isat2, Rs, Rsh, Ic, Vc and Vt. :param isat1: diode 1 saturation current [A] :param isat2: diode 2 saturation current [A] :param rs: series resistance [ohms] :param rsh: shunt resistance [ohms] :param ic: cell current [A] :param vc: cell voltage [V] :param vt: thermal voltage (kB * Tc / qe = 26[mV] at Tc=298K) [V] :return: derivative of IV curve and its derivatives """ vd, _ = diode.fvd(vc, ic, rs) # vd = vc + ic * rs vstar = vd / vt rstar = rsh / rs exp_vstar, exp_vstar_2 = np.exp(vstar), np.exp(0.5 * vstar) v_sat1_sh, v_sat2_sh = isat1 * rsh, isat2 * rsh v_sat1_sh_exp_vstar = v_sat1_sh * exp_vstar v_sat2_sh_exp_vstar_2 = 0.5 * v_sat2_sh * exp_vstar_2 vsum = v_sat1_sh_exp_vstar + v_sat2_sh_exp_vstar_2 + vt vsum_rstar = vsum + vt * rstar combiterm1 = v_sat1_sh_exp_vstar + 0.5*v_sat2_sh_exp_vstar_2 combiterm2 = isat1*exp_vstar + 0.5*isat2*exp_vstar_2 combiterm3 = vsum / vsum_rstar - 1.0 combiterm4 = vsum_rstar * rs combiterm5 = rstar * combiterm3 / vsum_rstar combiterm6 = combiterm1 * combiterm3 / vt combiterm7 = 1.0 / combiterm4 # dI/dV = derivative of IV curve didv = -vsum / combiterm4 # jacobian didv_isat1 = exp_vstar * combiterm5 didv_isat2 = 0.5 * exp_vstar_2 * combiterm5 didv__r_s = combiterm7 * (combiterm6 * ic + vsum**2.0 / combiterm4) didv_rsh = combiterm7 * (combiterm2 * combiterm3 + vt * vsum / combiterm4) didv_ic = combiterm6 / vsum_rstar didv_vc = (didv + 1.0 / rs) * didv_ic jac = np.array([ didv_isat1, didv_isat2, didv__r_s, didv_rsh, didv_ic, didv_vc ]) return didv, jac def fdpdv(isat1, isat2, rs, rsh, ic, vc, vt): """ Derivative of PV curve and its derivatives w.r.t. Isat1, Isat2, Rs, Rsh, Ic, Vc and Vt. 
:param isat1: diode 1 saturation current [A] :param isat2: diode 2 saturation current [A] :param rs: series resistance [ohms] :param rsh: shunt resistance [ohms] :param ic: cell current [A] :param vc: cell voltage [V] :param vt: thermal voltage (kB * Tc / qe = 26[mV] at Tc=298K) [V] :return: derivative of PV curve and its derivatives """ didv, _ = fdidv(isat1, isat2, rs, rsh, ic, vc, vt) vd, _ = diode.fvd(vc, ic, rs) # vd = vc + ic * rs dpdv = didv * vc + ic dpdv_isat1 = 2.0*rs*rsh*vc*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )*np.exp(vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 - 2.0*rsh*vc*np.exp(vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) dpdv_isat2 = rs*rsh*vc*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )*np.exp(0.5*vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 - rsh*vc*np.exp(0.5*vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) dpdv_rs = -vc*( 2.0*isat1*rsh*ic*np.exp(vd/vt)/vt + 0.5*isat2*rsh*ic*np.exp(0.5*vd/vt)/vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) - vc*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )*( -2.0*isat1*rs*rsh*ic*np.exp(vd/vt)/vt - 2.0*isat1*rsh*np.exp(vd/vt) - 0.5*isat2*rs*rsh*ic*np.exp(0.5*vd/vt)/vt - isat2*rsh*np.exp(0.5*vd/vt) - 2.0*vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 dpdv_rsh = -vc*( 2.0*isat1*np.exp(vd/vt) + isat2*np.exp(0.5*vd/vt) )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) - vc*( -2.0*isat1*rs*np.exp(vd/vt) - isat2*rs*np.exp(0.5*vd/vt) - 2.0*vt )*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 dpdv_ic = -vc*( 2.0*isat1*rs*rsh*np.exp(vd/vt)/vt + 0.5*isat2*rs*rsh*np.exp(0.5*vd/vt)/vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) - vc*( -2.0*isat1*rs**2*rsh*np.exp(vd/vt)/vt - 0.5*isat2*rs**2*rsh*np.exp(0.5*vd/vt)/vt )*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 + 1.0 dpdv_vc = -vc*( 2.0*isat1*rsh*(rs*didv + 1)*np.exp(vd/vt)/vt + 0.5*isat2*rsh*(rs*didv + 1)*np.exp(0.5*vd/vt)/vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) - vc*( -2.0*isat1*rs*rsh*(rs*didv + 1)*np.exp(vd/vt)/vt - 0.5*isat2*rs*rsh*(rs*didv + 1)*np.exp(0.5*vd/vt)/vt )*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 - ( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) + didv jac = np.array([ dpdv_isat1, dpdv_isat2, dpdv_rs, dpdv_rsh, dpdv_ic, dpdv_vc ]) return dpdv, jac def fjrsh(isat1, isat2, rs, rsh, vt, isc): """ Shunt resistance residual and its derivatives w.r.t. Isat1, Isat2, Rs and Rsh. 
:param isat1: diode 1 saturation current [A] :param isat2: diode 2 saturation current [A] :param rs: series resistance [ohms] :param rsh: shunt resistance [ohms] :param vt: thermal voltage (kB * Tc / qe = 26[mV] at Tc=298K) [V] :param isc: short circuit current [A] :return: Rsh residual and its derivatives Shunt resistance is assumed to be equal to the inverse of the slope of the IV curve at short circuit. .. math:: Rsh = \\frac{ -1 }{ \\left. \\frac{dI}{dV} \\right|_{V=0} } This assumption is valid when
[put condition here]. """ didv, _ = fdidv(isat1, isat2, rs, rsh, ic=isc, vc=0, vt=vt) vd, _ = diode.fvd(0.0, isc, rs) # vd = vc + ic * rs
= 0.0 + isc * rs # frsh = rsh + 1/didv frsh = vd * (1.0/rsh + didv) dfrsh_isat1 = vd*( 2.0*rs*rsh*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )*np.exp(vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 - 2.0*rsh*np.exp(vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) ) dfrsh_isat2 = vd*( rs*rsh*( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )*np.exp(0.5*vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt )**2 - rsh*np.exp(0.5*vd/vt)/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) ) dfrsh_rs = ( vd*( -( 2.0*isat1*rsh*isc*np.exp(vd/vt)/vt + 0.5*isat2*rsh*isc*np.exp(0.5*vd/vt)/vt )/( 2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt ) - ( 2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt )*( -2.0*isat1*rs*rsh*isc*np.exp(vd/vt)/vt - 2.0*isat1*rsh*np.exp(vd/vt) - 0.5*isat2*r
yast/yast-python-bindings
examples/CheckBoxFrame1.py
Python
gpl-2.0
729
0.00823
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *


class CheckBoxFrame1Client:
    def main(self):
        UI.OpenDialog(
            VBox(
                MarginBox(1, 0.5,
                    CheckBoxFrame(
                        "E&xpert Settings",
                        True,
                        VBox(
                            HBox(
                                InputField("&Server"),
                                ComboBox("&Mode", ["Automatic", "Manual", "Debug"])
                            ),
                            Left(CheckBox("&Logging")),
                            InputField("&Connections")
                        )
                    )
                ),
                PushButton("&OK")
            )
        )
        UI.UserInput()
        UI.CloseDialog()


CheckBoxFrame1Client().main()
prozorro-sale/openprocurement.auctions.dgf
openprocurement/auctions/dgf/tests/blanks/bidder_blanks.py
Python
apache-2.0
36,337
0.003444
# -*- coding: utf-8 -*- from copy import deepcopy from openprocurement.auctions.dgf.tests.base import ( test_auction_data, test_features_auction_data, test_financial_organization, test_financial_auction_data, test_bids, test_financial_bids, test_organization ) from openprocurement.api.tests.base import JSON_RENDERER_ERROR # AuctionBidderResourceTest def create_auction_bidder_inva
lid(self): response = self.app.post_json('/auctions/some_id/bids', { 'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}}, status=404) self.assertEqual(response.status, '404 Not Found') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'
], [ {u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'} ]) request_path = '/auctions/{}/bids'.format(self.auction_id) response = self.app.post(request_path, 'data', status=415) self.assertEqual(response.status, '415 Unsupported Media Type') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'} ]) response = self.app.post( request_path, 'data', content_type='application/json', status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ JSON_RENDERER_ERROR ]) response = self.app.post_json(request_path, 'data', status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Data not available', u'location': u'body', u'name': u'data'} ]) response = self.app.post_json( request_path, {'not_data': {}}, status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Data not available', u'location': u'body', u'name': u'data'} ]) response = self.app.post_json(request_path, {'data': { 'invalid_field': 'invalid_value'}}, status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Rogue field', u'location': u'body', u'name': u'invalid_field'} ]) response = self.app.post_json(request_path, { 'data': {'tenderers': [{'identifier': 'invalid_value'}]}}, status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': {u'identifier': [ u'Please use a mapping for this field or Identifier instance instead of unicode.']}, u'location': u'body', u'name': u'tenderers'} ]) response = self.app.post_json(request_path, { 'data': {'tenderers': [{'identifier': {}}]}}, status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertIn({u"location": u"body", u"name": u"qualified", u"description": [u"This field is required."]}, response.json['errors']) if self.initial_organization == test_financial_organization: self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'eligible'}, response.json['errors']) self.assertIn({u'description': [ {u'additionalIdentifiers': [u'This field is required.'], u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.']}, u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}, response.json['errors']) else: 
self.assertIn({u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.']}, u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}, response.json['errors']) response = self.app.post_json(request_path, {'data': {'tenderers': [{ 'name': 'name', 'identifier': {'uri': 'invalid_value'}}]}}, status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertIn({u"location": u"body", u"name": u"qualified", u"description": [u"This field is required."]}, response.json['errors']) if self.initial_organization == test_financial_organization: self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'eligible'}, response.json['errors']) self.assertIn({u'description': [ {u'additionalIdentifiers': [u'This field is required.'], u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'], u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}, response.json['errors']) else: self.assertIn({u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'], u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}, response.json['errors']) if self.initial_organization == test_financial_organization: response = self.app.post_json(request_path, { 'data': {'tenderers': [self.initial_organization], 'qualified': True, 'eligible': True}}, status=422) else: response = self.app.post_json(request_path, {'data': {'tenderers': [self.initial_organization], 'qualified': True}}, status=422) self.assertEqual(response.status, '422 Unprocessable Entity') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'}, response.json['errors']) if self.initial_organization == test_financial_organization: response = self.app.post_json(request_path, { 'data': {'tenderers': [self.initial_organization], "value": {"amount": 500, 'valueAddedTaxIncluded': False},
LLNL/spack
var/spack/repos/builtin/packages/py-azure-mgmt-appconfiguration/package.py
Python
lgpl-2.1
985
0.00203
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


class PyAzureMgmtAppconfiguration(PythonPackage):
    """Microsoft Azure App Configuration Management Client Library for
    Python."""

    homepage = "https://github.com/Azure/azure-sdk-for-python"
    pypi = "azure-mgmt-appconfiguration/azure-mgmt-appconfiguration-0.5.0.zip"

    version('0.5.0', sha256='211527511d7616a383cc196956eaf2b7ee016f2367d367924b3715f2a41106da')
    version('0.4.0', sha256='85f6202ba235fde6be274f3dec1578b90235cf31979abea3fcfa476d0b2ac5b6')

    depends_on('py-setuptools', type='build')
    depends_on('py-msrest@0.5.0:', type=('build', 'run'))
    depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
    depends_on('py-azure-common@1.1:1', type=('build', 'run'))
    depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
EnSpec/SpecDAL
specdal/tests/test_groupby.py
Python
mit
1,667
0.0024
import os
import sys
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import unittest
sys.path.insert(0, os.path.abspath("../../"))
from specdal.spectrum import Spectrum
from specdal.collection import Collection


class GroupByTests(unittest.TestCase):
    def setUp(self):
        # total 36 spectra
        self.c = Collection(name='For Groups')
        for a in ('A', 'B', 'C'):
            for b in ('a', 'b', 'c'):
                for c in ('0', '1'):
                    for d in ('0001', '0002', '0003', '0004'):
                        self.c.append(Spectrum('_'.join([a, b, c, d])))
        # print([s.name for s in self.c.spectra])

    def test_groups(self):
        groups = self.c.groupby(separator='_', indices=[0, 2])
        for s in groups['A_0'].spectra:
            print(s.name)

    '''
    def test_num_groups(self):
        groups = self.c.groupby(separator='_', indices=[0])
        self.assertEqual(len(groups), 3)
        groups = self.c.groupby(separator='_', indices=[1])
        self.assertEqual(len(groups), 3)
        groups = self.c.groupby(separator='_', indices=[2])
        self.assertEqual(len(groups), 4)
        groups = self.c.groupby(separator='_', indices=[0, 1])
        self.assertEqual(len(groups), 9)
        groups = self.c.groupby(separator='_', indices=[0, 2])
        self.assertEqual(len(groups), 12)
        groups = self.c.groupby(separator='_', indices=[1, 2])
        self.assertEqual(len(groups), 12)
        groups = self.c.groupby(separator='_', indices=[0, 1, 2])
        self.assertEqual(len(groups), 36)
    '''


def main():
    unittest.main()

if __name__ == '__main__':
    main()
simon3z/virt-deploy
virtdeploy/errors.py
Python
gpl-2.0
1,201
0
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#

from __future__ import absolute_import


class VirtDeployException(Exception):
    def __init__(self, message="Unknown error"):
        self.message = message

    def __str__(self):
        return self.message


class InstanceNotFound(VirtDeployException):
    def __init__(self, name):
        super(InstanceNotFound, self).__init__(
            'No such instance: {0}'.format(name))
zkota/pyblio-1.3
Legacy/Iterator.py
Python
gpl-2.0
2,270
0.01674
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#

# TODO: get rid of all of this, and use standard iterators / generators

class Iterator:
    base = None
    title = "Some Selection"

    def iterator(self):
        ''' loop method, so that we can for example call a method by passing
        indifferently a database or a database iterator... '''
        return self

    def __iter__(self):
        retval = self.first()
        while retval != None:
            yield retval
            retval = self.next()
        raise StopIteration

    def set_position(self, pos=0):
        self._position = 0

    def get_position(self):
        return self._position

    def first(self):
        self.set_position(0)
        return self.next()


class DBIterator(Iterator):
    ''' This class defines a database iterator '''

    def __init__(self, database):
        self.keys = database.keys()
        self.base = database
        self.database = database
        self.count = 0
        return

    def __iter__(self):
        self._position = 0
        for k in self.keys:
            yield self.database[k]
            self._position += 1

    def first(self):
        self.count = 0
        return self.next()

    def next(self):
        try:
            entry = self.database[self.keys[self.count]]
        except IndexError:
            entry = None
        self.count = self.count + 1
        return entry
cmunk/protwis
build_gpcr/management/commands/build_drugs.py
Python
apache-2.0
109
0.009174
from build.management.commands.build_drugs import Command as BuildDrugs


class Command(BuildDrugs):
    pass
pajlada/pajbot
pajbot/modules/maxmsglength.py
Python
mit
4,608
0.002821
import logging from datetime import timedelta from pajbot.managers.handler import HandlerManager from pajbot.modules import BaseModule from pajbot.modules import ModuleSetting log = logging.getLogger(__name__) class MaxMsgLengthModule(BaseModule): ID = __name__.split(".")[-1] NAME = "Maximum Message Length" DESCRIPTION = "Times out users who post messages that contain too many characters." CATEGORY = "Moderation" SETTINGS = [ ModuleSetting( key="max_msg_length", label="Max message length (Online chat)", type="number", required=True, placeholder="", default=400, constraints={"min_value": 1, "max_value": 500}, ), ModuleSetting( key="max_msg_length_offline", label="Max message length (Offline chat)", type="number", required=True, placeholder="", default=400, constraints={"min_value": 1, "max_value": 500}, ), ModuleSetting( key="timeout_length", label="Timeout length", type="number", required=True, placeholder="Timeout length in seconds", default=120, constraints={"min_value": 1, "max_value": 1209600}, ), ModuleSetting( key="bypass_level", label="Level to bypass module", type="number", required=True, placeholder="", default=500, constraints={"min_value": 100, "max_value": 1000}, ), ModuleSetting( key="timeout_reason", label="Timeout Reason", type="text", required=False, placeholder="", default="Message too long", constraints={}, ), ModuleSetting( key="whisper_timeout_reason", label="Whisper Timeout Reason | Available arguments: {punishment}", type="text", required=False, placeholder="", default="You have been {punishment} because your message was too long.", constraints={}, ), ModuleSetting( key="disable_warnings", label="Disable warning timeouts", type="boolean", required=True, default=False, ), ] def on_message(self, source, message, whisper, **rest): if whisper: return if source.level >= self.settings["bypass_level"] or source.moderator: return if self.bot.is_online: if len(message) > self.settings["max_msg_length"]: if self.set
tings["disable_warnings"] is True: self.bot.timeout(source, self.settings["timeout_length"], reason=self.settings["timeout_reason"]) else: duration, punishment = self.bot.timeout_warn( source, self.settings["timeout_length"], reason=self.settings["timeout_reason"] ) """ We only send a notification to the user if he has spent more than
one hour watching the stream. """ if duration > 0 and source.time_in_chat_online >= timedelta(hours=1): self.bot.whisper(source, self.settings["whisper_timeout_reason"].format(punishment=punishment)) return False else: if len(message) > self.settings["max_msg_length_offline"]: if self.settings["disable_warnings"] is True: self.bot.timeout(source, self.settings["timeout_length"], reason=self.settings["timeout_reason"]) else: duration, punishment = self.bot.timeout_warn( source, self.settings["timeout_length"], reason=self.settings["timeout_reason"] ) """ We only send a notification to the user if he has spent more than one hour watching the stream. """ if duration > 0 and source.time_in_chat_online >= timedelta(hours=1): self.bot.whisper(source, self.settings["whisper_timeout_reason"].format(punishment=punishment)) return False def enable(self, bot): HandlerManager.add_handler("on_message", self.on_message, priority=150, run_if_propagation_stopped=True) def disable(self, bot): HandlerManager.remove_handler("on_message", self.on_message)
takahashikenichi/mozc
src/build_tools/copy_qt_frameworks_mac.py
Python
bsd-3-clause
4,010
0.008229
# -*- coding: utf-8 -*- # Copyright 2010-2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Copy Qt frameworks to the target application's frameworks dir
ectory. Typical usage: % python copy_qt_frameworks.py --qtdir=/path/to/qtdir/ \ --target=/path/to/target.app/Contents/Frameworks/ """ __author__ = "horo" import optparse import os from copy_file import CopyFiles from util import PrintErrorAndExit from util import RunOrDie def ParseOption(): """Parse command line options.""" parser = optparse.OptionParser() parser.add_option('--qtdir', dest='qtdir') parser.add_option('--target', dest='target') (opts, _) = parse
r.parse_args() return opts def main(): opt = ParseOption() if not opt.qtdir: PrintErrorAndExit('--qtdir option is mandatory.') if not opt.target: PrintErrorAndExit('--target option is mandatory.') qtdir = os.path.abspath(opt.qtdir) target = os.path.abspath(opt.target) # Copies QtCore. For codesign, Info.plist should be copied to Resources/. CopyFiles(['%s/lib/QtCore.framework/Versions/4/QtCore' % qtdir], '%s/QtCore.framework/Versions/4/QtCore' % target) CopyFiles(['%s/lib/QtCore.framework/Contents/Info.plist' % qtdir], '%s/QtCore.framework/Resources/' % target) # Copies QtGui. For codesign, Info.plist should be copied to Resources/. CopyFiles(['%s/lib/QtGui.framework/Versions/4/QtGui' % qtdir], '%s/QtGui.framework/Versions/4/QtGui' % target) CopyFiles(['%s/lib/QtGui.framework/Contents/Info.plist' % qtdir], '%s/QtGui.framework/Resources/' % target) # Copies Resources of QtGui CopyFiles(['%s/lib/QtGui.framework/Versions/4/Resources' % qtdir], '%s/QtGui.framework/Resources' % target, recursive=True) # Changes QtGui id cmd = ["install_name_tool", "-id", "@executable_path/../Frameworks/QtGui.framework/Versions/4/QtGui", "%s/QtGui.framework/Versions/4/QtGui" % target] RunOrDie(cmd) # Changes QtCore id cmd = ["install_name_tool", "-id", "@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore", '%s/QtCore.framework/Versions/4/QtCore' % target] RunOrDie(cmd) # Changes the reference to QtCore framework from QtGui cmd = ["install_name_tool", "-change", "%s/lib/QtCore.framework/Versions/4/QtCore" % qtdir, "@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore", "%s/QtGui.framework/Versions/4/QtGui" % target] RunOrDie(cmd) if __name__ == '__main__': main()
ibc/MediaSoup
worker/deps/gyp/test/external-cross-compile/src/tochar.py
Python
isc
317
0.003155
#!/usr/bin/python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

src = open(sys.argv[1])
dst = open(sys.argv[2], 'w')

for ch in src.read():
  dst.write('%d,\n' % ord(ch))

src.close()
dst.close()
googleapis/python-data-qna
samples/generated_samples/dataqna_generated_dataqna_v1alpha_question_service_create_question_sync.py
Python
apache-2.0
1,661
0.000602
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateQuestion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-dataqna

# [START dataqna_generated_dataqna_v1alpha_QuestionService_CreateQuestion_sync]
from google.cloud import dataqna_v1alpha


def sample_create_question():
    # Create a client
    client = dataqna_v1alpha.QuestionServiceClient()

    # Initialize request argument(s)
    question = dataqna_v1alpha.Question()
    question.scopes = ['scopes_value_1', 'scopes_value_2']
    question.query = "query_value"

    request = dataqna_v1alpha.CreateQuestionRequest(
        parent="parent_value",
        question=question,
    )

    # Make the request
    response = client.create_question(request=request)

    # Handle the response
    print(response)

# [END dataqna_generated_dataqna_v1alpha_QuestionService_CreateQuestion_sync]
kchodorow/tensorflow
tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_variables_test.py
Python
apache-2.0
6,122
0.00539
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for stochastic graphs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib import distributions from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor from tensorflow.contrib.bayesflow.python.ops import stochastic_variables from tensorflow.contrib.bayesflow.python.ops import variational_inference from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test sv = stochastic_variables st = stochastic_tensor vi = variational_inference dist = distributions class StochasticVariablesTest(test.TestCase): def testStochasticVariables(self): shape = (10, 20) with variable_scope.variable_scope( "stochastic_variables", custom_getter=sv.make_stochastic_variable_getter( dist_cls=dist.NormalWithSoftplusScale)): v = variable_scope.get_variable("sv", shape) self.assertTrue(isinstance(v, st.StochasticTensor)) self.assertTrue(isinstance(v.distribution, dist.NormalWithSoftplusScale)) self.assertEqual( {"stochastic_variables/sv_loc", "stochastic_variables/sv_scale"}, set([v.op.name for v in variables.global_variables()])) self.assertEqual( set(variables.trainable_variables()), set(variables.global_variables())) v = ops.convert_to_tensor(v) self.assertEqual(list(shape), v.get_shape().as_list()) with self.test_session() as sess: sess.run(variables.global_variables_initializer()) self.assertEqual(shape, sess.run(v).shape) def testStochasticVariablesWithConstantInitializer(self): shape = (10, 20) with variable_scope.variable_scope( "stochastic_variables", custom_getter=sv.make_stochastic_variable_getter( dist_cls=dist.NormalWithSoftplusScale, dist_kwargs={"validate_args": True}, param_initializers={ "loc": np.ones(shape) * 4., "scale": np.ones(shape) * 2. })): v = variable_scope.get_variable("sv") for var in variables.global_variables(): if "loc" in var.name: mu_var = var if "scale" in var.name: sigma_var = var v = ops.convert_to_tensor(v) with self.test_session() as sess: sess.run(variables.global_variables_initializer()) self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var)) self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var)) self.assertEqual(shape, sess.run(v).shape) def testStochasticVariablesWithCallableInitializer(self): shape = (10, 20) def sigma_init(shape, dtype, partition_info): _ = partition_info return array_ops.ones(shape, dtype=dtype) * 2. 
with variable_scope.variable_scope( "stochastic_variables", custom_getter=sv.make_stochastic_variable_getter( dist_cls=dist.NormalWithSoftplusScale, dist_kwargs={"validate_args": True}, param_initializers={ "loc": np.ones( shape, dtype=np.float32) * 4., "scale": sigma_init })): v = variable_scope.get_variable("sv", shape) for var in variables.global_variables(): if "loc" in var.name: mu_var = var if "scale" in var.name: sigma_var = var v = ops.convert_to_tensor(v) with self.test_session() as sess: sess.run(variables.global_variables_initializer()) self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var)) self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var)) self.assertEqual(shape, sess.run(v).shape) def testStochasticVariablesWithPrior(self): shape = (10, 20) prior = dist.Normal(0., 1.) with variable_scope.variable_scope( "stochastic_variables", custom_getter=sv.make_stochastic_variable_getter( dist_cls=dist.NormalWithSoftplusScale, prior=prior)): w = variable_scope.get_variable("weights", shape) x = random_ops.random_uniform((8, 10)) y = math_ops.matmul(x, w) prior_map = vi._find_variational_and_priors(y, None) self.assertEqual(prior_map[w], prior) elbo = vi.elbo(y, keep_batch_dim=False) with self.test_session() as sess: sess.run(variables.global_variables_initializer()) sess.run(elbo) def testStochasticVariablesWithCallablePriorInitializer(self): def prior_init(shape, dtype): return dist.Normal( array_ops.zeros(shape, dtype), array_ops.ones(shape, dtype)) with variable_scop
e.variable_scope( "stochastic_variables", custom_getter=sv.make_stochastic_variable_getter( dist_cls=dist.
NormalWithSoftplusScale, prior=prior_init)): w = variable_scope.get_variable("weights", (10, 20)) x = random_ops.random_uniform((8, 10)) y = math_ops.matmul(x, w) prior_map = vi._find_variational_and_priors(y, None) self.assertTrue(isinstance(prior_map[w], dist.Normal)) elbo = vi.elbo(y, keep_batch_dim=False) with self.test_session() as sess: sess.run(variables.global_variables_initializer()) sess.run(elbo) if __name__ == "__main__": test.main()
kanellov/openstack_project_create
openstack_project_create/receivers.py
Python
mit
162
0.006173
from django.dispatch import receiver

from signals import sch_create_project


@receiver(project_created)
def project_networking(sender, **kwargs):
    return True
UdK-VPT/Open_eQuarter
mole/extensions/eval_present_heritage/oeq_UPH_Wall.py
Python
gpl-2.0
2,921
0.008216
# -*- coding: utf-8 -*- import os,math from qgis.core import NULL from mole import oeq_global from mole.project import config from mole.extensions import OeQExtension from mole.stat_corr import rb_present_wall_uvalue_AVG_by_building_age_lookup, nrb_present_wall_uvalue_by_building_age_lookup, rb_contemporary_wall_uvalue_by_building_age_lookup, nrb_contemporary_wall_uvalue_by_building_age_lookup def calculation(self=None, parameters={},feature = None): from math import floor, ceil from PyQt4.QtCore import QVariant wl_uph = NULL #differentiation between RB and NRB (for now in case of contemporary U-Values RB=NRB. After getting NRB data for contemporary case code must be adaptet)
if parameters['BLD_USAGE'] == "RB": if pa
rameters['HERIT_STAT'] == "0": if not oeq_global.isnull(parameters['YOC']): wl_uph = rb_present_wall_uvalue_AVG_by_building_age_lookup.get(parameters['YOC']) else: if not oeq_global.isnull(parameters['YOC']): wl_uph = rb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC']) elif parameters['BLD_USAGE'] == "NRB": if parameters['HERIT_STAT'] == "0": if not oeq_global.isnull(parameters['YOC']): wl_uph = nrb_present_wall_uvalue_by_building_age_lookup.get(parameters['YOC']) else: if not oeq_global.isnull(parameters['YOC']): wl_uph = nrb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC']) else: if parameters['HERIT_STAT'] == "0": if not oeq_global.isnull(parameters['YOC']): wl_uph = (((rb_present_wall_uvalue_AVG_by_building_age_lookup.get(parameters['YOC'])) + ( nrb_present_wall_uvalue_by_building_age_lookup.get(parameters['YOC']))) / 2) else: if not oeq_global.isnull(parameters['YOC']): wl_uph = (((rb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC'])) + ( nrb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC']))) / 2) return {'WL_UPH': {'type': QVariant.Double, 'value': wl_uph}} extension = OeQExtension( extension_id=__name__, category='Evaluation', subcategory='U-Values Present Heritage', extension_name='Wall Quality (U_Value, Present Heritage)', layer_name= 'U Wall Present Heritage', extension_filepath=os.path.join(__file__), colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'), field_id='WL_UPH', source_type='none', par_in=['YOC','BLD_USAGE','HERIT_STAT'], sourcelayer_name=config.data_layer_name, targetlayer_name=config.data_layer_name, active=True, show_results=['WL_UPH'], description=u"Calculate the present heritage U-Value of the Building's wall", evaluation_method=calculation) extension.registerExtension(default=True)
teppchan/tkintertips
py/label/align.py
Python
mit
166
0.024096
#!/usr/bin/env python

import Tkinter as Tk

root = Tk.Tk()

l = Tk.Label(root,
             text="Python\nPerl\nC",
             justify="right",
             )
l.pack()

root.mainloop()
vls/python_utils
log_handlers.py
Python
unlicense
1,677
0.003578
from logging.handlers import BaseRotatingHandler
import string
import time
import datetime
import os


class TimePatternRotatingHandler(BaseRotatingHandler):
    def __init__(self, filename, when, encoding=None, delay=0):
        self.when = string.upper(when)
        self.fname_pat = filename
        self.mock_dt = None
        self.computeNextRollover()
        BaseRotatingHandler.__init__(self, self.filename, 'a', encoding, delay)

    def get_now_dt(self):
        if self.mock_dt is not None:
            return self.mock_dt
        return datetime.datetime.now()

    def computeNextRollover(self):
        now = self.get_now_dt()
        if self.when == 'MONTH':
            dtfmt = '%Y-%m'
            dt = (now.replace(day=1) + datetime.timedelta(days=40)).replace(day=1, hour=0, minute=0, second=0)
            rolloverAt = time.mktime(dt.timetuple())
        elif self.when == 'DAY':
            dtfmt = '%Y-%m-%d'
            dt = (now + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0)
            rolloverAt = time.mktime(dt.timetuple())
        self.rolloverAt = rolloverAt
        self.dtfmt = dtfmt
        self.filename = os.path.abspath(self.fname_pat % (now.strftime(self.dtfmt)))
        #print now, self.filename

    def shouldRollover(self, record):
        now = self.get_now_dt()
        t = time.mktime(now.timetuple())
        #print t, self.rolloverAt
        if t >= self.rolloverAt:
            return 1
        return 0

    def doRollover(self):
        if self.stream:
            self.stream.close()
        self.computeNextRollover()
        self.baseFilename = self.filename
        self.stream = self._open()
Williams224/davinci-scripts
kstaretappipig/MC_12_MagDown_kstar_rho_kpipipi.py
Python
mit
12,721
0.027121
#-- GAUDI jobOptions generated on Wed Jun 10 17:31:51 2015 #-- Contains event types : #-- 11104041 - 117 files - 2010995 events - 432.61 GBytes #-- Extra information about the data processing phases: #-- Processing Pass Step-124834 #-- StepId : 124834 #-- StepName : Reco14a for MC #-- ApplicationName : Brunel #-- ApplicationVer
sion : v43r2p7 #-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py #-- DDDB : fromPreviousStep #-- CONDDB : fromPreviousStep #-- ExtraPackages : AppConfig.v3r164 #-- Visible : Y #-- Processing Pass Step-124620 #-- StepId : 124620 #-- StepName : Digi13 with G4 dE/dx #--
ApplicationName : Boole #-- ApplicationVersion : v26r3 #-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py #-- DDDB : fromPreviousStep #-- CONDDB : fromPreviousStep #-- ExtraPackages : AppConfig.v3r164 #-- Visible : Y #-- Processing Pass Step-124632 #-- StepId : 124632 #-- StepName : TCK-0x409f0045 Flagged for Sim08 2012 #-- ApplicationName : Moore #-- ApplicationVersion : v14r8p1 #-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py #-- DDDB : fromPreviousStep #-- CONDDB : fromPreviousStep #-- ExtraPackages : AppConfig.v3r164 #-- Visible : Y #-- Processing Pass Step-124630 #-- StepId : 124630 #-- StepName : Stripping20-NoPrescalingFlagged for Sim08 #-- ApplicationName : DaVinci #-- ApplicationVersion : v32r2p1 #-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py #-- DDDB : fromPreviousStep #-- CONDDB : fromPreviousStep #-- ExtraPackages : AppConfig.v3r164 #-- Visible : Y #-- Processing Pass Step-125577 #-- StepId : 125577 #-- StepName : Sim08a - 2012 - MD - Pythia8 #-- ApplicationName : Gauss #-- ApplicationVersion : v45r3 #-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py #-- DDDB : Sim08-20130503-1 #-- CONDDB : Sim08-20130503-1-vc-md100 #-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r11 #-- Visible : Y from Gaudi.Configuration import * from GaudiConf import IOHelper IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000001_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000002_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000003_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000004_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000005_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000006_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000007_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000008_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000009_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000010_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000011_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000012_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000013_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000014_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000015_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000016_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000017_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000018_1.allstreams.dst', 
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000019_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000020_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000021_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000022_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000023_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000024_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000025_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000026_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000027_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000028_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000029_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000030_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000031_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000032_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000033_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000034_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000035_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000036_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000037_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000038_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000039_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000040_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000041_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000042_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000043_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000044_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000045_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000046_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000047_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000048_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000049_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000050_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000051_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000052_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000053_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000054_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000055_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000056_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000057_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000058_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000059_1.allstreams.dst', 
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000060_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000061_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000062_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000063_1.allstreams.dst', 'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_000000
Stiivi/brewery
examples/merge_multiple_files/merge_multiple_files.py
Python
mit
2,107
0.005695
"""
Brewery Example - merge multiple CSV files

Input: Multiple CSV files with different fields, but with common subset of fields.

Output: Single CSV file with all fields from all files and with additional
column with origin file name

Run:

    $ python merge_multiple_files.py

Afterwards display the CSV file:

    $ cat merged.csv | brewery pipe pretty_printer

And see the field completeness (data quality dimension):

    $ cat merged.csv | brewery pipe audit pretty_printer
"""

import brewery
from brewery import ds
import sys

# List of sources - you might want to keep this list in a json file
sources = [
    {"file": "grants_2008.csv",
     "fields": ["receiver", "amount", "date"]},
    {"file": "grants_2009.csv",
     "fields": ["id", "receiver", "amount", "contract_number", "date"]},
    {"file": "grants_2010.csv",
     "fields": ["receiver", "subject", "requested_amount", "amount", "date"]}
]

# Create list of all fields and add filename to store information
# about origin of data records
all_fields = brewery.FieldList(["file"])

# Go through source definitions and collect the fields
for source in sources:
    for field in source["fields"]:
        if field not in all_fields:
            all_fields.append(field)

# Create and initialize a data target
out = ds.CSVDataTarget("merged.csv")
out.fields = brewery.FieldList(all_fields)
out.initialize()

# Append all sources
for source in sources:
    path = source["file"]

    # Initialize data source: skip reading of headers - we are preparing them
    # ourselves; use XLSDataSource for XLS files
    # We ignore the fields in the header, because we have set-up fields
    # previously. We need to skip the header row.
    src = ds.CSVDataSource(path, read_header=False, skip_rows=1)
    src.fields = ds.FieldList(source["fields"])
    src.initialize()

    for record in src.records():
        # Add file reference into ouput - to know where the row comes from
        record["file"] = path
        out.append(record)

    # Close the source stream
    src.finalize()
psicobyte/ejemplos-python
cap5/p54.py
Python
gpl-3.0
77
0
#!/usr/bin/python
# -*- coding: utf-8 -*-

un_numero = 75
otro_numero = -134
hycis/TensorGraph
tensorgraph/trainobject.py
Python
apache-2.0
3,080
0.003896
from .stopper import EarlyStopper from .progbar import ProgressBar from .utils import split_arr from .data_iterator import SequentialIterator from tensorflow.python.framework import ops import tensorflow as tf import logging logging.basicConfig(format='%(module)s.%(funcName)s %(lineno)d:%(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def train(session, feed_dict, train_cost_sb, valid_cost_sb, optimizer, epoch_look_back=5, max_epoch=100, percent_decrease=0, train_valid_ratio=[5,1], batchsize=64, randomize_split=False): """ Example training object for training a dataset """ train_arrs = [] valid_arrs = [] phs = [] for ph, arr in feed_dict.items(): train_arr, valid_arr = split_arr(arr, train_valid_ratio, randomize=randomize_split) phs.append(ph) train_arrs.append(train_arr) valid_arrs.append(valid
_arr) iter_train = SequentialIterator(*train_arrs, batchsize=batchsize) iter_valid = SequentialIterator(*valid_arrs
, batchsize=batchsize) es = EarlyStopper(max_epoch, epoch_look_back, percent_decrease) # required for BatchNormalization layer update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS) with ops.control_dependencies(update_ops): train_op = optimizer.minimize(train_cost_sb) init = tf.global_variables_initializer() session.run(init) epoch = 0 while True: epoch += 1 ##############################[ Training ]############################## print('\n') logger.info('<<<<<[ epoch: {} ]>>>>>'.format(epoch)) logger.info('..training') pbar = ProgressBar(len(iter_train)) ttl_exp = 0 mean_train_cost = 0 for batches in iter_train: fd = dict(zip(phs, batches)) train_cost, _ = session.run([train_cost_sb, train_op], feed_dict=fd) mean_train_cost += train_cost * len(batches[0]) ttl_exp += len(batches[0]) pbar.update(ttl_exp) print('') mean_train_cost /= ttl_exp logger.info('..average train cost: {}'.format(mean_train_cost)) ##############################[ Validating ]############################ logger.info('..validating') pbar = ProgressBar(len(iter_valid)) ttl_exp = 0 mean_valid_cost = 0 for batches in iter_valid: fd = dict(zip(phs, batches)) valid_cost = session.run(valid_cost_sb, feed_dict=fd) mean_valid_cost += valid_cost * len(batches[0]) ttl_exp += len(batches[0]) pbar.update(ttl_exp) print('') mean_valid_cost /= ttl_exp logger.info('..average valid cost: {}'.format(mean_valid_cost)) if es.continue_learning(mean_valid_cost, epoch=epoch): logger.info('best epoch last update: {}'.format(es.best_epoch_last_update)) logger.info('best valid last update: {}'.format(es.best_valid_last_update)) else: logger.info('training done!') break
arju88nair/projectCulminate
venv/lib/python3.5/plat-x86_64-linux-gnu/_sysconfigdata_m.py
Python
apache-2.0
21,469
0.032559
# system configuration generated and used by the sysconfig module build_time_vars = {'ABIFLAGS': 'm', 'AC_APPLE_UNIVERSAL_BUILD': 0, 'AIX_GENUINE_CPLUSPLUS': 0, 'AR': 'x86_64-linux-gnu-gcc-ar', 'ARFLAGS': 'rc', 'ASDLGEN': 'python3.5 ../Parser/asdl_c.py', 'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py', 'AST_ASDL': '../Parser/Python.asdl', 'AST_C': 'Python/Python-ast.c', 'AST_C_DIR': 'Python', 'AST_H': 'Include/Python-ast.h', 'AST_H_DIR': 'Include', 'BASECFLAGS': '-Wno-unused-result -Wsign-compare', 'BASECPPFLAGS': '', 'BASEMODLIBS': '', 'BINDIR': '/usr/bin', 'BINLIBDEST': '/usr/lib/python3.5', 'BLDLIBRARY': '-lpython3.5m', 'BLDSHARED': 'x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 ' '-Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro', 'BUILDEXE': '', 'BUILDPYTHON': 'python', 'BUILD_GNU_TYPE': 'x86_64-unknown-linux-gnu', 'BYTESTR_DEPS': '\\', 'CC': 'x86_64-linux-gnu-gcc -pthread', 'CCSHARED': '-fPIC', 'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall ' '-Wstrict-prototypes -g -fstack-protector-strong -Wformat ' '-Werror=format-security -g -flto -fuse-linker-plugin ' '-ffat-lto-objects', 'CFLAGSFORSHARED': '-fPIC', 'CFLAGS_NODIST': '', 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in ' 'Makefile.pre.in', 'CONFIGURE_CFLAGS': '-g -fstack-protector-strong -Wformat ' '-Werror=format-security', 'CONFIGURE_CFLAGS_NODIST': '-Werror=declaration-after-statement', 'CONFIGURE_CPPFLAGS': '-Wdate-time -D_FORTIFY_SOURCE=2', 'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' " "'--enable-loadable-sqlite-extensions' " "'--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' " "'--without-ensurepip' '--with-system-expat' " "'--with-system-libmpdec' '--with-system-ffi' '--with-fpectl' " "'CC=x86_64-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector-strong " "-Wformat -Werror=format-security ' " "'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' " "'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2'", 'CONFINCLUDEDIR': '/usr/include', 'CONFINCLUDEPY': '/usr/include/python3.5m', 'COREPYTHONPATH': ':plat-x86_64-linux-gnu', 'COVERAGE_INFO': '/build/python3.5-moRWPp/python3.5-3.5.2/build-shared/coverage.info', 'COVERAGE_REPORT': '/build/python3.5-moRWPp/python3.5-3.5.2/build-shared/lcov-report', 'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov ' 'report"', 'CPPFLAGS': '-I. 
-IInclude -I../Include -Wdate-time -D_FORTIFY_SOURCE=2', 'CXX': 'x86_64-linux-gnu-g++ -pthread', 'DESTDIRS': '/usr /usr/lib /usr/lib/python3.5 /usr/lib/python3.5/lib-dynload', 'DESTLIB': '/usr/lib/python3.5', 'DESTPATH': '', 'DESTSHARED': '/usr/lib/python3.5/lib-dynload', 'DIRMODE': 755, 'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in ' 'Makefile.pre.in Include Lib Misc Ext-dummy', 'DISTDIRS': 'Include Lib Misc Ext-dummy', 'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h ' 'pyconfig.h.in Makefile.pre.in', 'DLINCLDIR': '.', 'DLLLIBRARY': '', 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, 'DYNLOADFILE': 'dynload_shlib.o', 'ENABLE_IPV6': 1, 'ENSUREPIP': 'no', 'EXE': '', 'EXEMODE': 755, 'EXTRAMACHDEPPATH': '', 'EXTRATESTOPTS': '', 'EXT_SUFFIX': '.cpython-35m-x86_64-linux-gnu.so', 'FILEMODE': 644, 'FLOCK_NEEDS_LIBBSD': 0, 'GETPGRP_HAVE_ARG': 0, 'GETTIMEOFDAY_NO_TZ': 0, 'GNULD': 'yes', 'GRAMMAR_C': 'Python/graminit.c', 'GRAMMAR_H': 'Include/graminit.h', 'GRAMMAR_INPUT': '../Grammar/Grammar', 'HAVE_ACCEPT4': 1, 'HAVE_ACOSH': 1, 'HAVE_ADDRINFO': 1, 'HAVE_ALARM': 1, 'HAVE_ALIGNED_REQUIRED': 0, 'HAVE_ALLOCA_H': 1, 'HAVE_ALTZONE': 0, 'HAVE_ASINH': 1, 'HAVE_ASM_TYPES_H': 1, 'HAVE_ATANH': 1, 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, 'HAVE_BLUETOOTH_BLUETOOTH_H': 1, 'HAVE_BLUETOOTH_H': 0, 'HAVE_BROKEN_MBSTOWCS': 0, 'HAVE_BROKEN_NICE': 0, 'HAVE_BROKEN_PIPE_BUF': 0, 'HAVE_BROKEN_POLL': 0, 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, 'HAVE_BROKEN_SEM_GETVALUE': 0, 'HAVE_BROKEN_UNSETENV': 0, 'HAVE_BUILTIN_ATOMIC': 1, 'HAVE_C99_BOOL': 1, 'HAVE_CHFLAGS': 0, 'HAVE_CHOWN': 1, 'HAVE_CHROOT': 1, 'HAVE_CLOCK': 1, 'HAVE_CLOCK_GETRES': 1, 'HAVE_CLOCK_GETTIME': 1, 'HAVE_COMPUTED_GOTOS': 1, 'HAVE_CONFSTR': 1, 'HAVE_CONIO_H': 0, 'HAVE_COPYSIGN': 1, 'HAVE_CTERMID': 1, 'HAVE_CTERMID_R': 0, 'HAVE_CURSES_H': 1, 'HAVE_CURSES_IS_TERM_RESIZED': 1, 'HAVE_CURSES_RESIZETERM': 1, 'HAVE_CURSES_RESIZE_TERM': 1, 'HAVE_DECL_ISFINITE': 1, 'HAVE_DECL_ISINF': 1, 'HAVE_DECL_ISNAN': 1, 'HAVE_DECL_TZNAME': 0, 'HAVE_DEVICE_MACROS': 1, 'HAVE_DEV_PTC': 0, 'HAVE_DEV_PTMX': 1, 'HAVE_DIRECT_H': 0, 'HAVE_DIRENT_D_TYPE': 1, 'HAVE_DIRENT_H': 1, 'HAVE_DIRFD': 1, 'HAVE_DLFCN_H': 1, 'HAVE_DLOPEN': 1, 'HAVE_DUP2': 1, 'HAVE_DUP3': 1, 'HAVE_DYNAMIC_LOADING': 1, 'HAVE_ENDIAN_H': 1, 'HAVE_EPOLL': 1, 'HAVE_EPOLL_CREATE1': 1, 'HAVE_ERF': 1, 'HAVE_ERFC': 1, 'HAVE_ERRNO_H': 1, 'HAVE_EXECV': 1, 'HAVE_EXPM1': 1, 'HAVE_FACCESSAT': 1, 'HAVE_FCHDIR': 1, 'HAVE_FCHMOD': 1, 'HAVE_FCHMODAT': 1, 'HAVE_FCHOWN': 1, 'HAVE_FCHOWNAT': 1, 'HAVE_FCNTL_H': 1, 'HAVE_FDATASYNC': 1, 'HAVE_FDOPENDIR': 1, 'HAVE_FEXECVE': 1, 'HAVE_FINITE': 1, 'HAVE_FLOCK': 1, 'HAVE_FORK': 1, 'HAVE_FORKPTY': 1, 'HAVE_FPATHCONF': 1, 'HAVE_FSEEK64': 0, 'HAVE_FSEEKO': 1, 'HAVE_FSTATAT': 1, 'HAVE_FSTATVFS': 1, 'HAVE_FSYNC': 1, 'HAVE_FTELL64': 0, 'HAVE_FTELLO': 1, 'HAVE_FTIME': 1, 'HAVE_FTRUNCATE': 1, 'HAVE_FUTIMENS': 1, 'HAVE_FUTIMES': 1, 'HAVE_FUTIMESAT': 1, 'HAVE_GAI_STRERROR': 1, 'HAVE_GAMMA': 1, 'HAVE_GCC_ASM_FOR_MC68881': 0, 'HAVE_GCC_ASM_FOR_X64': 1, 'HAVE_GCC_ASM_FOR_X87': 1, 'HAVE_GCC_UINT128_T': 1, 'HAVE_GETADDRINFO': 1, 'HAVE_GETC_UNLOCKED': 1, 'HAVE_GETENTROPY': 0, 'HAVE_GETGROUPLIST': 1, 'HAVE_GETGROUPS': 1, 'HAVE_GETHOSTBYNAME': 0, 'HAVE_GETHOSTBYNAME_R': 1, 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, 'HAVE_GETITIMER': 1, 'HAVE_GETLOADAVG': 1, 'HAVE_GETLOGIN': 1, 'HAVE_GETNAMEINFO': 1, 'HAVE_GETPAGESIZE': 1, 
'HAVE_GETPEERNAME': 1, 'HAVE_GETPGID': 1, 'HAVE_GETPGRP': 1, 'HAVE_GETPID': 1, 'HAVE_GETPRIORITY': 1, 'HAVE_GETPWENT': 1, 'HAVE_GETRANDOM': 0, 'HAVE_GETRANDOM_SYSCALL': 1, 'HAVE_GETRESGID': 1, 'HAVE_GETRESUID': 1, 'HAVE_GETSID': 1, 'HAVE_GETSPENT': 1, 'HAVE_GETSPNAM': 1, 'HAVE_GETTIMEOFDAY': 1, 'HAVE_GETWD': 1, 'HAVE_GLIBC_MEMMOVE_BUG': 0, 'HAVE_GRP_H': 1, 'HAVE_HSTRERROR': 1, 'HAVE_HTOLE64': 1, 'HAVE_HYPOT': 1, 'HAVE_IEEEFP_H': 0, 'HAVE_IF_NAMEINDEX': 1, 'HAVE_INET_ATON': 1, 'HAVE_INET_PTON': 1, 'HAVE_INITGROUPS': 1, 'HAVE_INT32_T': 1, 'HAVE_INT64_T': 1, 'HAVE_INTTYPES_H': 1, 'HAVE_IO_H': 0, 'H
AVE_IPA_PURE_CONST_BUG': 0, 'HAVE_KILL': 1, 'HAVE_KILLPG': 1, 'HAVE_KQUEUE': 0, 'HAVE_LANGINFO_H': 1, 'HAVE_LARGEF
ILE_SUPPORT': 0, 'HAVE_LCHFLAGS': 0, 'HAVE_LCHMOD': 0, 'HAVE_LCHOWN': 1, 'HAVE_LGAMMA': 1, 'HAVE_LIBDL': 1, 'HAVE_LIBDLD': 0, 'HAVE_LIBIEEE': 0, 'HAVE_LIBINTL_H': 1, 'HAVE_LIBREADLINE': 1, 'HAVE_LIBRESOLV': 0, 'HAVE_LIBSENDFILE': 0, 'HAVE_LIBUTIL_H': 0, 'HAVE_LINK': 1, 'HAVE_LINKAT': 1, 'HAVE_LINUX_CAN_BCM_H': 1, 'HAVE_LINUX_CAN_H': 1, 'HAVE_LINUX_CAN_RAW_FD_FRAMES': 1, 'HAVE_LINUX_CAN_RAW_H': 1, 'HAVE_LINUX_NETLINK_H': 1, 'HAVE_LINUX_RANDOM_H': 1, 'HAVE_LINUX_TIPC_H': 1, 'HAVE_LOCKF': 1, 'HAVE_LOG1P': 1, 'HAVE_LOG2': 1, 'HAVE_LONG_DOUBLE': 1, 'HAVE_LONG_LONG': 1, 'HAVE_LSTAT': 1, 'HAVE_LUTIMES': 1, 'HAVE_MAKEDEV': 1, 'HAVE_MBRTOWC': 1, 'HAVE_MEMMOVE': 1, 'HAVE_MEMORY_H': 1, 'HAVE_MEMRCHR': 1, 'HAVE_MKDIRAT': 1, 'HAVE_MKFIFO': 1, 'HAVE_MKFIFOAT': 1, 'HAVE_MKNOD': 1, 'HAVE_MKNODAT': 1, 'HAVE_MKTIME': 1, 'HAVE_MMAP': 1, 'HAVE_MREMAP': 1, 'HAVE_NCURSES_H': 1, 'HAVE_NDIR_H': 0, 'HAVE_NETPACKET_PACKET_H': 1, 'HAVE_NET_IF_H': 1, 'HAVE_NICE': 1, 'HAVE
dietrichc/streamline-ppc-reports
examples/adwords/v201406/basic_operations/add_ad_groups.py
Python
apache-2.0
2,698
0.004077
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example adds ad groups to a given campaign. To get ad groups, run get_ad_groups.py. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README.. Tags: AdGroupService.mutate """ __author__ = ('api.kwinter@gmail.com (Kevin Winter)' 'Joseph DiLallo') import uuid from googleads import adwords CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE' def main(client, campaign_id): # Initialize appropriate service. ad_group_service = client.GetService('AdGroupService', version='v201406') # Construct operations and add ad groups. operations = [{ 'operator': 'ADD', 'operand': { 'campaignId': campaign_id, 'name': 'Earth to Mars Cruises #%s' % uuid.uuid4(), 'status': 'ENABLED', 'biddingStrategyCo
nfiguration': { 'bids': [ { 'xsi_type': 'CpcBid', 'bid': { 'microAmount': '1000000' }, } ] } } }, { 'operator': 'ADD', 'operand': { 'campaignId': campaign_id, 'name': 'Earth
to Venus Cruises #%s' % uuid.uuid4(), 'status': 'ENABLED', 'biddingStrategyConfiguration': { 'bids': [ { 'xsi_type': 'CpcBid', 'bid': { 'microAmount': '1000000' } } ] } } }] ad_groups = ad_group_service.mutate(operations) # Display results. for ad_group in ad_groups['value']: print ('Ad group with name \'%s\' and id \'%s\' was added.' % (ad_group['name'], ad_group['id'])) if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client, CAMPAIGN_ID)
t3dev/odoo
addons/website_slides_survey/models/slide_channel.py
Python
gpl-3.0
488
0.004098
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details
. from odoo import fields, models class Channel(models.Model): _inherit = 'slide.channel' nbr_certification = fields.Integer("Number of Certifications", compute='_compute_slides_statistics', store=True) class Category(models.Model): _inherit = 'slide.category' nbr_certification = fields.Integer("Number of Certifications", compute='_count_presentations', store=Tru
e)
mvidalgarcia/indico
indico/modules/users/models/suggestions.py
Python
mit
2,186
0.002287
# This file is part of Indico. # Copyright (C) 2002 - 2019 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals from indico.core.db import db from indico.util.string import format_repr, return_ascii class SuggestedCategory(db.Model): __tablename__ = 'suggested_categories' __table_args__ = {'schema': 'users'} user_id = db.Column( db.Integer, db.ForeignKey('users.users.id'), primary_key=True, index=True, autoincrement=False ) category_id = db.Column( db.Integer, db.ForeignKey('categories.categories.id'), primary_key=True, index=True, autoincreme
nt=False ) is_ignored = db.Column(
db.Boolean, nullable=False, default=False ) score = db.Column( db.Float, nullable=False, default=0 ) category = db.relationship( 'Category', lazy=False, backref=db.backref( 'suggestions', lazy=True, cascade='all, delete-orphan' ) ) # relationship backrefs: # - user (User.suggested_categories) @return_ascii def __repr__(self): return format_repr(self, 'user_id', 'category_id', 'score', is_ignored=False) @classmethod def merge_users(cls, target, source): """Merge the suggestions for two users. :param target: The target user of the merge. :param source: The user that is being merged into `target`. """ target_suggestions = {x.category: x for x in target.suggested_categories} for suggestion in source.suggested_categories: new_suggestion = target_suggestions.get(suggestion.category) or cls(user=target, category=suggestion.category) new_suggestion.score = max(new_suggestion.score, suggestion.score) new_suggestion.is_ignored = new_suggestion.is_ignored or suggestion.is_ignored db.session.flush()
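The merge_users() classmethod above keeps, per category, the higher score and the OR of the two ignore flags; a plain-Python sketch of that rule on toy data (no ORM, names invented for illustration):

# category -> (score, is_ignored)
source_suggestions = {'cat-1': (0.8, False), 'cat-2': (0.3, True)}
target_suggestions = {'cat-1': (0.5, True)}

for category, (score, ignored) in source_suggestions.items():
    old_score, old_ignored = target_suggestions.get(category, (0.0, False))
    target_suggestions[category] = (max(old_score, score), old_ignored or ignored)

print(target_suggestions)   # {'cat-1': (0.8, True), 'cat-2': (0.3, True)}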
robocomp/learnbot
learnbot_dsl/learnbotCode/VisualBlock.py
Python
gpl-3.0
29,715
0.00313
from __future__ import print_function, absolute_import from PySide2 import QtGui,QtCore,QtWidgets from math import * import pickle, os, json import learnbot_dsl.guis.EditVar as EditVar from learnbot_dsl.learnbotCode.Block import * from learnbot_dsl.learnbotCode.Language import getLanguage from learnbot_dsl.learnbotCode.toQImage import * from learnbot_dsl.learnbotCode.Parser import parserLearntBotCodeOnlyUserFuntion from learnbot_dsl.blocksConfig import pathImgBlocks from learnbot_dsl.learnbotCode import getAprilTextDict class KeyPressEater(QtCore.QObject): def eventFilter(self, obj, event): if isinstance(event, QtGui.QMouseEvent) and event.buttons() & QtCore.Qt.RightButton: return True return False def toLBotPy(inst, ntab=1, offset=0): text = inst[0] if inst[1]["TYPE"] in [USERFUNCTION, LIBRARY]: text = inst[0] + "()" else: inst[1]["VISUALBLOCK"].startOffset = offset if inst[1]["TYPE"] is CONTROL: if inst[1]["VARIABLES"] is not None: text = inst[0] + "(" for var in inst[1]["VARIABLES"]: text += var + ", " text = text[0:-2] + "" text += ")" if inst[1]["TYPE"] is FUNTION: text = "function." + inst[0] + "(" if inst[1]["VARIABLES"] is not None: for var in inst[1]["VARIABLES"]: text += var + ", " text = text[0:-2] + "" text += ")" elif inst[1]["TYPE"] is VARIABLE: text = inst[0] if inst[1]["VARIABLES"] is not None: text += " = " for var in inst[1]["VARIABLES"]: text += var if inst[1]["RIGHT"] is not None: text += " " text += toLBotPy(inst[1]["RIGHT"], ntab, len(text) + offset) if inst[1]["BOTTOMIN"] is not None: text += ":\n" + "\t" * ntab text += toLBotPy(inst[1]["BOTTOMIN"], ntab + 1, len(text) + offset) if inst[0] in ["while", "while True"]: text += "\n" + "\t" * (ntab - 1) + "end" if inst[0] == "else" or (inst[0] in ["if", "elif"] and (inst[1]["BOTTOM"] is None or ( inst[1]["BOTTOM"] is not None and inst[1]["BOTTOM"][0] not in ["elif", "else"]))): text += "\n" + "\t" * (ntab - 1) + "end" inst[1]["VISUALBLOCK"].endOffset = len(text)-1 + offset if inst[1]["BOTTOM"] is not None: text += "\n" + "\t" * (ntab - 1) text += toLBotPy(inst[1]["BOTTOM"], ntab, len(text) + offset) return text def EuclideanDist(p1, p2): p = p1 - p2 return sqrt(pow(p.x(), 2) + pow(p.y(), 2)) class VarGui(QtWidgets.QDialog, EditVar.Ui_Dialog): def init(self): self.setupUi(self) def getTable(self): return self.table def setSlotToDeleteButton(self, fun): self.deleteButton.clicked.connect(fun) self.okButton.clicked.connect(self.close) class VisualBlock(QtWidgets.QGraphicsPixmapItem, QtWidgets.QWidget): def __init__(self, parentBlock, parent=None, scene=None): self.startOffset = None self.endOffset = None self._notifications = [] self.parentBlock = parentBlock self.__typeBlock = self.parentBlock
.typeBlock self.__type = self.parentBlock.type self.id = self.parentBlock.id self.connections = self.parentBlock.connections
self.highlighted = False for c in self.connections: c.setParent(self.parentBlock) self.dicTrans = parentBlock.dicTrans self.shouldUpdate = True if len(self.dicTrans) is 0: self.showtext = self.parentBlock.name else: self.showtext = self.dicTrans[getLanguage()] QtWidgets.QGraphicsPixmapItem.__init__(self) QtWidgets.QWidget.__init__(self) def foo(x): return 32 # Load Image of block im = cv2.imread(self.parentBlock.file, cv2.IMREAD_UNCHANGED) r, g, b, a = cv2.split(im) rgb = cv2.merge((r, g, b)) hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV) h, s, v = cv2.split(hsv) h = h + self.parentBlock.hue s = s + 160 hsv = cv2.merge((h, s, v)) im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) r, g, b = cv2.split(im) self.cvImg = cv2.merge((r, g, b, a)) self.cvImg = np.require(self.cvImg, np.uint8, 'C') # if self.parentBlock.type is VARIABLE: # self.showtext = self.parentBlock.name + " "+ self.showtext img = generateBlock(self.cvImg, 34, self.showtext, self.parentBlock.typeBlock, None, self.parentBlock.type, self.parentBlock.nameControl) qImage = toQImage(img) # Al multiplicar por 0 obtenemos facilmente un ndarray inicializado a 0 # similar al original try: self.header = copy.copy(self.cvImg[0:39, 0:149]) self.foot = copy.copy(self.cvImg[69:104, 0:149]) except: pass self.img = QtGui.QPixmap(qImage) self.scene = scene self.setFlags(QtWidgets.QGraphicsItem.ItemIsMovable) self.setZValue(1) self.setPos(self.parentBlock.pos) self.scene.activeShouldSave() self.updatePixmap() self.timer = QtCore.QTimer() self.timer.timeout.connect(self.update) self.posmouseinItem = None self.DialogVar = None self.popMenu = None self.create_dialogs() self.sizeIn = 0 self.shouldUpdateConnections = False def addNotification(self, notification): tooltip = self.toolTip() if tooltip: tooltip += '<hr />' tooltip += notification.simpleHtml() self.setToolTip(tooltip) self._notifications.append(notification) def clearNotifications(self): self._notifications = [] self.setToolTip('') def notifications(self): return self._notifications def highlight(self): self.highlighted = True self.updateImg(force=True) self.updatePixmap() def unhighlight(self): self.highlighted = False self.updateImg(force=True) self.updatePixmap() def updatePixmap(self): self.setPixmap(self.img) def create_dialogs(self): if self.DialogVar is not None: del self.DialogVar vars = self.parentBlock.vars self.DialogVar = VarGui() self.DialogVar.init() self.DialogVar.setSlotToDeleteButton(self.delete) self.tabVar = self.DialogVar.getTable() self.tabVar.verticalHeader().setVisible(False) self.tabVar.horizontalHeader().setVisible(True) self.tabVar.setColumnCount(4) self.tabVar.setRowCount(len(vars)) self.tableHeader = [] #QtCore.QStringList() self.tableHeader.append(self.tr('Name')) self.tableHeader.append(self.tr('Constant')) self.tableHeader.append(self.tr('Set to')) self.tableHeader.append(self.tr('Type')) self.tabVar.setHorizontalHeaderLabels(self.tableHeader) self.tabVar.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents) # i = 0 for i, var in zip(range(len(vars)),vars): try: if getLanguage() in var.translate: self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.translate[getLanguage()])) else: self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name)) except: self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name)) if var.type in ["float","int", "string"]: edit = QtWidgets.QLineEdit() if var.type == "float": edit.setValidator(QtGui.QDoubleValidator()) self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('number'))) elif var.type == "int": 
edit.setValidator(QtGui.QIntValidator()) self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('number'))) else: self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('string')))
nailor/filesystem
filesystem/test/test_copyonwrite_mkdir.py
Python
mit
686
0.002915
from __future__ import with_statement from nose.tools import ( eq_ as eq, ) from filesystem.test.util import ( maketemp, assert_raises, ) import errno import os import filesystem import filesystem.copyonwrite def test_mkdir(): tmp = maketemp() filesystem.copyonwrite.path(filesystem.path(tmp)).child('foo').mkdir() foo = os.path.join(tmp
, 'foo') assert not os.path.isdir(foo) def test_mkdir_bad_exists(): tmp = maketemp() p = filesystem.copyonwrite.path(filesystem.path(tmp)).child('foo') with p.open('w') as f: f.write('bar') e = assert_raises( OSError, p.mkdir,
) eq(e.errno, errno.EEXIST)
reflection/elasticsearch-dsl-py
elasticsearch_dsl/query.py
Python
apache-2.0
5,587
0.00358
from .utils import DslBase, BoolMixin, _make_dsl_class from .function import SF, ScoreFunction __all__ = [ 'Q', 'Bool', 'Boosting', 'Common', 'ConstantScore', 'DisMax', 'Filtered', 'FunctionScore', 'Fuzzy', 'FuzzyLikeThis', 'FuzzyLikeThisField', 'GeoShape', 'HasChild', 'HasParent', 'Ids', 'Indices', 'Match', 'MatchAll', 'MatchPhrase', 'MatchPhrasePrefix', 'MoreLikeThis', 'MoreLikeThisField', 'MultiMatch', 'Nested', 'Prefix', 'Query', 'QueryString', 'Range', 'Regexp', 'SF', 'ScoreFunction', 'SimpleQueryString', 'SpanFirst', 'SpanMulti', 'SpanNear', 'SpanNot', 'SpanOr', 'SpanTerm', 'Template', 'Term', 'Terms', 'TopChildren', 'Wildcard' ] def Q(name_or_query='match_all', **params): # {"match": {"title": "python"}} if isinstance(name_or_query, dict): if params: raise ValueError('Q() cannot accept parameters when passing in a dict.') if len(name_or_query) != 1: raise ValueError('Q() can only accept dict with a single query ({"match": {...}}). ' 'Instead it got (%r)' % name_or_query) name, params = name_or_query.copy().popitem() return Query.get_dsl_class(name)(**params) # MatchAll() if isinstance(name_or_query, Query): if params: raise ValueError('Q() cannot accept parameters when passing in a Query object.') return name_or_query # s.query = Q('filtered', query=s.query) if hasattr(name_or_query, '_proxied'): return name_or_query._proxied # "match", title="python" return Query.get_dsl_class(name_or_query)(**params) class Query(DslBase): _type_name = 'query' _type_shortcut = staticmethod(Q) name = None class MatchAll(Query): name = 'match_all' def __add__(self, other): return other._clone() __and__ = __rand__ = __radd__ = __add__ def __or__(self, other): return self __ror__ = __or__ EMPTY_QUERY = MatchAll() class Bool(BoolMixin, Query): name = 'bool' _param_defs = { 'must': {'type': 'query', 'multi': True}, 'should': {'type': 'query', 'multi': True}, 'must_not': {'type': 'query', 'multi': True}, } def __and__(self, other): q = self._clone() if isinstance(other, self.__class__): q.must += other.must q.must_not += other.must_not q.should = [] for qx in (self, other): min_should_match = getattr(qx, 'minimum_should_match', 0 if any((qx.must, qx.must_not)) else 1) # all subqueries are required if len(qx.should) <= min_should_match: q.must.extend(qx.should) # not all of them are required, use it and remember min_should_match elif not q.should: q.minimum_should_match = min_should_match q.should = qx.should
# not all are required, add a should list to the must with proper min_should_match else: q.must.append(Bool(should=qx.should, minimum_should_match=min_should_match)) else: q.must.append(other) return q __rand__ = __and__ # register this as Bool for Query Query._bool = Bool class FunctionScore(Query): name = 'function_score' _param_defs = { 'query': {'type': 'query'}, 'filter': {'type': 'fi
lter'}, 'functions': {'type': 'score_function', 'multi': True}, } def __init__(self, **kwargs): if 'functions' in kwargs: pass else: fns = kwargs['functions'] = [] for name in ScoreFunction._classes: if name in kwargs: fns.append({name: kwargs.pop(name)}) super(FunctionScore, self).__init__(**kwargs) QUERIES = ( # compound queries ('boosting', {'positive': {'type': 'query'}, 'negative': {'type': 'query'}}), ('constant_score', {'query': {'type': 'query'}, 'filter': {'type': 'filter'}}), ('dis_max', {'queries': {'type': 'query', 'multi': True}}), ('filtered', {'query': {'type': 'query'}, 'filter': {'type': 'filter'}}), ('indices', {'query': {'type': 'query'}, 'no_match_query': {'type': 'query'}}), # relationship queries ('nested', {'query': {'type': 'query'}}), ('has_child', {'query': {'type': 'query'}}), ('has_parent', {'query': {'type': 'query'}}), ('top_children', {'query': {'type': 'query'}}), # compount span queries ('span_first', {'match': {'type': 'query'}}), ('span_multi', {'match': {'type': 'query'}}), ('span_near', {'clauses': {'type': 'query', 'multi': True}}), ('span_not', {'exclude': {'type': 'query'}, 'include': {'type': 'query'}}), ('span_or', {'clauses': {'type': 'query', 'multi': True}}), # core queries ('common', None), ('fuzzy', None), ('fuzzy_like_this', None), ('fuzzy_like_this_field', None), ('geo_shape', None), ('ids', None), ('match', None), ('match_phrase', None), ('match_phrase_prefix', None), ('more_like_this', None), ('more_like_this_field', None), ('multi_match', None), ('prefix', None), ('query_string', None), ('range', None), ('regexp', None), ('simple_query_string', None), ('span_term', None), ('template', None), ('term', None), ('terms', None), ('wildcard', None), ) # generate the query classes dynamicaly for qname, params_def in QUERIES: qclass = _make_dsl_class(Query, qname, params_def) globals()[qclass.__name__] = qclass
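The Q() factory above accepts a query name with keyword parameters, a single-key dict, or an existing Query object; a short usage sketch, assuming the elasticsearch-dsl package this file belongs to is installed:

from elasticsearch_dsl.query import Q

q1 = Q('match', title='python')           # name + keyword parameters
q2 = Q({'match': {'title': 'python'}})    # equivalent single-key dict form
q3 = Q(q1)                                # an existing Query object is passed through unchanged

print(q3 is q1)        # True
print(q1.to_dict())    # {'match': {'title': 'python'}}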
barentsen/k2flix
k2flix/__init__.py
Python
mit
99
0.010101
#!/usr/bin/env python # -*- coding: utf-8 -*- from .version import __version__ from .core im
port *
maas/python-libmaas
maas/client/viscera/tests/test_boot_sources.py
Python
agpl-3.0
5,431
0.000368
"""Test for `maas.client.viscera.boot_sources`.""" import random from testtools.matchers import Equals, MatchesStructure from .. import boot_sources from ...testing import make_name_without_spaces, TestCase from ..testing import bind def make_origin(): # Create a new origin with BootSources and BootSource. The former refers # to the latter via the origin, hence why it must be bound. return bind(boot_sources.BootSources, boot_sources.BootSource) class TestBootSource(TestCase): def test__string_representation_includes_url_keyring_info_only(self): source = boot_sources.BootSource( { "url": "http://images.maas.io/ephemeral-v3/daily/", "keyring_filename": ( "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg" ), "keyring_data": "", } ) self.assertThat( repr(source), Equals( "<BootSource keyring_data=%(keyring_data)r " "keyring_filename=%(keyring_filename)r url=%(url)r>" % (source._data) ), ) def test__read(self): source_id = random.randint(0, 100) url = "http://images.maas.io/ephemeral-v3/daily/" keyring_filename = "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg" BootSource = make_origin().BootSource BootSource._handler.read.return_value = { "id": source_id, "url": url, "keyring_filename": keyring_filename, "keyring_data": "", } source = BootSource.read(source_id) BootSource._handler.read.assert_called_once_with(id=source_id) self.assertThat( source, MatchesStructure.byEquality( id=source_id, url=url, keyring_filename=keyring_filename, keyring_data="", ), ) def test__delete(self): source_id = random.randint(0, 100) BootSource = make_origin().BootSource source = BootSource( { "id": source_id, "url": "http://images.maas.io/ephemeral-v3/daily/", "keyring_filename": ( "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg" ), "keyring_data": "", } ) source.delete() BootSource._handler.delete.assert_called_once_with(id=source_id) class TestBootSources(TestCase): def test__read(self): BootSources = make_origin().BootSources BootSources._handler.read.return_value = [ {"id": random.randint(0, 9)}, {"id": random.randint(10, 19)}, ] sources = BootSources.read() self.assertEquals(2, len(sources)) def test__create_calls_create_with_keyring_filename(self): source_id = random.randint(0, 100) url = "http://images.maas.io/ephemeral-v3/daily/" keyring_filename = "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg" BootSources = make_origin().BootSources BootSources._handler.create.return_value = { "id": source_id, "url": url,
"keyring_filename": keyring_filename, "keyring_data": "", } source = BootSources.create(url, keyring_filename=keyring_filename) BootSources._handler.create.assert_called_once_with( url=url, keyring_filename=keyring_filename, keyring_data="" ) self.assertThat( source, Matche
sStructure.byEquality( id=source_id, url=url, keyring_filename=keyring_filename, keyring_data="", ), ) def test__create_calls_create_with_keyring_data(self): source_id = random.randint(0, 100) url = "http://images.maas.io/ephemeral-v3/daily/" keyring_data = make_name_without_spaces("data") BootSources = make_origin().BootSources BootSources._handler.create.return_value = { "id": source_id, "url": url, "keyring_filename": "", "keyring_data": keyring_data, } source = BootSources.create(url, keyring_data=keyring_data) BootSources._handler.create.assert_called_once_with( url=url, keyring_filename="", keyring_data=keyring_data ) self.assertThat( source, MatchesStructure.byEquality( id=source_id, url=url, keyring_filename="", keyring_data=keyring_data ), ) def test__create_calls_create_with_unsigned_url(self): source_id = random.randint(0, 100) url = "http://images.maas.io/ephemeral-v3/daily/streams/v1/index.json" BootSources = make_origin().BootSources BootSources._handler.create.return_value = { "id": source_id, "url": url, "keyring_filename": "", "keyring_data": "", } source = BootSources.create(url) BootSources._handler.create.assert_called_once_with( url=url, keyring_filename="", keyring_data="" ) self.assertThat( source, MatchesStructure.byEquality( id=source_id, url=url, keyring_filename="", keyring_data="" ), )
oscarlazoarjona/fast
fast/config.py
Python
gpl-3.0
1,912
0
# -*- coding: utf-8 -*- # *********************************************************************** # Copyright (C) 2014 - 2017 Oscar Gerardo Lazo Arjona * # <oscar.lazo@correo.nucleares.unam.mx> * # * # This file is part of FAST. * # * # FAST is free software: you can redistribute it and/or modify * # it under the terms of the GNU General Public License as published by * # the Free Software Foundation, either version 3 of the License, or * # (at your option) any later version. * # * # FAST is distributed in the hope that it will be useful, * # but WITHOUT ANY WARRANTY; without even the implied warranty of * # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # GNU General Public License for more details. * # * # You should have received a copy of the GNU General Public License * # along with FAST. If not, see <http://www.gnu.org/licenses/>. * # * # *
********************************************************************** """The basic configuration of FAST.""" from fast import __file__ # Whether to use parallelization through OpenMP. parallel = True parallel = False # Whether to use NETCDF binary files
 for data communication. use_netcdf = True use_netcdf = False # An integer between 0 and 2 to control which tests are run. run_long_tests = 0 # The install directory for FAST: fast_path = __file__[:-len("__init__.pyc")]
JamesLinEngineer/RKMC
addons/plugin.video.phstreams/resources/lib/modules/proxy.py
Python
gpl-2.0
2,896
0.013812
# -*- coding: utf-8 -*- ''' Exodus Add-on Copyright (C) 2016 Exodus This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import urllib,random from resources.lib.modules import client def request(url, check): try: result = client.request(url) if check in str(result): return result.decode('iso-8859-1').encode('utf-8') result = client.request(get() + urllib.quote_plus(url)) if check in str(result): return result.decode('iso-8859-1').encode('utf-8') result = client.request(get() + urllib.quote_plus(url)) if check in str(
result): return result.decode('iso-8859-1').encode('utf-8') except: pass def get(): return random.choice([ 'http://4freeproxy.com/browse.php?b=20&u=', 'https://www.3proxy.us/index.php?hl=2e5&q=', 'https://www.4proxy.us/index.php?hl=2e5&q=', 'http://www.accessmeproxy.
net/browse.php?b=20&u=', 'http://buka.link/browse.php?b=20&u=', 'http://fastrow.win/browse.php?b=20&u=', 'http://free-proxyserver.com/browse.php?b=20&u=', 'http://www.ipunblocker.com/browse.php?b=20&u=', 'http://www.mybriefonline.xyz/browse.php?b=20&u=', 'http://www.navigate-online.xyz/browse.php?b=20&u=', 'http://protectproxy.com/browse.php?b=20&u=', 'http://proxite.net/browse.php?b=20&u=', 'http://proxydash.com/browse.php?b=20&u=', 'http://www.proxywebsite.us/browse.php?b=20&u=', 'http://proxy-server.co/browse.php?b=20&u=', 'http://www.ruby-group.xyz/browse.php?b=20&u=', 'http://securefor.com/browse.php?b=20&u=', 'http://www.singleclick.info/browse.php?b=20&u=', 'http://www.socialcommunication.xyz/browse.php?b=20&u=', 'http://tbjr6.net/browse.php?b=20&u=', 'http://un404.com/browse.php?b=20&u=', 'http://www.unblockmyweb.com/browse.php?b=20&u=', 'http://unblockthatsite.net/ahora.php?b=20&u=', 'http://unblock-youtube.org/browse.php?b=20&u=', 'http://webproxy.stealthy.co/browse.php?b=20&u=', 'http://www.whyproxy.com/browse.php?b=20&u=', 'http://www.xxlproxy.com/index.php?hl=3e4&q=', 'http://zend2.com//open12.php?b=20&u=', 'https://zendproxy.com/bb.php?b=20&u=', 'https://zproxy.de/anon.php?b=20&u=' ])
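request() above retries a failed or unexpected response through a randomly chosen web-proxy prefix with the target URL percent-encoded; a stand-alone sketch of that fallback pattern using only the standard library (the proxy URL below is a placeholder, not one of the add-on's mirrors):

import random
import urllib.parse
import urllib.request

PROXY_PREFIXES = ['http://proxy.example.invalid/browse.php?b=20&u=']   # placeholder list

def fetch_with_fallback(url, check, retries=2):
    candidates = [url] + [random.choice(PROXY_PREFIXES) + urllib.parse.quote_plus(url)
                          for _ in range(retries)]
    for candidate in candidates:
        try:
            body = urllib.request.urlopen(candidate, timeout=10).read().decode('utf-8', 'ignore')
        except Exception:
            continue
        if check in body:
            return body
    return None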
tech-teach/microservice-topology
tasks/core/cmetrics_run_test.py
Python
mit
878
0.003417
from ctyp
es import * import json import ast NN = CDLL('./libNN.so') for distance in range(15): file_rows = open("Data/tecator.csv", 'r').read().split('\n') file_con
tent = [ float(value) for row in file_rows for value in row.split(',') if value != '' ] numfil = len(file_rows) - 1 numcol = len(file_rows[0].split(',')) file_content_c = ( (c_float * len(file_content))(*file_content) ) NN.main.restype=c_char_p print(NN.main(8, distance, file_content_c, numfil, numcol)) ''' NN.main.restype=c_char_p response = json.loads( str( NN.main(8, 0, file_content_c) ).replace("'", '"') ) response = { key.encode(): value.encode() if isinstance(value, unicode) else value for key, value in response.items() } print(response) '''
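The script above leans on three ctypes idioms: loading a shared library with CDLL, declaring the foreign function's return type via restype, and packing a Python list into a C array with (c_float * n)(*values). A minimal sketch of the same idioms against the system math library (library lookup is platform dependent, hence find_library, and may need adjusting outside Linux):

from ctypes import CDLL, c_double, c_float
from ctypes.util import find_library

libm = CDLL(find_library('m'))   # plays the role of CDLL('./libNN.so') above
libm.cos.restype = c_double      # same idea as NN.main.restype
libm.cos.argtypes = [c_double]
print(libm.cos(0.0))             # 1.0

values = [1.5, 2.5, 3.5]
c_array = (c_float * len(values))(*values)   # mirrors file_content_c above
print(list(c_array))                         # [1.5, 2.5, 3.5]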
solashirai/edx-platform
lms/djangoapps/ccx/tests/test_models.py
Python
agpl-3.0
10,002
0.0007
""" tests for the models """ import json from datetime import datetime, timedelta from django.utils.timezone import UTC from mock import patch from nose.plugins.attrib import attr from student.roles import CourseCcxCoachRole from student.tests.factories import ( AdminFactory, ) from util.tests.test_date_utils import fake_ugettext from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import ( CourseFactory, check_mongo_calls ) from .factories import ( CcxFactory, ) from ..overrides import override_field_for_ccx @attr('shard_1') class TestCCX(ModuleStoreTestCase): """Unit tests for the CustomCourseForEdX model """ def setUp(self): """common setup for all tests""" super(TestCCX, self).setUp() self.course = CourseFactory.create() self.coach = AdminFactory.create() role = CourseCcxCoachRole(self.course.id) role.add_users(self.coach) self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach) def set_ccx_override(self, field, value): """Create a field override for the test CCX on <field> with <value>""" override_field_for_ccx(self.ccx, self.course, field, value) def test_ccx_course_is_correct_course(self): """verify that the course property of a ccx returns the right course""" expected = self.course actual = self.ccx.course self.assertEqual(expected, actual) def test_ccx_course_caching(self): """verify that caching the propery works to limit queries""" with check_mongo_calls(1): # these statements are used entirely to demonstrate the # instance-level caching of these values on CCX objects. The # check_mongo_calls context is the point here. self.ccx.course # pylint: disable=pointless-statement with check_mongo_calls(0): self.ccx.course # pylint: disable=pointless-statement def test_ccx_start_is_correct(self): """verify that the start datetime for a ccx is correctly retrieved Note that after setting the start field override microseconds are truncated, so we can't do a direct comparison between before and after. For this reason we test the difference between and make sure it is less than one second. """ expected = datetime.now(UTC()) self.set_ccx_override('start', expected) actual = self.ccx.start # pylint: disable=no-member diff = expected - actual self.assertLess(abs(diff.total_seconds()), 1) def test_ccx_start_caching(self): """verify that caching the start property works to limit queries""" now = datetime.now(UTC()) self.set_ccx_override('start', now) with check_mongo_calls(1): # these statements are used entirely to demonstrate the # instance-level caching of these values on CCX objects. The # check_mongo_calls context is the point here. 
self.ccx.start # pylint: disable=pointless-statement, no-member with check_mongo_calls(0): self.ccx.start # pylint: disable=pointless-statement, no-member def test_ccx_due_without_override(self): """verify that due returns None when the field has not been set""" actual = self.ccx.due # pylint: disable=no-member self.assertIsNone(actual) def test_ccx_due_is_correct(self): """verify that the due datetime for a ccx is correctly retrieved""" expected = datetime.now(UTC()) self.set_ccx_override('due', expected) actual = self.ccx.due # pylint: disable=no-member diff = expected - actual self.assertLess(abs(diff.total_seconds()), 1) def test_ccx_due_caching(self): """verify that caching the due property works to limit queries""" expected = datetime.now(UTC()) self.set_ccx_override('due', expected) with check_mongo_calls(1): # these statements are used entirely to demonstrate the # instance-level caching of these values on CCX objects. The # check_mongo_calls context is the point here. self.ccx.due # pylint: disable=pointless-statement, no-member with check_mongo_calls(0): self.ccx.due # pylint: disable=pointless-statement, no-member def test_ccx_has_started(self): """verify that a ccx marked as starting yesterday has started""" now = datetime.now(UTC()) delta = timedelta(1) then = now - delta self.set_ccx_override('start', then) self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member def test_ccx_has_not_started(self): """verify that a ccx marked as starting tomorrow has not started""" now = datetime.now(UTC()) delta = timedelta(1) then = now + delta self.set_ccx_override('start', then) self.assertFal
se(self.ccx.has_started()) # pylint: disable=no-member def test_ccx_has_ended(self): """verify that a ccx that has a due date in the past has ended""" now = datetime.now(UTC()) delta = timedelta(1) then = now - delta self.set_ccx_override('due', then) self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member def test_ccx_has_not_ended(self): """verify that a ccx that has a due date in the future has not ended "
"" now = datetime.now(UTC()) delta = timedelta(1) then = now + delta self.set_ccx_override('due', then) self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member def test_ccx_without_due_date_has_not_ended(self): """verify that a ccx without a due date has not ended""" self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member # ensure that the expected localized format will be found by the i18n # service @patch('util.date_utils.ugettext', fake_ugettext(translations={ "SHORT_DATE_FORMAT": "%b %d, %Y", })) def test_start_datetime_short_date(self): """verify that the start date for a ccx formats properly by default""" start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC()) expected = "Jan 01, 2015" self.set_ccx_override('start', start) actual = self.ccx.start_datetime_text() # pylint: disable=no-member self.assertEqual(expected, actual) @patch('util.date_utils.ugettext', fake_ugettext(translations={ "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M", })) def test_start_datetime_date_time_format(self): """verify that the DATE_TIME format also works as expected""" start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC()) expected = "Jan 01, 2015 at 12:00 UTC" self.set_ccx_override('start', start) actual = self.ccx.start_datetime_text('DATE_TIME') # pylint: disable=no-member self.assertEqual(expected, actual) @patch('util.date_utils.ugettext', fake_ugettext(translations={ "SHORT_DATE_FORMAT": "%b %d, %Y", })) def test_end_datetime_short_date(self): """verify that the end date for a ccx formats properly by default""" end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC()) expected = "Jan 01, 2015" self.set_ccx_override('due', end) actual = self.ccx.end_datetime_text() # pylint: disable=no-member self.assertEqual(expected, actual) @patch('util.date_utils.ugettext', fake_ugettext(translations={ "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M", })) def test_end_datetime_date_time_format(self): """verify that the DATE_TIME format also works as expected""" end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC()) expected = "Jan 01, 2015 at 12:00 UTC" self.set_ccx_override('due', end) actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member self.assertEqual(expected, actual) @patch('util.date_utils.ugettext', fake_ugettext(translations={ "DATE_TIME_FORMAT": "%b %d, %Y at %H:%
titilambert/home-assistant
homeassistant/components/homeassistant/triggers/time.py
Python
apache-2.0
4,049
0.000247
"""Offer time listening automation rules.""" from datetime import datetime import logging import voluptuous as vol from homeassistant.const import CONF_AT, CONF_PLATFORM from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.event import ( async_track_point_in_time, async_track_state_change, async_track_time_change, ) import homeassistant.util.dt as dt_util # mypy: allow-untyped-defs, no-check-untyped-defs _LOGGER = logging.getLogger(__name__) _TIME_TRIGGER_SCHEMA = vol.Any( cv.time, vol.All(str, cv.entity_domain("input_datetime")), msg="Expected HH:MM, HH:MM:SS or Entity ID from domain 'input_datetime'", ) TRIGGER_SCHEMA = vol.Schema( { vol.Required(CONF_PLATFORM): "time", vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]), } ) async def async_attach_trigger(hass, config, action, automation_info): """Listen for state changes based on configuration.""" entities = {} removes = [] @callback def time_automation_listener(now): """Listen for time changes and calls action.""" hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}}) @callback def update_entity_trigger(entity_id, old_state=None, new_state=None): # If a listener was already set up for entity, remove it. remove = entities.get(entity_id) if remove: remove() removes.remove(remove) remove = None # Check state of entity. If valid, set up a listener. if new_state: has_date = new_state.attributes["has_date"] if has_date: year = new_state.attributes["year"] month = new_state.attributes["month"] day = new_state.attributes["day"] has_time = new_state.attributes["has_time"] if has_time: hour = new_state.attributes["hour"] minute = new_state.attribu
tes["minute"] second
= new_state.attributes["second"] else: # If no time then use midnight. hour = minute = second = 0 if has_date: # If input_datetime has date, then track point in time. trigger_dt = dt_util.DEFAULT_TIME_ZONE.localize( datetime(year, month, day, hour, minute, second) ) # Only set up listener if time is now or in the future. if trigger_dt >= dt_util.now(): remove = async_track_point_in_time( hass, time_automation_listener, trigger_dt ) elif has_time: # Else if it has time, then track time change. remove = async_track_time_change( hass, time_automation_listener, hour=hour, minute=minute, second=second, ) # Was a listener set up? if remove: removes.append(remove) entities[entity_id] = remove for at_time in config[CONF_AT]: if isinstance(at_time, str): # input_datetime entity update_entity_trigger(at_time, new_state=hass.states.get(at_time)) else: # datetime.time removes.append( async_track_time_change( hass, time_automation_listener, hour=at_time.hour, minute=at_time.minute, second=at_time.second, ) ) # Track state changes of any entities. removes.append( async_track_state_change(hass, list(entities), update_entity_trigger) ) @callback def remove_track_time_changes(): """Remove tracked time changes.""" for remove in removes: remove() return remove_track_time_changes
dcjohnston/geojs
dashboard/github_service/dashboard.py
Python
bsd-3-clause
5,762
0
#!/usr/bin/env python import os import shutil import socket from datetime import datetime import subprocess as sp import json from pymongo import MongoClient _ctest = ''' set(CTEST_SOURCE_DIRECTORY "{source}") set(CTEST_BINARY_DIRECTORY "{build}") include(${{CTEST_SOURCE_DIRECTORY}}/CTestConfig.cmake) set(CTEST_SITE "{site}") set(CTEST_BUILD_NAME "{name}") set(CTEST_CMAKE_GENERATOR "Unix Makefiles") ctest_start("Experimental") ctest_configure() ctest_build() ctest_test(PARALLEL_LEVEL 1 RETURN_VALUE res) ctest_submit() if(NOT res EQUAL 0) message(FATAL_ERROR "Test failures occurred.") endif() ''' _host = socket.gethostname().split('.')[0] def config(): return { 'mongo-host': 'lusitania', 'mongo-port': 27017, 'mongo-database': 'geojs_dashboard', 'test-dir': '~/geojs-testing', 'repo': 'https://github.com/OpenGeoscience/geojs.git', 'kill-server': '/Users/jbeezley/bin/killtestserver', 'add-path': '/usr/local/bin', 'cmake': '/usr/local/bin/cmake', 'ctest': '/usr/local/bin/ctest', 'git': '/usr/local/bin/git' } def _communicate(cmd, **kw): cfg = config() pth = os.environ.get('PATH', '') if cfg.get('add-path'): pth = cfg['add-path'] + ':' + pth kw['stderr'] = sp.STDOUT kw['stdout'] = sp.PIPE kw['shell'] = True p = sp.Popen( '/usr/bin/env PATH=' + pth + ' ' + cmd, **kw ) out, err = p.communicate() return p.returncode, out def run_test(repo, commit, testdir, branch): cfg = config() git = cfg.get('git', 'git') cmake = cfg.get('cmake', 'cmake') ctest = cfg.get('ctest', 'ctest') print cmake # ====================== # git clone and checkout # ====================== s, out = _communicate(' '.join([ git, 'clone', '--recursive', repo, testdir ])) if s != 0: return (False, 'clone "%s" failed' % repo, out) s, out = _communicate(' '.join([ git, '-C', testdir, 'checkout', commit ])) if s != 0: return (False, 'checkout "%s" failed' % commit, out) s, out = _communicate(' '.join([ git, '-C', testdir, 'submodule', 'update' ])) if s != 0: return (False, 'submodule update failed', out) # ========= # configure # ========= builddir = os.path.join(testdir, '_build') os.makedirs(builddir) s, out = _communicate( ' '.join([ cmake, '-D', 'SELENIUM_TESTS=ON', '-D', 'CHROME_TESTS=OFF', '-D', 'FIREFOX_TESTS=ON', '-D', 'COVERAGE_TESTS=OFF', '..' ]), cwd=builddir ) if s != 0: return (False, 'cmake configure failed', out) # ============== # build and test # ============== build_script = os.path.join(builddir, 'build.cmake') kw = { 'source': testdir, 'build': builddir, 'site': _host, 'name': branch + '-' + commit[:6] } open(build_script, 'w').write( _ctest.format(**kw) ) s, out = _communicate( ctest + ' -VV -S {}'.format(build_script), cwd=builddir ) test_result = s test_output = out if test_result != 0: return (False, 'Test(s) failed', test_output) return (True, 'All tests passed!', test_output) def start_test(item, oldTest=None): if oldTest: status = { 'pass': oldTest['status']['pass'], 'output': oldTest['status']['output'], 'reason': 'Already tested in branch %s' % oldTest['branch'], 'skipped': True } else: cfg = config() basedir = os.path.expanduser(cfg['test-dir']) testdir = os.path.join(basedir, item['commit']) shutil.rmtree(testdir, ignore_errors=True) try: os.makedirs(testdir) except OSError: pass result = run_test(cfg['repo'], item['commit'], testdir, item['branch']) status = { 'pass': result[0], 'reason': result[1], 'output': result[2], 'skipped': False } return status def notify(item, status): ''' Do something to notify people, not sure what. 
''' pass def nightly(queue, results): for item in queue.find(): oldTest = results.find_one({'commit': item['commit']}) status = start_test(item, oldTest) if not oldTest: result = dict(item) result.pop('_id') result['time'] = datetime.now() r
esult['status'] = status results.insert(result) queue.remove(item) notify(item, status) def continuous(sha, branch, user, queue, results): oldTest = results.find_one({'commit': sha}) item = { 'commit
': sha, 'user': user, 'branch': branch, 'time': datetime.now() } status = start_test(item, oldTest) if not oldTest: result = dict(item) result['time'] = datetime.now() result['status'] = status results.insert(result) notify(item, status) return status def main(*args): cfg = config() cl = MongoClient( host=cfg['mongo-host'], port=cfg['mongo-port'], ) db = cl[cfg['mongo-database']] queue = db['queue'] results = db['results'] if cfg.get('kill-server'): sp.call(cfg['kill-server'], shell=True) if not len(args) or args[0] == 'nightly': nightly(queue, results) else: return continuous(*args[:3], queue=queue, results=results) if __name__ == '__main__': import sys print json.dumps(main(*sys.argv[1:]), indent=4)
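The _communicate() helper above is the usual pattern of running a shell command, folding stderr into stdout, and returning the exit code with the output; a trimmed-down version of just that pattern, leaving out the PATH prefixing and config lookup:

import subprocess as sp

def communicate(cmd, **kw):
    p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT, **kw)
    out, _ = p.communicate()
    return p.returncode, out

status, output = communicate('git --version')   # assumes git is on the PATH
print(status, output)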
bjuvensjo/scripts
vang/misc/s.py
Python
apache-2.0
1,135
0
#!/usr/bin/env python3 from argparse import ArgumentParser from re import compile from sys import argv def get_split(s): for ch in ('_', '-'): if ch in s: return s.split(ch) return compile('[A-Z]?[^A-Z]+').findall(s) def get_cases(s): split = get_split(s) capital = [w.capitalize() for w in split] lower = [w.lower() for w in split] upper = [w.upper() for w in split] return [ ''.join([lowe
r[0]] + capital[1:]), ''.join(capital), ''.join(lower), ''.join(upper), '_'.join(lower), '_'.join(upper), '-'.join(lower), '-'.join(upper), ] def get_zipped_cases(strings): return zip(*[get_cases(s) for s in strings]) def parse_args(args): parser = ArgumentParser( description='Prints various string representations of provided strings') parser.add_argument
('strings', nargs='+') return parser.parse_args(args) def main(strings): for items in get_zipped_cases(strings): print(' '.join(items)) if __name__ == '__main__': # pragma: no cover main(**parse_args(argv[1:]).__dict__)
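For a concrete sense of what get_cases() produces, a hypothetical invocation (the import path mirrors the repo layout above and assumes the package is installed):

from vang.misc.s import get_cases

print(get_cases('user_name'))
# ['userName', 'UserName', 'username', 'USERNAME',
#  'user_name', 'USER_NAME', 'user-name', 'USER-NAME']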
sidnarayanan/BAdNet
train/gen/akt/paths.py
Python
mit
125
0
basedir =
'/data/t3serv014/snarayan/deep/v_deepgen_4_akt_small/' figsdir = '/home/snarayan/public_html/figs/deepgen/
v4_akt/'
stiphyMT/plantcv
plantcv/plantcv/visualize/obj_sizes.py
Python
mit
3,006
0.003327
# Visualize an annotated image with object sizes import os import cv2 import random import numpy as np from plantcv.plantcv import params from plantcv.plantcv import color_palette from plantcv.plantcv._debug import _debug def obj_sizes(img, mask, num_objects=100): """ Label the size of objects in an image. Inputs: img = RGB or grayscale image data mask = Binary mask made from selected contours num_objects = Optional parameter to limit the number of objects that will get annotated. Returns: plotting_img = Plotting image with objects labeled by area :param img: numpy.ndarray :param mask: numpy.ndarray :param num_objects: int :return plotting_img: numpy.ndarray """ pl
otting_img = np.copy(img) # Convert grayscale images to color if len(np.shape(plotting_img)) == 2: plotting_img = cv2.cvtColor(plotting_img, cv2.COLOR_GRAY2BGR) # Store debug debug = params.debug params.debug = None # ID contours and sort them from largest to smallest id_objects, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:] sorted_objects = sorted(id_objects
, key=lambda x: cv2.contourArea(x)) # Function sorts smallest to largest so keep the last X objects listed # sorted_objects = sorted_objects[len(sorted_objects) - num_objects: len(sorted_objects)] # Reverse the sorted list to order contours from largest to smallest sorted_objects.reverse() rand_color = color_palette(num=num_objects, saved=False) random.shuffle(rand_color) label_coord_x = [] label_coord_y = [] area_vals = [] for i, contour in enumerate(sorted_objects): # Break out of the for loop once the number of objects have been plotted if i >= num_objects: break # ID and store area values and centers of mass for labeling them m = cv2.moments(contour) # Skip iteration if contour area is zero # This is needed because cv2.contourArea can be > 0 while moments area is 0. if m['m00'] == 0: continue area_vals.append(m['m00']) label_coord_x.append(int(m["m10"] / m["m00"])) label_coord_y.append(int(m["m01"] / m["m00"])) # Fill in objects with color cv2.drawContours(plotting_img, sorted_objects, i, rand_color[i], thickness=-1) # Label with area values for c, value in enumerate(area_vals): text = "{:.0f}".format(value) w = label_coord_x[c] h = label_coord_y[c] cv2.putText(img=plotting_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness) print(f"There were {max(0, len(id_objects) - num_objects)} objects not annotated.") params.debug = debug _debug(visual=plotting_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_object_sizes.png')) return plotting_img
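obj_sizes() takes each label's value and position from cv2.moments, where m00 is the contour area and m10/m00, m01/m00 give the centroid; the core of that computation on a synthetic mask, independent of PlantCV:

import cv2
import numpy as np

mask = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(mask, (50, 50), (149, 149), 255, thickness=-1)   # one filled square

contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
m = cv2.moments(contours[0])
area = m['m00']
cx, cy = int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])
print(area, (cx, cy))   # about 9800.0 and (99, 99)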
joequery/joequery.me
joequery/blog/posts/code/python-builtin-functions/simple_functions.py
Python
mit
319
0.009404
import math def eve
n_numbers_only(thelist): ''' Returns a list of even numbers in thelist ''' return [x for x in thelist if x%2 == 0] def is_perfect_square(x): ''' Returns True if x is a perfect square, False otherwise ''' thesqrt = int(math.sqrt(x)) return thesqrt * thesqrt
== x
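A quick usage check for the two helpers above (the import assumes the file is importable as simple_functions):

from simple_functions import even_numbers_only, is_perfect_square

print(even_numbers_only([1, 2, 3, 4, 5, 6]))   # [2, 4, 6]
print(is_perfect_square(49))                   # True
print(is_perfect_square(50))                   # False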
hunch/hunch-gift-app
django/contrib/gis/geos/polygon.py
Python
mit
6,672
0.003447
from ctypes import c_uint, byref from django.contrib.gis.geos.error import GEOSIndexError from django.contrib.gis.geos.geometry import GEOSGeometry from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOM_PTR from django.contrib.gis.geos.linestring import LinearRing from django.contrib.gis.geos import prototypes as capi class Polygon(GEOSGeometry): _minlength = 1 def __init__(self, *args, **kwargs): """ Initializes on an exterior ring and a sequence of holes (both instances may be either LinearRing instances, or a tuple/list that may be constructed into a LinearRing). Examples of initialization, where shell, hole1, and hole2 are valid LinearRing geometries: >>> poly = Polygon(shell, hole1, hole2) >>> poly = Polygon(shell, (hole1, hole2)) Example where a tuple parameters are used: >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (0, 10), (0, 0)), ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4))) """ if not args: raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.') # Getting the ext_ring and init_holes parameters from the argument list ext_ring = args[0] init_holes = args[1:] n_holes = len(init_holes) # If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility] if n_holes == 1 and isinstance(init_holes[0], (tuple, list)): if len(init_holes[0]) == 0: init_holes = ()
n_holes = 0 elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0] n_holes = len(init_holes) polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes) super(Polygon, self).__init__(polygon, **kwargs) def __iter__(self): "Iterates over each ring in the polygon." for i in xrange(len(self)): yield self[i] def __len__(self): "Returns the number of rings in this Polygon." return self.num_interior_rings + 1 @classmethod def from_bbox(cls, bbox): "Constructs a Polygon from a bounding box (4-tuple)." x0, y0, x1, y1 = bbox return GEOSGeometry( 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % ( x0, y0, x0, y1, x1, y1, x1, y0, x0, y0) ) ### These routines are needed for list-like operation w/ListMixin ### def _create_polygon(self, length, items): # Instantiate LinearRing objects if necessary, but don't clone them yet # _construct_ring will throw a TypeError if a parameter isn't a valid ring # If we cloned the pointers here, we wouldn't be able to clean up # in case of error. rings = [] for r in items: if isinstance(r, GEOM_PTR): rings.append(r) else: rings.append(self._construct_ring(r)) shell = self._clone(rings.pop(0)) n_holes = length - 1 if n_holes: holes = get_pointer_arr(n_holes) for i, r in enumerate(rings): holes[i] = self._clone(r) holes_param = byref(holes) else: holes_param = None return capi.create_polygon(shell, holes_param, c_uint(n_holes)) def _clone(self, g): if isinstance(g, GEOM_PTR): return capi.geom_clone(g) else: return capi.geom_clone(g.ptr) def _construct_ring(self, param, msg='Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings'): "Helper routine for trying to construct a ring from the given parameter." if isinstance(param, LinearRing): return param try: ring = LinearRing(param) return ring except TypeError: raise TypeError(msg) def _set_list(self, length, items): # Getting the current pointer, replacing with the newly constructed # geometry, and destroying the old geometry. prev_ptr = self.ptr srid = self.srid self.ptr = self._create_polygon(length, items) if srid: self.srid = srid capi.destroy_geom(prev_ptr) def _get_single_internal(self, index): """ Returns the ring at the specified index. The first index, 0, will always return the exterior ring. Indices > 0 will return the interior ring at the given index (e.g., poly[1] and poly[2] would return the first and second interior ring, respectively). CAREFUL: Internal/External are not the same as Interior/Exterior! _get_single_internal returns a pointer from the existing geometries for use internally by the object's methods. _get_single_external returns a clone of the same geometry for use by external code. """ if index == 0: return capi.get_extring(self.ptr) else: # Getting the interior ring, have to subtract 1 from the index. return capi.get_intring(self.ptr, index-1) def _get_single_external(self, index): return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid) _set_single = GEOSGeometry._set_single_rebuild _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild #### Polygon Properties #### @property def num_interior_rings(self): "Returns the number of interior rings." # Getting the number of rings return capi.get_nrings(self.ptr) def _get_ext_ring(self): "Gets the exterior ring of the Polygon." return self[0] def _set_ext_ring(self, ring): "Sets the exterior ring of the Polygon." self[0] = ring # Properties for the exterior ring/shell. 
exterior_ring = property(_get_ext_ring, _set_ext_ring) shell = exterior_ring @property def tuple(self): "Gets the tuple for each ring in this Polygon." return tuple([self[i].tuple for i in xrange(len(self))]) coords = tuple @property def kml(self): "Returns the KML representation of this Polygon." inner_kml = ''.join(["<innerBoundaryIs>%s</innerBoundaryIs>" % self[i+1].kml for i in xrange(self.num_interior_rings)]) return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
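Polygon.from_bbox() above builds a rectangular polygon straight from a 4-tuple bounding box; a hypothetical GeoDjango session (requires the GEOS library these bindings wrap):

from django.contrib.gis.geos import Polygon

poly = Polygon.from_bbox((0.0, 0.0, 10.0, 5.0))
print(poly.num_interior_rings)    # 0
print(poly.exterior_ring.tuple)   # ((0.0, 0.0), (0.0, 5.0), (10.0, 5.0), (10.0, 0.0), (0.0, 0.0))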
cogu/autosar
autosar/rte/generator.py
Python
mit
42,335
0.023597
import os import autosar.rte.partition import cfile as C import io import autosar.base import autosar.bsw.com innerIndentDefault=3 #default indendation (number of spaces) def _genCommentHeader(comment): lines = [] lines.append('/*********************************************************************************************************************') lines.append('* %s'%comment) lines.append('*********************************************************************************************************************/') return lines def _genCommentHeader2(comment): """ Same as _genCommentHeader but returns a C sequence instead of raw strings """ code = C.sequence() code.append(C.line('/*********************************************************************************************************************')) code.append(C.line('* %s'%comment)) code.append(C.line('*********************************************************************************************************************/')) return code class TypeGenerator: def __init__(self, partition, useDefaultTypes=True): self.partition = partition self.defaultTypes = {} if useDefaultTypes: self._initDefaultType() def generate(self, dest_dir = '.', file_name='Rte_Type.h'): """ Generates Rte_Type.h Note: The last argument has been deprecated and is no longer in use """ if self.partition.isFinalized == False: self.partition.finalize() file_path = os.path.join(dest_dir, file_name) with io.open(file_path, 'w', newline='\n') as fp: hfile=C.hfile(file_name) hfile.code.extend([C.line(x) for x in _genCommentHeader('Includes')]) hfile.code.append(C.include("Std_Types.h")) hfile.code.append(C.blank()) (basicTypes,complexTypes,modeTypes) = self.partition.types.getTypes() hfile.code.extend([C.line(x) for x in _genCommentHeader('Data Type Definitions')]) hfile.code.append(C.blank()) ws = self.partition.ws unusedDefaultTypes = self._findUnusedDefaultTypes(ws, basicTypes) first=True for ref in sorted(basicTypes)+sorted(complexTypes): dataType = ws.find(ref) if dataType is not None: typedef = None if first: first=False else: hfile.code.append(C.blank()) hfile.code.append('#define Rte_TypeDef_%s'%dataType.name) if isinstance(dataType,autosar.datatype.BooleanDataType): typedef = C.typedef('boolean', dataType.name) hfile.code.append(C.statement(typedef)) elif isinstance(dataType,autosar.datatype.IntegerDataType): valrange = dataType.maxVal-dataType.minVal bitcount = valrange.bit_length() typename = dataType.name basetype = self._typename(bitcount,dataType.minVal) typedef = C.typedef(basetype, typename) hfile.code.append(C.statement(typedef)) isUnsigned = True if basetype in ('uint8','uint16','uint32') else False if isUnsigned: minval=str(dataType.minVal)+'u' maxval=str(dataType.maxVal)+'u' else: minval=str(dataType.minVal) maxval=str(dataType.maxVal) hfile.code.append('#define %s_LowerLimit ((%s)%s)'%(typename,typename,minval)) hfile.code.append('#define %s_UpperLimit ((%s)%s)'%(typename,typename,maxval)) if dataType.compuMethodRef is not None: compuMethod = ws.find(dataType.compuMethodRef) if compuMethod is not None: lines1=[] lines2=[] if isinstance(compuMethod,autosar.datatype.CompuMethodConst): for elem in compuMethod.elements: if isUnsigned: value = str(elem.upperLimit)+'u' else: value = str(elem.upperLimit) lines1.append('#define RTE_CONST_%s (%s)'%(elem.textValue,value)) lines2.append('#define %s ((%s)%s)'%(elem.textValue,typename,value)) if len(lines2)>0: tmp=lines1+[C.blank()]+lines2 else: tmp=lines1 for line in tmp: hfile.code.append(line) else: raise 
ValueError(dataType.compuMethodRef) elif isinstance(dataType, autosar.datatype.RecordDataType): body = C.block(innerIndent=innerIndentDefault) for elem in dataType.elements: childType = ws.find(elem.typeRef, role='DataType') body.append(C.statement(C.variable(elem.name, childType.name))) struct = C.struct(None,body, typedef=dataType.name) hfile.code.append(C.statement(struct)) elif isinstance(dataType, autosar.datatype.StringDataType): hfile.code.append('typedef uint8 %s[%d];'%(dataType.name, dataType.length+1)) elif isinstance(dataType, autosar.datatype.ArrayDataType): childType = ws.find(dataType.typeRef, role='DataType') if childType is None: raise ValueError('invalid type reference: '+dataType.typeRef) hfile.code.append('typedef %s %s[%d];'%(childType.name, dataType.name, dataType.length)) elif isinstance(dataType, auto
sar.datatype.RealDataType): if dataType.encoding == 'DOUBLE': platform_typename = 'float64' else: platform_typename = 'float32' hfile.code.append('typedef %s %s;'%(platform_typename, dataType.name)) else:
raise NotImplementedError(type(dataType)) #sys.stderr.write('not implemented: %s\n'%str(type(dataType))) else: raise ValueError(ref) if len(modeTypes)>0: lines=_genCommentHeader('Mode Types') tmp=[] hfile.code.extend(lines) first=True for ref in modeTypes: if first: first=False else: tmp.append(C.blank()) modeType = ws.find(ref) hfile.code.append(C.statement(C.typedef('uint8', 'Rte_ModeType_'+modeType.name))) for i,elem in enumerate(modeType.modeDeclarations): # define RTE_MODE_EcuM_Mode_POST_RUN ((Rte_ModeType_EcuM_Mode)0) tmp.append(C.define('RTE_MODE_%s_%s'%(modeType.name,elem.name),'((Rte_ModeType_EcuM_Mode)%d)'%i)) hfile.code.append(C.blank()) hfile.code.extend(tmp) if len(unusedDefaultTypes)>0: hfile.code.append(C.blank(2)) hfile.code.append(C.line('#ifndef RTE_SUPPRESS_UNUSED_DATATYPES')) for name in sorted(unusedDefaultTypes): hfile.code.append(C.blank()) hfile.code.extend(self.defaultTypes[name]) hfile.code.append(C.blank()) hfile.code.append(C.line('#endif')) fp.write('\n'.join(hfile.lines())) fp.write('\n') def _initDefaultType(self): self.defaultTypes['Boolean']=C.sequence().extend([C.statement(C.typedef('boolean', 'Boolean'))]) self.defaultTypes['UInt8']=C.sequence().extend([C.statement(C.typedef('uint8', 'UInt8')), C.define('UInt8_LowerLimit', '((UInt8)0u)'), C.define('UInt8_UpperLimit', '((UInt8)255u)')]) self.defaultTypes['UInt16']=C.s
crossbario/autobahn-testsuite
autobahntestsuite/autobahntestsuite/case/case9_4_3.py
Python
apache-2.0
1,271
0.015736
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain
a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##########
#####################################################################

from case9_4_1 import Case9_4_1


class Case9_4_3(Case9_4_1):

   DESCRIPTION = """Send fragmented binary message message with message payload of length 4 * 2**20 (4M). Sent out in fragments of 1k."""

   EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""

   def init(self):
      self.DATALEN = 4 * 2**20
      self.FRAGSIZE = 1 * 2**10
      self.PAYLOAD = "*" * self.DATALEN
      self.WAITSECS = 100
      self.reportTime = True
evrenkutar/blog
blog/urls.py
Python
gpl-2.0
299
0.013378
__au
thor__ = 'evren kutar'

from django.conf.urls import patterns, url

urlpatterns = patterns('',
                       url(r'^(?P<cslug>[\w-]+)/$', 'blog.views.post_category', name='category'),
                       url(r'^(?P<cslug>[\w-]+)/(?P<slug>[\w-]+)/$', 'blog.views.po
st', name='post') )
cghall/salesforce-reporting
salesforce_reporting/parsers.py
Python
mit
10,296
0.003594
class ReportParser: """ Parser with generic functionality for all Report Types (Tabular, Summary, Matrix) Parameters ---------- report: dict, return value of Connection.get_report() """ def __init__(self, report): self.data = report self.type = self.data["reportMetadata"]["reportFormat"] self.has_details = self.data["hasDetailRows"] def get_grand_total(self): return self.data["factMap"]["T!T"]["aggregates"][0]["value"] @staticmethod def _flatten_record(record): return [field["label"] for field in record] def _get_field_labels(self): columns = self.data["reportMetadata"]["detailColumns"] column_details = self.data["reportExtendedMetadata"]["detailColumnInfo"] return {key: column_details[value]["label"] for key, value in enumerate(columns)} def records(self): """ Return a list of all records included in the report. If detail rows are not included in the report a ValueError is returned instead. Returns ------- records: list """ if not self.has_details: raise ValueError('Report does not include details so cannot access individual records') records = [] fact_map = self.data["factMap"] for group in fact_map.values(): rows = group["rows"] group_records = (self._flatten_record(row["dataCells"]) for row in rows) for record in group_records: records.append(record) return records def records_dict(self): """ Return a list of dictionaries for all records in the report in {field: value} format. If detail rows are not included in the report a ValueError is returned instead. Returns ------- records: list of dictionaries in {field: value, field: value...} format """ if not self.has_details: raise ValueError('Report does not include details so cannot access individual records') records = [] fact_map = self.data["factMap"] field_labels = self._get_field_labels() for group in fact_map.values(): rows = group["rows"] group_records = (self._flatten_record(row["dataCells"]) for row in rows) for record in group_records: labelled_record = {field_labels[key]: value for key, value in enumerate(record)} records.append(labelled_record) return records class MatrixParser(ReportParser): """ Parser with specific functionality for matrix reports Parameters ---------- report: dict, return value of Connection.get_report() """ def __init__(self, report): super().__init__(report) self.data = report self._check_type() def _check_type(self): expected = "MATRIX" if self.type != expected: raise ValueError("Incorrect report type. Expected {}, received {}.".format(expected, self.type)) else: pass def get_col_total(self, col_label, default=None): """ Return the total for the specified column. The default arg makes it possible to specify the return value if the column label is not found. Parameters ---------- col_label: string default: string, optional, default None If column is not found determines the return value Returns ------- total: int """ grp_across_list = self.data["groupingsAcross"]["groupings"] col_dict = {grp['label']: int(grp['key']) for grp in grp_across_list} try: col_key = col_dict[col_label] return self.data["factMap"]['T!{}'.format(col_key)]["aggregates"][0]["value"] except KeyError: return default def get_row_total(self, row_label, defau
lt=None): """ Return the total for the specified row. The default arg makes it possible to specify the return value if the column label is not found. Parameters ---------- row_
label: string default: string, optional, default None If row is not found determines the return value Returns ------- total: int """ grp_down_list = self.data["groupingsDown"]["groupings"] row_dict = {grp["label"]: int(grp["key"]) for grp in grp_down_list} try: row_key = row_dict[row_label] return self.data["factMap"]['{}!T'.format(row_key)]["aggregates"][0]["value"] except KeyError: return default @staticmethod def _convert_parameter(parameter): if type(parameter) is str: new_parameter = [parameter] elif parameter is None: new_parameter = [] elif type(parameter) is list: new_parameter = parameter else: raise ValueError return new_parameter @staticmethod def _get_subgroup_index(group_above, subgroup_name): subgroups_with_index = {subgroup['label']: index for index, subgroup in enumerate(group_above)} index = subgroups_with_index[subgroup_name] return index def _get_grouping(self, groups_of_interest, start_grouping, count): current_grouping = start_grouping while count > 1: group_name = groups_of_interest[count - 2] subgroup_index = self._get_subgroup_index(current_grouping, group_name) current_grouping = current_grouping[subgroup_index]["groupings"] count -= 1 self._get_grouping(group_name, current_grouping, count) return current_grouping def _get_static_key(self, groups_of_interest, static_grouping_key): grouping_depth = len(groups_of_interest) group_index = grouping_depth - 1 top_grouping = self.data[static_grouping_key]["groupings"] grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth) keys = {group['label']: group['key'] for group in grouping} static_key = keys[groups_of_interest[group_index]] return static_key def _get_dynamic_keys(self, groups_of_interest, dynamic_grouping_key): grouping_depth = len(groups_of_interest) + 1 top_grouping = self.data[dynamic_grouping_key]["groupings"] grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth) dynamic_keys = [group["key"] for group in grouping] labels = [group["label"] for group in grouping] return {"keys": dynamic_keys, "labels": labels} def _build_keys(self, static_groups_of_interest, dynamic_groups_of_interest, static_grouping_key, dynamic_grouping_key): static_key = self._get_static_key(static_groups_of_interest, static_grouping_key) dynamic_keys = self._get_dynamic_keys(dynamic_groups_of_interest, dynamic_grouping_key) keys = [] if static_grouping_key == "groupingsAcross": for el in dynamic_keys["keys"]: key = "{}!{}".format(el, static_key) keys.append(key) else: for el in dynamic_keys["keys"]: key = "{}!{}".format(static_key, el) keys.append(key) return {"keys": keys, "labels": dynamic_keys["labels"]} def _series(self, static_groups_of_interest, static_grouping_key, dynamic_grouping_key, dynamic_groups_of_interest=None, value_position=0): static_groups_of_interest = self._convert_parameter(static_groups_of_interest) dynamic_groups_of_interest = self._convert_parameter(dynamic_groups_of_interest) keys_labels = self._build_keys(static_groups_of_interest, dynamic_groups_of_interest, static_grouping_key, dynamic_grouping_key) labels = keys_labels["labels"] values = [] for key in keys_labels["keys"]: value = self.data["factMap"][key]["aggregates"][value_position]["value"] values.append(value) se
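A small sketch of the factMap key convention the parser above depends on: keys are built as '<row grouping key>!<column grouping key>', with 'T' standing for a row or column total (so 'T!T' is the grand total used by get_grand_total). The dictionary below is an assumed miniature illustration, not data from a real Salesforce response:

# Illustrative factMap keys for a 1x1 matrix report (grouping keys are stringified indices).
fact_map_keys = {
    'T!T': 'grand total',
    '0!T': 'total of the first row grouping',
    'T!0': 'total of the first column grouping',
    '0!0': 'cell value for first row grouping x first column grouping',
}
print(fact_map_keys['T!T'])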
twonds/trinine
trinine/t9.py
Python
mit
4,257
0.000235
""" """ import imp import os import sys KEY_MAP = {0: [], 1: [], 2: [u'a', u'b', u'c'], 3: [u'd', u'e', u'f'], 4: [u'g', u'h', u'i'], 5: [u'j', u'k', u'l'], 6: [u'm', u'n', u'o'], 7: [u'p', u'q', u'r', u's'], 8: [u't', u'u', u'v'], 9: [u'w', u'x', u'y', u'z'] } class T9: def __init__(self, data_dir="./data"): self.data_dir = data_dir self.data_module = self.data_dir+"/data.py" self.suggest_length = 10 self.word_length = 4 def load(self): """ Load up words and tri data structure that is saved to disk. If it does not exis
t then use existing data to build it. """ if os.path.exists(self.data_module): self.data = imp.load_source('data', self.data_module) else: msg = "WARNING: Data module is not loaded. "
msg += "Please build by running `make build-data`" print msg sys.exit(1) def map_number(self, number): """ Map numbers from a dial pad to characters. @param number: A string of numbers dialed from a key pad. @type number: C{int} """ ret_chars = [] for num in str(number): chars = KEY_MAP[int(num)] if not chars: break ret_chars.append(chars) return ret_chars def _sort(self, words): return sorted(words, key=lambda x: self.data.WORDS.get(x, 0), reverse=True) def map_words(self, number): """ Map a string of numbers from a phone's key pad to possible words. @param number: A string of numbers dialed from a key pad. @type number: C{int} """ number_words = [] for i, chars in enumerate(self.map_number(number)): if i == 0: number_words = chars else: new_words = [] for word in number_words: for c in chars: new_word = word+c # Only use words in our word trie if self.data.TRIE.keys(new_word): new_words.append(new_word) number_words = new_words return number_words def words(self, number): """ Given a number return possible word combinations sorted by usage frequency. @param number: A string of numbers dialed from a key pad. @type number: C{int} """ ret_words = [] number_words = self.map_words(number) # Sort and filter words, adding extra words if our options are slim suggested_words = [] for word in self._sort(number_words): if word in self.data.WORDS: ret_words.append(word) word_keys = filter(lambda x: x != word, self._sort(self.data.TRIE.keys(word))) suggested_words += word_keys[:self.suggest_length] ret_words = ret_words + self._sort(suggested_words) return ret_words[:self.suggest_length] def main_user_loop(t): while True: try: number = int(input("Enter a number: ")) except EOFError: break except SyntaxError: break except TypeError: if number != 'quit': print "Invalid number" break for word in t.words(number): print word def stdin_loop(t): for number in sys.stdin: if not number.strip(): break number = int(number.strip()) for word in t.words(number): print word def main(data_dir, user_input=None): t = T9(data_dir=data_dir) # Load data module. Remember to build it. t.load() if user_input: main_user_loop(t) else: stdin_loop(t) if __name__ == '__main__': if len(sys.argv) == 3: main(sys.argv[1], sys.argv[2]) elif len(sys.argv) == 2: main(sys.argv[1]) else: usage = "Usage: {0} <data_directory>" print usage.format(sys.argv[0])
prathamtandon/g4gproblems
Arrays/max_sum_not_adjacent.py
Python
mit
2,744
0.001093
import unittest

"""
Given an array of positive integers, find the maximum sum of a sub-sequence with the
constraint that no two numbers in the sequence should be adjacent in the array.

Input: 3 2 7 10
Output: 13 (3 + 10)

Input 3 2 5 10 7
Output: 15 (3 + 5 + 7)
"""

"""
Approach:
1. Similar to 0-1 Knapsack problem.
2. F(S,i) = max(S[i] + F(S,i+2), F(S,i+1), S[i])
3. That is, for every element there are three cases:
   (a) We add that element to previous best which is not adjacent
   (b) We do not include the element in best subsequence, ie take adjacent best
   (c) We start a new subsequence from the element
   dp[i] = max(dp[i-2] + x[i], dp[i-1], x[i])
   Finally, dp[n-1] will have the final answer.
"""

"""
Approach:
1. This one uses just 2 variables.
2. Firs
t variable tracks the maximum sum obtained by excluding current element
3. Second variable is current element added to first variable
4. Return max(first variable, second variable)
"""


def max_sum_not_adjacent_helper(list_of_numbers, index):
    if
 index >= len(list_of_numbers):
        return 0
    return max(list_of_numbers[index] + max_sum_not_adjacent_helper(list_of_numbers, index+2),
               max_sum_not_adjacent_helper(list_of_numbers, index+1))


def max_sum_not_adjacent(list_of_numbers):
    return max_sum_not_adjacent_helper(list_of_numbers, 0)


def max_sum_not_adjacent_iterative(list_of_numbers):
    # let excluding = Max sum excluding previous element
    # and including = Max sum including previous element
    # Then, max sum excluding current element = Max(excluding, including)
    # And max sum including current element = excluding + arr[i]
    including = list_of_numbers[0]
    excluding = 0
    for i in range(1, len(list_of_numbers)):
        temp = max(including, excluding)
        including = excluding + list_of_numbers[i]
        excluding = temp
    return max(including, excluding)


class TestMaxSumNotAdjacent(unittest.TestCase):
    def test_max_sum_not_adjacent(self):
        list_of_numbers = [3, 2, 7, 10]
        self.assertEqual(max_sum_not_adjacent(list_of_numbers), 13)
        list_of_numbers = [3, 2, 5, 10, 7]
        self.assertEqual(max_sum_not_adjacent(list_of_numbers), 15)
        list_of_numbers = [5, 5, 10, 40, 50, 35]
        self.assertEqual(max_sum_not_adjacent(list_of_numbers), 80)

    def test_max_sum_not_adjacent_iterative(self):
        list_of_numbers = [3, 2, 7, 10]
        self.assertEqual(max_sum_not_adjacent_iterative(list_of_numbers), 13)
        list_of_numbers = [3, 2, 5, 10, 7]
        self.assertEqual(max_sum_not_adjacent_iterative(list_of_numbers), 15)
        list_of_numbers = [5, 5, 10, 40, 50, 35]
        self.assertEqual(max_sum_not_adjacent_iterative(list_of_numbers), 80)
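A short worked trace of the two-variable version above on the first test input [3, 2, 7, 10] (including starts at 3, excluding at 0):

i=1, value 2:  temp = max(3, 0) = 3,   including = 0 + 2 = 2,    excluding = 3
i=2, value 7:  temp = max(2, 3) = 3,   including = 3 + 7 = 10,   excluding = 3
i=3, value 10: temp = max(10, 3) = 10, including = 3 + 10 = 13,  excluding = 10
result = max(13, 10) = 13, i.e. the non-adjacent pair 3 + 10.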
matttilton/pySteg
pySteg.py
Python
mit
3,694
0.000541
"""This utility aims to provide an easy way to encode text inside of images.""" # TODO Write test cases. # TODO Write own version of cImage. # TODO Add interface. # TODO Add offset. # TODO Add random seed. from cImage import FileImage from cImage import EmptyImage class Encode: """Class""" def __init__(self, key_file_name, result_file_name, key_directory, result_directory): self.EncodedFileName = result_file_name self.keyDirectory = key_
directory self.key = FileImage(self.keyDirectory + key_file_name) self.resultDirectory = result_directory def Encode(self, data): """Take binary data and add it to an image.""" result = EmptyImage(self.key.getWidth(), self.key.getHeight()) count = 0 for row in range(self.key.getHeight()): for col in range(self.key.getWidth()): keyPixel = self.key.getPixel(col, row) if count < (len
(data)): if (int(data[count]) == 1): newPixel = self.flipLSB(keyPixel) else: newPixel = keyPixel count += 1 result.setPixel(col, row, newPixel) else: result.setPixel(col, row, keyPixel) result.save(self.resultDirectory + self.EncodedFileName) def backup(self): try: self.backup = FileImage(self.EncodedFileName + ".bak") except: self.backup = None def textToBinary(self, data): """Convert text to binary.""" result = ''.join(format(ord(x), '08b') for x in data) return str(result) def checkStorageSize(self): """Check maximum amount of data that can be encoded into an image.""" width = self.key.getWidth() height = self.key.getHeight() maxSize = width * height return maxSize def flipLSB(self, pixel): """Invert the LSB of the red value of a pixel.""" tmp = pixel.getRed() if (tmp > 120): tmp -= 120 else: tmp += 120 pixel.setRed(tmp) return pixel class Decode: def __init__(self, key_file_name, result_file_name, key_directory, result_directory): self.EncodedFileName = result_file_name self.keyDirectory = key_directory self.key = FileImage(key_file_name) self.resultDirectory = result_directory def Decode(self): """Extract binary data from image.""" encoded = FileImage(self.resultDirectory + self.EncodedFileName) result = [] for row in range(encoded.getHeight()): for col in range(encoded.getWidth()): encodedPixel = encoded.getPixel(col, row) keyPixel = self.key.getPixel(col, row) # 1 if encodedPixel.getRed() != keyPixel.getRed(): result.append(1) # 0 else: result.append(0) return result def binaryToText(self, data): """Convert binary to text.""" binaryString = ''.join(str(x) for x in data) binaryList = [binaryString[i:i+8] for i in range(0, len(binaryString), 8)] intList = [] for each in binaryList: intList.append(int(each, 2)) charList = [] for each in intList: charList.append(str(chr(each))) cleanCharList = [] for each in charList: if each is not '\x00': cleanCharList.append(each) result = ''.join(str(x) for x in cleanCharList) return result
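A minimal round-trip sketch of the bit conversion used by Encode.textToBinary and Decode.binaryToText above, written without the cImage dependency; the stand-alone function names are illustrative, not part of the repository file:

def text_to_bits(text):
    # 'hi' -> '0110100001101001', as in Encode.textToBinary
    return ''.join(format(ord(ch), '08b') for ch in text)

def bits_to_text(bits):
    # Split into 8-bit chunks and drop NUL padding, as in Decode.binaryToText
    chunks = [bits[i:i + 8] for i in range(0, len(bits), 8)]
    return ''.join(chr(int(b, 2)) for b in chunks if int(b, 2) != 0)

assert bits_to_text(text_to_bits('hi')) == 'hi'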
Iconoclasteinc/tgit
tgit/countries.py
Python
gpl-3.0
11,846
0.000084
# -*- coding: utf-8 -*- # # TGiT, Music Tagger for Professionals # Copyright (C) 2013 Iconoclaste Musique Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. COUNTRIES = { "AF": "Afghanistan", "AX": "Aland Islan", "AL": "Albania", "DZ": "Algeria", "AS": "American Samoa", "AD": "Andorra", "AO": "Angola", "AI": "Anguilla", "AQ": "Antarctica", "AG": "Antigua and Barbuda", "AR": "Argentina", "AM": "Armenia", "AW": "Aruba", "AU": "Australia", "AT": "Austria", "AZ": "Azerbaijan", "BS": "Bahamas", "BH": "Bahrain", "BD": "Bangladesh", "BB": "Barbados", "BY": "Belarus", "BE": "Belgium", "BZ": "Belize", "BJ": "Benin", "BM": "Bermuda", "BT": "Bhutan", "BO": "Bolivia", "BA": "Bosnia and Herzegovina", "BW": "Botswana", "BV": "Bouvet Island", "BR": "Brazil", "VG": "British Virgin Islands", "IO": "British Indian Ocean Territory", "BN": "Brunei Darussalam", "BG": "Bulgaria", "BF": "Burkina Faso", "BI": "Burundi", "KH": "Cambodia", "CM": "Cameroon", "CA": "Canada", "CV": "Cape Verde", "KY": "Cayman Islands", "CF": "Central African Republic", "TD": "Chad", "CL": "Chile", "CN": "China", "HK": "Hong Kong, Special Administrative Region of China", "MO": "Macao, Special Administrative Region of China", "CX": "Christmas Island", "CC": "Cocos (Keeling) Islands", "CO": "Colombia", "KM": "Comoros", "CG": "Congo (Brazzaville)", "CD": "Congo, Democratic Republic of the", "CK": "Cook Islands", "CR": "Costa Rica", "CI": "Côte d'Ivoire", "HR": "Croatia", "CU": "Cuba", "CY": "Cyprus", "CZ": "Czech Republic", "DK": "Denmark", "DJ": "Djibouti", "DM": "Dominica", "DO": "Dominican Republic", "EC": "Ecuador", "EG": "Egypt", "SV": "El Salvador", "GQ": "Equatorial Guinea", "ER": "Eritrea", "EE": "Estonia", "ET": "Ethiopia", "FK": "Falkland Islands (Malvinas)", "FO": "Faroe Islands", "FJ": "Fiji", "FI": "Finland", "FR": "France", "GF": "French Guiana", "PF": "French Polynesia", "TF": "French Southern Territories", "GA": "Gabon", "GM": "Gambia", "GE": "Georgia", "DE": "Germany", "GH": "Ghana", "GI": "Gibraltar", "GR": "Greece", "GL": "Greenland", "GD": "Grenada", "GP": "Guadeloupe", "GU": "Guam", "GT": "Guatemala", "GG": "Guernsey", "GN": "Guinea", "GW": "Guinea-Bissau", "GY": "Guyana", "HT": "Haiti", "HM": "Heard Island and Mcdonald Islands", "VA": "Holy See (Vatican City State)", "HN": "Honduras", "HU": "Hungary", "IS": "Iceland", "IN": "India", "ID": "Indonesia", "IR": "Iran, Islamic Republic of", "IQ": "Iraq", "IE": "Ireland", "IM": "Isle of Man", "IL": "Israel", "IT": "Ita
ly", "JM": "Jamaica", "JP": "Japan", "JE": "Jersey", "JO": "Jordan", "KZ": "Kazakhstan", "KE": "Kenya", "KI": "Kiribati", "KP": "Korea, Democratic People's Republic of", "KR": "Korea, Republic of",
"KW": "Kuwait", "KG": "Kyrgyzstan", "LA": "Lao PDR", "LV": "Latvia", "LB": "Lebanon", "LS": "Lesotho", "LR": "Liberia", "LY": "Libya", "LI": "Liechtenstein", "LT": "Lithuania", "LU": "Luxembourg", "MK": "Macedonia, Republic of", "MG": "Madagascar", "MW": "Malawi", "MY": "Malaysia", "MV": "Maldives", "ML": "Mali", "MT": "Malta", "MH": "Marshall Islands", "MQ": "Martinique", "MR": "Mauritania", "MU": "Mauritius", "YT": "Mayotte", "MX": "Mexico", "FM": "Micronesia, Federated States of", "MD": "Moldova", "MC": "Monaco", "MN": "Mongolia", "ME": "Montenegro", "MS": "Montserrat", "MA": "Morocco", "MZ": "Mozambique", "MM": "Myanmar", "NA": "Namibia", "NR": "Nauru", "NP": "Nepal", "NL": "Netherlands", "AN": "Netherlands Antilles", "NC": "New Caledonia", "NZ": "New Zealand", "NI": "Nicaragua", "NE": "Niger", "NG": "Nigeria", "NU": "Niue", "NF": "Norfolk Island", "MP": "Northern Mariana Islands", "NO": "Norway", "OM": "Oman", "PK": "Pakistan", "PW": "Palau", "PS": "Palestinian Territory, Occupied", "PA": "Panama", "PG": "Papua New Guinea", "PY": "Paraguay", "PE": "Peru", "PH": "Philippines", "PN": "Pitcairn", "PL": "Poland", "PT": "Portugal", "PR": "Puerto Rico", "QA": "Qatar", "RE": "Réunion", "RO": "Romania", "RU": "Russian Federation", "RW": "Rwanda", "BL": "Saint-Barthélemy", "SH": "Saint Helena", "KN": "Saint Kitts and Nevis", "LC": "Saint Lucia", "MF": "Saint-Martin (French part)", "PM": "Saint Pierre and Miquelon", "VC": "Saint Vincent and Grenadines", "WS": "Samoa", "SM": "San Marino", "ST": "Sao Tome and Principe", "SA": "Saudi Arabia", "SN": "Senegal", "RS": "Serbia", "SC": "Seychelles", "SL": "Sierra Leone", "SG": "Singapore", "SK": "Slovakia", "SI": "Slovenia", "SB": "Solomon Islands", "SO": "Somalia", "ZA": "South Africa", "GS": "South Georgia and the South Sandwich Islands", "SS": "South Sudan", "ES": "Spain", "LK": "Sri Lanka", "SD": "Sudan", "SR": "Suriname", "SJ": "Svalbard and Jan Mayen Islands", "SZ": "Swaziland", "SE": "Sweden", "CH": "Switzerland", "SY": "Syrian Arab Republic (Syria)", "TW": "Taiwan, Republic of China", "TJ": "Tajikistan", "TZ": "Tanzania, United Republic of", "TH": "Thailand", "TL": "Timor-Leste", "TG": "Togo", "TK": "Tokelau", "TO": "Tonga", "TT": "Trinidad and Tobago", "TN": "Tunisia", "TR": "Turkey", "TM": "Turkmenistan", "TC": "Turks and Caicos Islands", "TV": "Tuvalu", "UG": "Uganda", "UA": "Ukraine", "AE": "United Arab Emirates", "GB": "United Kingdom", "US": "United States of America", "UM": "United States Minor Outlying Islands", "UY": "Uruguay", "UZ": "Uzbekistan", "VU": "Vanuatu", "VE": "Venezuela (Bolivarian Republic of)", "VN": "Viet Nam", "VI": "Virgin Islands, US", "WF": "Wallis and Futuna Islands", "EH": "Western Sahara", "YE": "Yemen", "ZM": "Zambia", "ZW": "Zimbabwe" } ISO3166_2_A2_TO_ISO3166_2_A3 = { "AF": "AFG", "AX": "ALA", "AL": "ALB", "DZ": "DZA", "AS": "ASM", "AD": "AND", "AO": "AGO", "AI": "AIA", "AQ": "ATA", "AG": "ATG", "AR": "ARG", "AM": "ARM", "AW": "ABW", "AU": "AUS", "AT": "AUT", "AZ": "AZE", "BS": "BHS", "BH": "BHR", "BD": "BGD", "BB": "BRB", "BY": "BLR", "BE": "BEL", "BZ": "BLZ", "BJ": "BEN", "BM": "BMU", "BT": "BTN", "BO": "BOL", "BA": "BIH", "BW": "BWA", "BV": "BVT", "BR": "BRA", "VG": "VGB", "IO": "IOT", "BN": "BRN", "BG": "BGR", "BF": "BFA", "BI": "BDI", "KH": "KHM", "CM": "CMR", "CA": "CAN", "CV": "CPV", "KY": "CYM", "CF": "CAF", "TD": "T
pyseed/objify
objify/test/test_core.py
Python
mit
13,959
0.002651
#!/usr/bin/env python # -*- coding: utf-8 -*- """ type 'pytest -v' to run u test series """ import codecs import json import os import pytest import tempfile import sys sys.path.insert(1, os.path.join(sys.path[0], '..')) import core class TestHelpers: @staticmethod def file_read(fd): """ from current descript
or, create a new descriptor in read mode and read file content :return file content :rtype str """ with codecs.open(fd.name, 'r', 'utf-8') as f: res = f.read() return res @staticmethod def same_class(instance, should_cls): return str(instance.__class__) == str(should_cls) @staticmetho
d def json_compare(a, b): return json.dumps(a) == json.dumps(b) class Fake(object): pass @pytest.fixture() def fixture_dict_data(): return dict( id=1, active=True, name='foobar', nested=dict(id=1, name='nested'), items=[dict(id=1, name='item1'),], ) def fixture_dict_data_check_matching(o, data): assert o # check __dict__ assert isinstance(o.__dict__, dict) assert len(o.__dict__.keys()) == len(data.keys()) assert o.__dict__['id'] == data['id'] assert o.__dict__['active'] == data['active'] assert o.__dict__['name'] == data['name'] assert TestHelpers.same_class(o.__dict__['nested'], core.Object) assert len(o.__dict__['nested'].__dict__.keys()) == len(data['nested'].keys()) assert o.__dict__['nested'].__dict__['id'] == data['nested']['id'] assert o.__dict__['nested'].__dict__['name'] == data['nested']['name'] assert isinstance(o.__dict__['items'], list) assert len(o.__dict__['items']) == len(data['items']) assert TestHelpers.same_class(o.__dict__['items'][0], core.Object) assert o.__dict__['items'][0].__dict__['id'] == data['items'][0]['id'] assert o.__dict__['items'][0].__dict__['name'] == data['items'][0]['name'] # check attrs assert hasattr(o, 'id') assert hasattr(o, 'active') assert hasattr(o, 'name') assert hasattr(o, 'nested') assert hasattr(o, 'items') assert o.id == data['id'] assert o.active == data['active'] assert o.name == data['name'] assert TestHelpers.same_class(o.nested, core.Object) assert hasattr(o.nested, 'id') assert hasattr(o.nested, 'name') assert o.nested.id == data['nested']['id'] assert o.nested.name == data['nested']['name'] assert isinstance(o.items, list) assert len(o.items) == len(data['items']) assert hasattr(o.items[0], 'id') assert hasattr(o.items[0], 'name') assert o.items[0].id == data['items'][0]['id'] assert o.items[0].name == data['items'][0]['name'] @pytest.fixture() def fixture_json_data(): return json.dumps(dict( id=1, active=True, name='foobar', nested=dict(id=1, name='nested'), items=[dict(id=1, name='item1')], )) @pytest.fixture() def fixture_repr_data(): return "<class 'core.Object'>, 5 attrs: active: True, id: 1," \ " items: [<class 'core.Object'>, 2 attrs: id: 1, name: 'item1']," \ " name: 'foobar', nested: <class 'core.Object'>, 2 attrs: id: 1, name: 'nested'" @pytest.fixture() def fixture_str_data(): return "<class 'core.Object'>, 5 attrs: active: True, id: 1," \ " items: [{'id': 1, 'name': 'item1'}], name: 'foobar'," \ " nested: {'id': 1, 'name': 'nested'}" @pytest.fixture() def fixture_update_merge_data(): return { 'data': {'foo': {'bar': {'message': 'foobar'}}}, 'data2': { 'foo': {'bar': {'color': 'green'}}, 'foo2': {'bar': {'message': 'foobar 2', 'color': 'orange'}}, }, 'merge': { 'foo': {'bar': {'message': 'foobar', 'color': 'green'}}, 'foo2': {'bar': {'message': 'foobar 2', 'color': 'orange'}}, }, } @pytest.fixture() def fixture_config_file(request): fd = tempfile.NamedTemporaryFile(mode='w', suffix='.ini', delete=False) with fd: fd.write(""" [foo] foo1=Fee foo2=Fie [bar] bar1=Foe bar2=Foo """) def delete(): if not fd.closed: fd.close() os.remove(fd.name) request.addfinalizer(delete) return fd @pytest.fixture() def fixture_config_file_expected_data(): return dict( foo=dict(foo1='Fee', foo2='Fie'), bar=dict(bar1='Foe', bar2='Foo'), ) class Test01ObjectContract(): def test_00_of_class_ko(self): assert not core.Object.of_class(None) assert not core.Object.of_class(False) assert not core.Object.of_class(True) assert not core.Object.of_class(1) assert not core.Object.of_class('a') assert not core.Object.of_class(object()) assert not 
core.Object.of_class(Fake()) class Test02ObjectConstructor(): def test_00_contract_ko(self): with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT): core.Object(False) with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT): core.Object(True) with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT): core.Object(1) with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT): core.Object('a') with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT): core.Object(object()) def test_01_empty(self): o = core.Object() assert o # alias o = core.o() assert o o = core.Object(None) assert o o = core.Object(dict()) assert o def test_02_of_class(self): assert core.Object.of_class(core.Object()) assert core.Object.of_class(core.o()) @pytest.mark.usefixtures('fixture_dict_data') def test_03_from_dict(self, fixture_dict_data): fixture_dict_data_check_matching(core.Object(fixture_dict_data), fixture_dict_data) fixture_dict_data_check_matching(core.o(fixture_dict_data), fixture_dict_data) # get_dict: will be used for following test so at serie start @pytest.mark.usefixtures('fixture_dict_data') def test_04_get_dict(self, fixture_dict_data): o = core.o(fixture_dict_data) assert o.get_dict() == fixture_dict_data def test_05_kwargs(self): o = core.o(id=1, name='kwarg') assert o.get_dict() == dict(id=1, name='kwarg') o = core.o(dict(), id=1, name='kwarg') assert o.get_dict() == dict(id=1, name='kwarg') o = core.o(dict(description='from dict'), id=1, name='kwarg') assert o.get_dict() == dict(description='from dict', id=1, name='kwarg') class Test02ObjectUpdateContent(): @pytest.mark.usefixtures('fixture_dict_data') def test_00_setattr(self, fixture_dict_data): o = core.o(fixture_dict_data) # change exiting attribute o.name = 'changed' assert o.name == 'changed' o.nested.name = 'changed2' assert o.nested.name == 'changed2' o.items[0].name = 'changed3' assert o.items[0].name == 'changed3' # new attribute o.description = 'description' assert o.description == 'description' o.nested2 = core.o(dict(id=2, name='nested2')) assert o.nested2.id == 2 assert o.nested2.name == 'nested2' o.nested3 = core.o() o.nested3.id = 3 assert o.nested3.id == 3 o.items2 = [core.o(dict(id=2, name='item2'))] assert o.items2[0].id == 2 assert o.items2[0].name == 'item2' @pytest.mark.usefixtures('fixture_update_merge_data') def test_01_update(self, fixture_update_merge_data): with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_SELF): core.o().update(1) data = fixture_update_merge_data['data'] data2 = fixture_update_merge_data['data2'] o = core.o(data) assert o.get_dict() == data o.update(data2) assert o.get_dict() == data2 assert core.o(data).update(data2).get_dict() == data2 # chained style o = cor
jokajak/itweb
data/env/lib/python2.6/site-packages/tw.forms-0.9.9-py2.6.egg/tw/forms/__init__.py
Python
gpl-3.0
578
0.00173
""" Form widgets for ToscaWidgets. To download and install:: easy_install twForms """ from tw.api import Widget from tw.forms.core import * from tw.forms.fields import * from tw.form
s.datagrid import * from tw.forms.calendars import * # build all so doc tools introspect me properly from tw.forms.core import __all__ as __core_all from tw.forms.fields import __all__ as __fields_all from tw.forms.datagrid import __all__ as __datagrid_all from tw.forms.calendars import __all__ as __calendars_all __all__ = __core_all + __fields_all + __datagrid_all + __calendars_al
l
tinkerinestudio/Tinkerine-Suite
TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_plugins/craft_plugins/hop.py
Python
agpl-3.0
8,736
0.025412
""" This page is in the table of contents. Hop is a script to raise the extruder when it is not extruding. Note: Note: In some cases where you have thin overhang this plugin can help solve the problem object being knocked off by the head The hop manual page is at: http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Hop ==Operation== The default 'Activate Hop' checkbox is off. It is off because Vik and Nophead found better results without hopping. Numerous users reported better output without this plugin hence it is off by default. When activated the extruder will hop when traveling. When it is off, nothing will be done. ==Settings== ===Hop Over Layer Thickness=== Default is one. Defines the ratio of the hop height over the layer height, this is the most important hop setting. ===Minimum Hop Angle=== Default is 20 degrees. Defines the minimum angle that the path of the extruder will be raised. An angle of ninety means that the extruder will go straight up as soon as it is not extruding and a low angle means the extruder path will gradually rise to the hop height. ==Examples== The following examples hop the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and hop.py. > python hop.py This brings up the hop dialog. > python hop.py Screw Holder Bottom.stl The hop tool is parsing the file: Screw Holder Bottom.stl .. The hop tool has created the file: .. Screw Holder Bottom_hop.gcode """ from __future__ import absolute_import from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from fabmetheus_utilities import archive from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from skeinforge_application.skeinforge_utilities import skeinforge_craft from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import math import sys __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getCraftedText( fileName, text, hopRepository = None ): "Hop a gcode linear move text." return getCraftedTextFromText( archive.getTextIfEmpty(fileName, text), hopRepository ) def getCraftedTextFromText( gcodeText, hopRepository = None ): "Hop a gcode linear move text." if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'hop'): return gcodeText if hopRepository == None: hopRepository = settings.getReadRepository( HopRepository() ) if not hopRepository.activateHop.value: return gcodeText return HopSkein().getCraftedGcode( gcodeText, hopRepository ) def getNewRepository(): 'Get new repository.' return HopRepository() def writeOutput(fileName, shouldAnalyze=True): "Hop a gcode linear move file. Chain hop the gcode if it is not already hopped." skeinforge_craft.writeChainTextWithNounMessage(fileName, 'hop', shouldAnalyze) class HopRepository(object): "A class to handle the hop settings." def __init__(self): "Set the default settings, execute title & settings fileName." 
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.hop.html', self ) self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Hop', self, '') self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Hop') self.activateHop = settings.BooleanSetting().getFromValue('Activate Hop', self, False ) self.hopOverLayerThickness = settings.FloatSpin().getFromValue( 0.5, 'Hop Over Layer Thickness (ratio):', self, 1.5, 1.0 ) self.minimumHopAngle = settings.FloatSpin().getFromValue( 20.0, 'Minimum Hop Angle (degrees):', self, 60.0, 30.0 ) self.executeTitle = 'Hop' def execute(self): "Hop button has been clicked." fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled) for fileName in fileNames: writeOutput(fileName) class HopSkein(object): "A class to hop a skein of extrusions." def __init__(self): 'Initialize' self.distanceFeedRate = gcodec.DistanceFeedRate() self.extruderActive = False self.feedRateMinute = 961.0 self.hopHei
ght = 0.4 self.hopDistance = self.hopHeight sel
f.justDeactivated = False self.layerCount = settings.LayerCount() self.lineIndex = 0 self.lines = None self.oldLocation = None def getCraftedGcode( self, gcodeText, hopRepository ): "Parse gcode text and store the hop gcode." self.lines = archive.getTextLines(gcodeText) self.minimumSlope = math.tan( math.radians( hopRepository.minimumHopAngle.value ) ) self.parseInitialization( hopRepository ) for self.lineIndex in xrange(self.lineIndex, len(self.lines)): line = self.lines[self.lineIndex] self.parseLine(line) return self.distanceFeedRate.output.getvalue() def getHopLine(self, line): "Get hopped gcode line." splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) self.feedRateMinute = gcodec.getFeedRateMinute( self.feedRateMinute, splitLine ) if self.extruderActive: return line location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) highestZ = location.z if self.oldLocation != None: highestZ = max( highestZ, self.oldLocation.z ) highestZHop = highestZ + self.hopHeight locationComplex = location.dropAxis() if self.justDeactivated: oldLocationComplex = self.oldLocation.dropAxis() distance = abs( locationComplex - oldLocationComplex ) if distance < self.minimumDistance: if self.isNextTravel() or distance == 0.0: return self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop ) alongRatio = min( 0.41666666, self.hopDistance / distance ) oneMinusAlong = 1.0 - alongRatio closeLocation = oldLocationComplex * oneMinusAlong + locationComplex * alongRatio self.distanceFeedRate.addLine( self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop ) ) if self.isNextTravel(): return self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop ) farLocation = oldLocationComplex * alongRatio + locationComplex * oneMinusAlong self.distanceFeedRate.addGcodeMovementZWithFeedRate( self.feedRateMinute, farLocation, highestZHop ) return line if self.isNextTravel(): return self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop ) return line def isNextTravel(self): "Determine if there is another linear travel before the thread ends." for afterIndex in xrange( self.lineIndex + 1, len(self.lines) ): line = self.lines[ afterIndex ] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) if firstWord == 'G1': return True if firstWord == 'M101': return False return False def parseInitialization( self, hopRepository ): 'Parse gcode initialization and store the parameters.' for self.lineIndex in xrange(len(self.lines)): line = self.lines[self.lineIndex] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) self.distanceFeedRate.parseSplitLine(firstWord, splitLine) if firstWord == '(<layerHeight>': layerHeight = float(splitLine[1]) self.hopHeight = hopRepository.hopOverLayerThickness.value * layerHeight self.hopDistance = self.hopHeight / self.minimumSlope self.minimumDistance = 0.5 * layerHeight elif firstWord == '(</extruderInitialization>)': self.distanceFeedRate.addTagBracketedProcedure('hop') return self.distanceFeedRate.addLine(line) def parseLine(self, line): "Parse a gcode line and add it to the bevel gcode." splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if self.distanceFeedRate.getIsAlteration(line): return if firstWord == 'G1': line = self.getHopLine(line) self.oldLocation = gcodec.getLocationFromSpl
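A small arithmetic sketch of how the two hop settings above turn into distances, following the same formulas as HopSkein.parseInitialization; the 0.2 layer height is only an assumed example value:

import math

layer_height = 0.2                  # assumed example value, not from the record
hop_over_layer_thickness = 1.0      # default 'Hop Over Layer Thickness (ratio)'
minimum_hop_angle = 20.0            # default 'Minimum Hop Angle (degrees)'

hop_height = hop_over_layer_thickness * layer_height                   # 0.2
hop_distance = hop_height / math.tan(math.radians(minimum_hop_angle))  # ~0.55
minimum_distance = 0.5 * layer_height                                  # 0.1
print(hop_height, hop_distance, minimum_distance)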
welex91/ansible-modules-core
windows/win_reboot.py
Python
gpl-3.0
1,679
0.00536
DOCUMENTATION='''
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
version_added: "2.1"
options:
  pre_reboot_delay_sec:
    description:
    - Seconds for shutdown to wait before requesting reboot
    default: 2
  shutdown_timeout_sec
:
    description:
    - Maximum seconds to wait for shutdown to occur
    - Increase this timeout for very slow hardware, large update applica
tions, etc
    default: 600
  reboot_timeout_sec:
    description:
    - Maximum seconds to wait for machine to re-appear on the network and respond to a test command
    - This timeout is evaluated separately for both network appearance and test command success (so maximum clock time is actually twice this value)
    default: 600
  connect_timeout_sec:
    description:
    - Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again
    default: 5
  test_command:
    description:
    - Command to expect success for to determine the machine is ready for management
    default: whoami
author:
- Matt Davis (@nitzmahone)
'''

EXAMPLES='''
# unconditionally reboot the machine with all defaults
- win_reboot:

# apply updates and reboot if necessary
- win_updates:
  register: update_result
- win_reboot:
  when: update_result.reboot_required

# reboot a slow machine that might have lots of updates to apply
- win_reboot:
    shutdown_timeout_sec: 3600
    reboot_timeout_sec: 3600
'''

RETURNS='''
rebooted:
    description: true if the machine was rebooted
    returned: always
    type: boolean
    sample: true
'''
Yurlungur/FLRW
MultiRegime.py
Python
mit
2,168
0.01476
#!/usr/bin/env python2
# multi-regime.py
# Author: Jonah Miller (jonah.maxwell.miller@gmail.com)
# Time-stamp: <2013-12-14 16:06:28 (jonah)>

# This is a library to plot and fit for omega(rho) variable. We choose
# omega so that we get the three distinct regimes for which we know
# the analytic solution with continuous transition regions.

# Imports
# ----------------------------------------------------------------------
import numpy as np
import mat
plotlib as mpl
import matplotlib.pyplot as plt
import plot_all_variables as pav
from scipy.special import erf
# ----------------------------------------------------------------------

# Constants
# ----------------------------------------------------------------------
RHO_MIN=0
OMEGA_MIN = -1
ERF_MIN = -1
RHO_DARK_ENERGY = [10,20,25,37]
RHO_MATTER = [20,40,80,92]
RHO_RADIATION = 200
TRANSITION_WIDTH_DE=[1,5,12
,20]
TRANSITION_WIDTH_MR=[1,5,12,20]
NUM_ERFS = 2
DE_AMPLITUDE = 1.0/2.0
MATTER_AMPLITUDE = (1.0/3.0)* DE_AMPLITUDE
XLABEL=r'$\rho$'
YLABEL=r'$\omega$'
# ----------------------------------------------------------------------

def omega(rho, rho_dark_energy,transition_width_de, rho_matter,transition_width_mr):
    return OMEGA_MIN \
        + DE_AMPLITUDE + MATTER_AMPLITUDE\
        + DE_AMPLITUDE*erf((rho - rho_dark_energy)/transition_width_de)\
        + MATTER_AMPLITUDE*erf((rho - rho_matter)/transition_width_mr)

def plot_rho():
    mpl.rcParams.update({'font.size': pav.fontsize})
    x = np.linspace(0,RHO_RADIATION,100)
    ys = [omega(x,RHO_DARK_ENERGY[i],TRANSITION_WIDTH_DE[i],
                RHO_MATTER[i],TRANSITION_WIDTH_MR[i])
          for i in range(len(RHO_DARK_ENERGY))]
    lines = [plt.plot(x,y,linewidth=pav.my_linewidth) for y in ys]
    plt.axis([RHO_MIN,RHO_RADIATION,
              1.1*OMEGA_MIN,
              1.1*(OMEGA_MIN + 2*(DE_AMPLITUDE + MATTER_AMPLITUDE))])
    plt.xlabel(XLABEL)
    plt.ylabel(YLABEL)
    plt.legend(["Abrupt transition", "Moderate transition",
                "Mild transition", "No well-defined regimes"], loc=4)
    plt.show()
    return

if __name__ == "__main__":
    plot_rho()
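The omega() function above restated as a single expression, with symbols matching the module constants (\omega_{\min} = OMEGA_MIN = -1, A_{DE} = DE_AMPLITUDE = 1/2, A_{M} = MATTER_AMPLITUDE = 1/6):

\omega(\rho) = \omega_{\min} + A_{DE} + A_{M} + A_{DE}\,\mathrm{erf}\!\left(\frac{\rho-\rho_{DE}}{w_{DE}}\right) + A_{M}\,\mathrm{erf}\!\left(\frac{\rho-\rho_{M}}{w_{M}}\right)

For \rho well below \rho_{DE} both error functions approach -1 and \omega \to -1 (dark-energy-like); between the two transitions \omega \to 0 (matter-like); for \rho well above \rho_{M} both approach +1 and \omega \to 1/3 (radiation-like), matching the three regimes described in the module's header comment.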
laurent-george/weboob
modules/feedly/browser.py
Python
agpl-3.0
4,193
0.000954
# -*- coding: utf-8 -*- # Copyright(C) 2014 Bezleputh # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from weboob.tools.json import json from weboob.capabilities.base import UserError from weboob.capabilities.collection import Collection from weboob.browser import LoginBrowser, URL, need_login from .pages import EssentialsPage, TokenPage, ContentsPage, PreferencesPage, MarkerPage __all__ = ['FeedlyBrowser'] class FeedlyBrowser(LoginBrowser): BASEURL = 'http://www.feedly.com/' essentials = URL('http://s3.feedly.com/essentials/essentials_fr.json', EssentialsPage) token = URL('v3/auth/token', TokenPage) contents = URL('v3/streams/contents', ContentsPage) preferences = URL('v3/preferences', PreferencesPage) marker = URL('v3/markers', MarkerPage) def __init__(self, username, password, login_browser, *args, **kwargs): super(FeedlyBrowser, self).__init__(username, password, *args, **kwargs) self.user_id = None self.login_browser = login_browser def do_login(self): if self.login_browser: if self.login_browser.code is None or self.user_id is None: self.login_browser.do_login() params = {'code': self.login_browser.code, 'client_id': 'feedly', 'client_secret': '0XP4XQ07VVMDWBKUHTJM4WUQ', 'redirect_uri': 'http://dev.feedly.com/feedly.html', 'grant_type': 'authorization_code'} token, self.user_id = self.token.go(data=params).get_token() self.session.headers['X-Feedly-Access-Token'] = token else: raise UserError(r'You need to fill your username and password to access this page') @need_login def iter_threads(self): params = {'streamId': 'user/%s/category/global.all' % self.user_id, 'unreadOnly': 'true', 'ranked': 'newest', 'count': '100'} return self.contents.go(params=params).get_articles() @need_login def get_unread_feed(self, url): params = {'streamId': url, 'backfill': 'true', 'boostMustRead': 'true', 'unreadOnly': 'true'} return self.contents.go(params=params).get_articles() def get_categories(self): if self.username is not None a
nd self.password is not None: return self.get_logged_categories() return self.essentials.go().get_categories() @need_login def get_logged_categories(self): user_categories = list(se
lf.preferences.go().get_categories()) user_categories.append(Collection([u'global.saved'], u'Saved')) return user_categories def get_feeds(self, category): if self.username is not None and self.password is not None: return self.get_logged_feeds(category) return self.essentials.go().get_feeds(category) @need_login def get_logged_feeds(self, category): if category == 'global.saved': type = 'tag' else: type = 'category' url = 'user/%s/%s/%s' % (self.user_id, type, category) return self.get_unread_feed(url) def get_feed_url(self, category, feed): return self.essentials.go().get_feed_url(category, feed) @need_login def set_message_read(self, _id): datas = {'action': 'markAsRead', 'type': 'entries', 'entryIds': [_id]} self.marker.open(data=json.dumps(datas))
python-frederick/hackathon-2016
hackathon/wsgi.py
Python
bsd-2-clause
395
0
""" WSGI config for hackathon project. It exposes the WSGI callable as a module-level variable named ``application`
`. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hackathon.settings")
application = get_wsgi_application()
Anatoscope/sofa
applications/plugins/SofaPython/python/SofaPython/script.py
Python
lgpl-2.1
2,609
0.010732
'''simpler & deprecated python script controllers''' import Sofa import inspect def deprecated(cls): # TODO maybe we should print a backtrace to locate the origin? # or even better, use: https://docs.python.org/2/library/warnings.html#warnings.warn line = '''class `{0}` from module `{1}` is deprecated. You may now derive from `Sofa.PythonScriptController` and instantiate derived classes directly.'''.format(cls.__name__, cls.__module__) Sofa.msg_deprecated('SofaPython', line) Sofa.msg_deprecated('SofaPython', 'note: `createGraph` will no longer be called automatically. You need to call manually from __init__ instead.') Sofa.msg_deprecated('SofaPython', 'note: `onLoaded` will no longer be called automatically. You need to call manually from __init__ instead.') # uncomment to get the location where the deprecated class is created # import traceback; traceback.print_stack() return cls @deprecated class Controller(Sofa.PythonScriptController): # to stack data for recursive creations of Controllers instances = [] kwargs = [] def __new__(cls, node, name='pythonScriptController', filename='', **kwarg): """ :param filename: you may have to define it (at least once) to create a controller for which the class is defined in an external file. Be aware the file will then be read several times. """ # temporary variable to store optional arguments Controller.kwargs.append( kwarg ) node.createObject('PythonScriptController', filename = filename, classname = cls.__name__, name = name) # note the previous calls callbacks onLoaded and createGraph
try: return Controller.instances.pop() # let's trust the garbage collector except AttributeError: # if this fails, you need to call # Controller.onLoaded(self, node) in
derived classes print "[SofaPython.script.Controller.__new__] instance not found, did you call 'SofaPython.script.Controller.onLoaded' on your overloaded 'onLoaded' in {} ?".format(cls) raise def onLoaded(self, node): Controller.instances.append(self) self.additionalArguments(Controller.kwargs.pop()) # let's trust the garbage collector def additionalArguments(self,kwarg): """ to handle optional constructor arguments before createGraph """ pass
django-danceschool/django-danceschool
danceschool/core/migrations/0037_remove_registration_expirationdate.py
Python
bsd-3-clause
1,183
0.002536
# Generated by Django 2.2.17 on 2021-02-02 03:43

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0036_fix_reg_invoice_link'),
    ]

    operations = [
        migrations.AlterField(
            model_name='registration',
            name='final',
            field=models.BooleanField(null=False, default=False, verbose_name='Registration has been finalized'),
        ),
        migrations.AlterField(
            model_name='eventregistration',
            name='invoiceItem',
            field=models.OneToOneField(null=False, on_delete=django.db.models.deletion.CASCADE, related_name='eventRegistration', to='core.InvoiceItem', verbose_name='Invoice item'),
        ),
        migrations.AlterField(
            model_name='registration',
            name='invoice',
            field=models.OneToOneField(null=False, on_delete=django.db.models.deletion.CASCADE, related_name='registration', to='core.Invoice', verbose_name='Invoice'),
        ),
        migrations.RemoveField(
            model_name='registration',
            name='expirationDate',
        ),
    ]
depet/scikit-learn
sklearn/ensemble/__init__.py
Python
bsd-3-clause
1,055
0
""" The :mod:`sklearn.ensemble` module includes ensemble-based methods for classification and regression. """ from .base import BaseEnsemble from .forest import RandomForestClassifier from .forest import RandomForestRegressor from .forest import RandomTreesEmbedding from .forest import ExtraTreesClassifier from .forest import ExtraTreesRegressor from .weight_boosting import AdaBoostC
lassifier from .weight_boosting import AdaBoostRegressor from .gradient_boosting import GradientBoostingClassifier from .gradient_boosting import GradientBoostingRegressor from . import forest from . import weight_boosting from . import gradient_boosting from . import partial_dependence __all__ = ["BaseEnsemble", "RandomForestClassifier", "RandomForestRegressor", "RandomTreesEmbedding", "ExtraTreesClassifier", "ExtraTreesRegressor", "GradientBoost
ingClassifier", "GradientBoostingRegressor", "AdaBoostClassifier", "AdaBoostRegressor", "forest", "gradient_boosting", "partial_dependence", "weight_boosting"]
skycucumber/Messaging-Gateway
webapp/venv/lib/python2.7/site-packages/twisted/names/tap.py
Python
gpl-2.0
4,832
0.001863
# -*- test-case-name: twisted.names.test.test_tap -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Domain Name Server """ import os, traceback from twisted.python import usage from twisted.names import dns from twisted.application import internet, service from twisted.names import server from twisted.names import authority from twisted.names import secondary class Options(usage.Options): optParameters = [ ["interface", "i", "", "The interface to which to bind"], ["port", "p", "53", "The port on which to listen"], ["resolv-conf", None, None, "Override location of resolv.conf (implies --recursive)"], ["hosts-file", None, None, "Perform lookups with a hosts file"], ] optFlags = [ ["cache", "c", "Enable record caching"], ["recursive", "r", "Perform recursive lookups"], ["verbose", "v", "Log verbosely"], ] compData = usage.Completions( optActions={"interface" : usage.CompleteNetInterfaces()} ) zones = None zonefiles = None def __init__(self): usage.Options.__init__(self) self['verbose'] = 0 self.bindfiles = [] self.zonefiles = [] self.secondaries = [] def opt_pyzone(self, filename): """Specify the filename of a Python syntax zone definition""" if not os.path.exists(filename): raise usage.UsageError(filename + ": No such file") self.zonefiles.append(filename) def opt_bindzone(self, filename): """Specify the filename of a BIND9 syntax zone definition""" if not os.path.exists(filename): raise usage.UsageError(filename + ": No such file") self.bindfiles.append(filename) def opt_secondary(self, ip_domain): """Act as secondary for the specified domain, performing zone transfers from the specified IP (IP/domain) """ args = ip_domain.split('/', 1) if len(args) != 2: raise usage.UsageError("Argument must be of the form IP[:port]/domain") address = args[0].split(':') if len(address) == 1: address = (address[0], dns.PORT) else: try: port = int(address[1]) except ValueError: raise usage.UsageError( "Specify an integer port number, not %r" %
(address[1],)) address = (address[0], port) self.secondaries.append((address, [args[1]])) def opt_verbose(self): """Increment verbosity level""" self['verbose'] += 1 def postOptions(self): if self[
'resolv-conf']: self['recursive'] = True self.svcs = [] self.zones = [] for f in self.zonefiles: try: self.zones.append(authority.PySourceAuthority(f)) except Exception: traceback.print_exc() raise usage.UsageError("Invalid syntax in " + f) for f in self.bindfiles: try: self.zones.append(authority.BindAuthority(f)) except Exception: traceback.print_exc() raise usage.UsageError("Invalid syntax in " + f) for f in self.secondaries: svc = secondary.SecondaryAuthorityService.fromServerAddressAndDomains(*f) self.svcs.append(svc) self.zones.append(self.svcs[-1].getAuthority()) try: self['port'] = int(self['port']) except ValueError: raise usage.UsageError("Invalid port: %r" % (self['port'],)) def _buildResolvers(config): """ Build DNS resolver instances in an order which leaves recursive resolving as a last resort. @type config: L{Options} instance @param config: Parsed command-line configuration @return: Two-item tuple of a list of cache resovers and a list of client resolvers """ from twisted.names import client, cache, hosts ca, cl = [], [] if config['cache']: ca.append(cache.CacheResolver(verbose=config['verbose'])) if config['hosts-file']: cl.append(hosts.Resolver(file=config['hosts-file'])) if config['recursive']: cl.append(client.createResolver(resolvconf=config['resolv-conf'])) return ca, cl def makeService(config): ca, cl = _buildResolvers(config) f = server.DNSServerFactory(config.zones, ca, cl, config['verbose']) p = dns.DNSDatagramProtocol(f) f.noisy = 0 ret = service.MultiService() for (klass, arg) in [(internet.TCPServer, f), (internet.UDPServer, p)]: s = klass(config['port'], arg, interface=config['interface']) s.setServiceParent(ret) for svc in config.svcs: svc.setServiceParent(ret) return ret
druids/django-pyston
pyston/order/exceptions.py
Python
bsd-3-clause
175
0
class Or
derError(Exception): pass class
OrderIdentifierError(OrderError): """ Order exception that is raised if order identifier was not found. """ pass
Ebag333/Pyfa
service/damagePattern.py
Python
gpl-3.0
2,873
0.000348
# ============================================================================= # Copyright (C) 2010 Diego Duclos # # This file is part of pyfa. # # pyfa is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pyfa is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyfa. If not, see <http://www.gnu.org/licenses/>. # ============================================================================= import copy import eos.db from eos.saveddata.damagePattern import DamagePattern as es_DamagePattern class ImportError(Exception): pass class DamagePattern(): instance = None @classmethod def getInstance(cls): if cls.instance is None: cls.instance = DamagePattern() return cls.instance def getDamagePatternList(self): return eos.db.getDamagePatternList() def getDamagePattern(self, name): return eos.db.getDamagePattern(name) def newPattern(self, name): p = es_DamagePattern(0, 0, 0, 0) p.name = name eos.db.save(p) return p def renamePattern(self, p, newName): p.name = newName eos.db.save(p) def deletePattern(self, p): eos.db.remove(p) def copyPattern(self, p): newP = copy.deepcopy(p) eos.db.save(newP) return newP def
saveChanges(self, p):
eos.db.save(p) def importPatterns(self, text): lookup = {} current = self.getDamagePatternList() for pattern in current: lookup[pattern.name] = pattern imports, num = es_DamagePattern.importPatterns(text) for pattern in imports: if pattern.name in lookup: match = lookup[pattern.name] match.__dict__.update(pattern.__dict__) else: eos.db.save(pattern) eos.db.commit() lenImports = len(imports) if lenImports == 0: raise ImportError("No patterns found for import") if lenImports != num: raise ImportError("%d patterns imported from clipboard; %d had errors" % (num, num - lenImports)) def exportPatterns(self): patterns = self.getDamagePatternList() for i in xrange(len(patterns) - 1, -1, -1): if patterns[i].name in ("Uniform", "Selected Ammo"): del patterns[i] patterns.sort(key=lambda p: p.name) return es_DamagePattern.exportPatterns(*patterns)
GreenJoey/My-Simple-Programs
python/Twisted/krondo Twisted Introduction/twisted-client-2/get-poetry-stack.py
Python
gpl-2.0
3,806
0.001051
# This is the Twisted Get Poetry Now! client, version 2.0 import datetime import optparse import os import traceback from twisted.internet.protocol import Protocol, ClientFactory def parse_args(): usage = """usage: %prog [options] [hostname]:port ... This is the Get Poetry Now! client, Twisted version 2.0. Run it like this: python get-poetry.py port1 port2 port3 ... If you are in the base directory of the twisted-intro package, you could run it like this: python twisted-client-2/get-poetry.py 10001 10002 10003 to grab poetry from servers on ports 10001, 10002, and 10003. Of course, there need to be servers listening on those ports for that to work. """ parser = optparse.OptionParser(usage) _, addresses = parser.parse_args() if not addresses: print(parser.format_help()) parser.exit() def parse_address(addr): if ':' not in addr: host = '127.0.0.1' port = addr else: host, port = addr.split(':', 1) if not port.isdigit(): parser.error('Ports must be integers.') return host, int(port) return list(map(parse_address, addresses)) class PoetryProtocol(Protocol): poem = b"" task_num = 0 def dataReceived(self, data): # Called whenever data is received from the transport self.poem += data traceback.print_stack() os._exit(0) def connectionLost(self, reason): self.poemRecevied(self.poem) def poemRecevied(self, poem): self.factory.poem_finished(self.task_num, poem) class PoetryClientFactory(ClientFactory): task_num = 1 # Initial task id protocol = PoetryProtocol # Tell the base-classes to use this protocol def __init__(self, poetry_count): self.poetry_count = poetry_count self.poems = {} # task_num -> poem def buildProtocol(self, address): # Create an object of the Protocol # The returned instance will handle input on an incoming server # connection, and an attribute "factory" pointing to the creating # factory. # Alternatively, L{None} may be returned to immediately close the # new connection. # Call the base-class's buildProtocol since our Protocol is basic proto = ClientFactory.buildProtocol(self, address) proto.task_num = self.task_num # Assign the new protocol its id self.task_num += 1 # Increment the id return proto # Return the built protocol def poem_finished(self, task_num=None, poem=None): if task_num is not None: self.poems[task_num] = poem self.poetry_count -= 1 if self.poetry_count == 0: self.report() from twisted.internet import reactor reactor.stop() def report(self): for i in self.poems: print("Task %d: %d bytes of poetry" %(i, len(self.poems[i]))) def clientConnectionFailed(self, connector, reason): print("Failed to con
nect to:", connector.getDestination()) self.poem_finished() if __name__ == '__main__': addresses = parse_args() start = datetime.datetime.now() factory = PoetryClientFactory(len(addresses)) from twisted.internet import reactor for address in addresses:
# Get the host and port from the returned tuples host, port = address # The .connectTCP method is of interest here. # It takes the host and port as first two parameter # And also a protocol factory to create the protocol objects on-demand reactor.connectTCP(host, port, factory) reactor.run() elapsed = datetime.datetime.now() - start print("Got %d poems in %s" %(len(addresses), elapsed))
bjolivot/ansible
lib/ansible/modules/network/avi/avi_analyticsprofile.py
Python
gpl-3.0
25,965
0.003967
#!/usr/bin/python # # Created on Aug 25, 2016 # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # Avi Version: 16.3.8 # # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_analyticsprofile author: Gaurav Rastogi (grastogi@avinetworks.com) short_description: Module for setup of AnalyticsProfile Avi RESTful Object description: - This module is used to configure AnalyticsProfile object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.3" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent","present"] apdex_response_threshold: description: - If a client receives an http response in less than the satisfactory latency threshold, the request is considered satisfied. - It is considered tolerated if it is not satisfied and less than tolerated latency factor multiplied by the satisfactory latency threshold. - Greater than this number and the client's request is considered frustrated. - Default value when not specified in API or module is interpreted by Avi Controller as 500. apdex_response_tolerated_factor: description: - Client tolerated response latency factor. - Client must receive a response within this factor times the satisfactory threshold (apdex_response_threshold) to be considered tolerated. - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. apdex_rtt_threshold: description: - Satisfactory client to avi round trip time(rtt). - Default value when not specified in API or module is interpreted by Avi Controller as 250. apdex_rtt_tolerated_factor: description: - Tolerated client to avi round trip time(rtt) factor. - It is a multiple of apdex_rtt_tolerated_factor. - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. apdex_rum_threshold: description: - If a client is able to load a page in less than the satisfactory latency threshold, the pageload is considered satisfied. - It is considered tolerated if it is greater than satisfied but less than the tolerated latency multiplied by satisifed latency. - Greater than this number and the client's request is considered frustrated. - A pageload includes the time for dns lookup, download of all http objects, and page render time. - Default value when not specified in API or module is interpreted by Avi Controller as 5000. apdex_rum_tolerated_factor: description: - Virtual service threshold factor for tolerated page load time (plt) as multiple of apdex_rum_threshold. - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. 
apdex_server_response_threshold: description: - A server http response is considered satisfied if latency is less than the satisfactory latency threshold. - The response is considered tolerated when it is greater than satisfied but less than the tolerated latency factor * s_latency. - Greater than this number and the server response is considered frustrated. - Default value when not specified in API or module is interpreted by Avi Controller as 400. apdex_server_response_tolerated_factor: description: - Server tolerated response latency factor. - Servermust response within this factor times the satisfactory threshold (apdex_server_response_threshold) to be considered tolerated. - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. apdex_server_rtt_threshold: description: - Satisfactory client to avi round trip time(rtt). - Default value when not specified in API or module is interpreted by Avi Controller as 125. apdex_server_rtt_tolerated_factor: description: - Tolerated client to avi round trip time(rtt) factor. - It is a multiple of apdex_rtt_tolerated_factor. - Default value when not specified in API or module is interpreted by Avi Controller as 4.0. client_log_config: description: - Clientlogconfiguration settings for analyticsprofile. conn_lossy_ooo_threshold: description: - A connection between client and avi is considered lossy when more than this percentage of out of order packets are received. - Default value when not specified in API or module is interpreted by Avi Controller as 50. conn_lossy_timeo_rexmt_threshold: description: - A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted due to timeout. - Default value when not specified in API or module is interpreted by Avi Controller as 20. conn_lossy_total_rexmt_threshold: description: - A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted. - Default value when not specified in API or module is interpreted by Avi Controller as 50. conn_lossy_zero_win_size_event_threshold: description: - A client connection is considered lossy when percentage of times a packet could not be trasmitted due to tcp zero window is above this threshold. - Default value when not specified in API or module is interpreted by Avi Controller as 2. conn_server_lossy_ooo_threshold: description: - A connection between avi and server is considered lossy when more than this percentage of out of order packets are received. - Default value when not specified in API or module is interpreted by Avi Controller as 50. conn_server_lossy_timeo_rexmt_threshold: description: - A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted d
ue to timeo
ut. - Default value when not specified in API or module is interpreted by Avi Controller as 20. conn_server_lossy_total_rexmt_threshold: description: - A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted. - Default value when not specified in API or module is interpreted by Avi Controller as 50. conn_server_lossy_zero_win_size_event_threshold: description: - A server connection is considered lossy when percentage of times a packet could not be trasmitted due to tcp zero window is above this threshold. - Default value when not specified in API or module is interpreted by Avi Controller as 2. description: description: - User defined description for the object. disable_se_analytics: description: - Disable node (service engine) level analytics forvs metrics. - Default value when not specified in API or module is interpreted by Avi Controller as False. disable_se
julien78910/CouchPotatoServer
version.py
Python
gpl-3.0
32
0.03125
V
ERSION
= None BRANCH = 'master'
zstackio/zstack-woodpecker
integrationtest/vm/mini/multiclusters/paths/multi_path173.py
Python
apache-2.0
2,948
0.018996
import zstackwoodpecker.test_state as ts_header import os TestAction = ts_header.TestAction def path(): return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[ [TestAction.create_mini_vm, 'vm1', 'cluster=cluster1'], [TestAction.reboot_vm, 'vm1'], [TestAction.create_mini_vm, 'vm2', 'cluster=cluster2'], [TestAction.create_vm_backup, 'vm2', 'vm2-backup1'], [TestAction.destroy_vm, 'vm2'], [TestAction.expunge_vm, 'vm2'], [TestAction.create_image_from_volume, 'vm1', 'vm1-image1'], [TestAction.poweroff_only, 'cluster=cluster1'], [TestAction.create_volume, 'volume1', 'cluster=cluster1', 'flag=scsi'], [TestAction.attach_volume, 'vm1', 'volume1'], [TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=thick,scsi'], [TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'], [TestAction.start_vm, 'vm1'], [TestAction.create_vm_backup, 'vm1', 'vm1-backup2'], [TestAction.stop_vm, 'vm1'], [TestAction.use_vm_backup, 'vm1-backup2'], [TestAction.delete_image, 'image2'], [TestAction.expunge_image, 'image2'], [TestAction.create_mini_vm, 'vm3', 'cluster=cluster2'], [TestAction.create_vm_backup, 'vm3', 'vm3-backup4'], [TestAction.destroy_vm, 'vm1'], [TestAction.poweroff_only, 'cluster=cluster2'], [TestAction.create_mini_vm, 'vm4', 'cluster=cluster1'], [TestAction.migrate_vm, 'vm4'], [TestAction.attach_volume, 'vm4', 'volume1'], [TestAction.detach_volume, 'volume1'], [TestAction.delete_volume, 'volume1'], [TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=scsi'], [TestAction.attach_volume, 'vm4', 'volume3'], [TestAction.create_volume_backup, 'volume3', 'volume3-backup5'], [TestAction.delete_volume_backup, 'volume3-backup5'], [TestAction.create_mini_vm, 'vm5', 'network=random', 'cluster=cluster2'], [TestAction.expunge_volume, 'volume1'], [TestAction.reboot_vm, 'vm5'], [TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=scsi'], [TestAction.attach_volume, 'vm4', 'volume4'], [TestAction.create_volume_backup, 'volume4', 'volume4-backup6'], [TestAction.resize_data_volume, 'volume2', 5*1024*1024], [TestAction.poweroff_only, 'cluster=cluster2'], [TestAction.attach_volume, 'vm5', 'volume2'], [TestAction.start_vm, 'vm5'], [TestAction.create_volume_backup, 'volume2', 'volume2-backup7'], [TestAction.stop_vm, 'vm5'], [TestAction.use_volume_backup, 'volume2-backup7'], ]) ''' The final status: Running:['vm4'] Stopped:['vm3', 'vm5'] Enadbled:['vm2-backup1', 'vm1-backup2', 'volume1-backup2', 'vm3-backup4', 'volume4-backup6', 'volume2-backup7', 'vm1-ima
ge1'] attached:['volume3', 'volume4', 'volume2'] Detached:[] Deleted:['vm1', 'volume3-backup5'] Expunged:['vm2', 'volume1', 'imag
e2'] Ha:[] Group: vm_backup2:['vm1-backup2', 'volume1-backup2']---vm1@volume1 vm_backup3:['vm3-backup4']---vm3@ vm_backup1:['vm2-backup1']---vm2@ '''
nisanick/Prisma_Machina
cogs/roleplay.py
Python
mit
45,290
0.004455
import asyncio import random import checks from TextChecker import TextChecker from database import Database from checks import * from data.rp_texts import * from data.links import * from web import Web from roleplay.Player import Player import discord from discord.ext import commands class Roleplay(commands.Cog): def __init__(self, bot: commands.Bot): self.parameters = {} self.bot = bot self.delta = 10 self.players = {} self.playerids = [] self.announce_message = None self.system_message = None self.turn_number = 0 self.prompt = None @commands.command(name='say', case_insensitive=True) @commands.check(checks.can_manage_rp) @commands.check(checks.in_say_channel) async def _say(self, ctx, channel: discord.TextChannel, *, message): """ *RP Moderator only* | *#rp-scripting only* | Sends a message as a bot in specified channel. """ await channel.send(message) @commands.command(name='dm', case_insensitive=True) @commands.check(checks.can_manage_rp) @commands.check(checks.in_say_channel) async def _dm(self, ctx, user: discord.User, *, message): """ *RP Moderator only* | *#rp-scripting only* | Sends a direct message as a bot to specified member. Use full name (name#number), ping or ID.
""" channel = user.dm
_channel if channel is None: await user.create_dm() channel = user.dm_channel await channel.send(message) await ctx.message.add_reaction('✅') @commands.command(name='rm', case_insensitive=True) @commands.check(checks.can_manage_rp) @commands.check(checks.in_say_channel) async def _remove_message(self, ctx, message: discord.Message): """ *RP Moderator only* | *#rp-scripting only* | Removes a specified message that the bot posted. Use message ID. """ if message.author.id == self.bot.user.id: await message.delete() else: to_delete = await ctx.send("Can't remove that message.") await asyncio.sleep(7) await to_delete.delete() if not isinstance(ctx.channel, discord.DMChannel): await ctx.message.delete() @commands.command(name='medit', case_insensitive=True) @commands.check(checks.can_manage_rp) @commands.check(checks.in_say_channel) async def _edit_message(self, ctx, message: discord.Message, *, text): """ *RP Moderator only* | *#rp-scripting only* | Edits a specified message that bot posted. Use message ID. """ await message.edit(content=text) @commands.group(name='rp', case_insensitive=True) async def _rp(self, ctx): """ Base command for RP utilities. Use `?help rp` for details. Mind that parameters [who] and [channel] are admin exclusive. Here is a list of all possible subcommands: """ await ctx.message.delete() if ctx.invoked_subcommand is None: await ctx.send("Subcommand required!") @_rp.command(name='turn') @commands.check(checks.can_manage_rp) async def _turn(self, ctx): """ *RP Moderator only* | Tells the bot to post used actions and start new turn. """ message = '**::TURN {}::**\n'.format(self.turn_number) message += 'Turn ended with these actions taking place:\n' message += '```\n' message += "{:^35}|{:^25}\n".format('Player', 'Action') message += "{:*^35}|{:*^25}\n".format('', '') for player_id, (player, action) in self.players.items(): player = self.system_message.guild.get_member(player_id) message += "{:<35}|{:<25}\n".format(player.nick or player.name, action or '<no action set>') message += '```\n' message += 'New turn has begun, please state your actions.' await self.bot.get_channel(326954112744816643).send(message) # await self.bot.get_channel(config.ANNOUNCE_CHANNEL).send(message) for player_id in self.playerids: player, action = self.players[player_id] self.players[player_id] = (player, None) self.turn_number += 1 await self.post_players(True) @_rp.command(name='start') @commands.check(checks.can_manage_rp) async def _start(self, ctx): """ *RP Moderator only* | Creates a new RP session if there is not one running already. Use `?help rp start` for more information about RP sessions. Players can join the session via `?rp join` Players are supposed to state their action with `?rp use` command each turn. Turns are ended by `?rp turn` command. Session is over when `?rp end` command is used. In case the session wasn't closed properly (bot crash, etc.) use `?rp clean` to reset it. """ announce_channel = self.bot.get_channel(326954112744816643) system_channel = self.bot.get_channel(374691520055345162) # announce_channel = self.bot.get_channel(config.ANNOUNCE_CHANNEL) # system_channel = self.bot.get_channel(config.ADMINISTRATION_CHANNEL) db = await Database.get_connection(self.bot.loop) insert = "INSERT INTO roleplay_session(announce_id, system_id) values ($1, $2)" select = "SELECT 1 FROM roleplay_session WHERE done is FALSE" async with db.transaction(): if await db.fetchval(select) is None: announce_message = await announce_channel.send("Session started. 
To participate, use `?rp join`") system_message = await system_channel.send("Session participants") self.announce_message = announce_message self.system_message = system_message self.turn_number = 1 await db.execute(insert, *(str(announce_message.id), str(system_message.id))) else: await ctx.send('There is already an unfinished session. Please end it before starting new one.') await Database.close_connection(db) @_rp.command(name='join') async def _join(self, ctx): """ Joins you to currently open session, if there is one at the moment. """ if self.announce_message is None: await ctx.send('No active session') return player_id = ctx.author.id for player in self.playerids: if player == player_id: to_delete = await ctx.send('Player is already in session') await asyncio.sleep(1) await to_delete.delete() return args = { 'discord_id': ctx.author.id, 'key': config.TRANSACTION_KEY # 'discord_id': '144229491907100672' } response = await Web.get_response(user_data_link, args) await Web.get_response(lock_link, args) player = Player(player_id, response['Inventory']) self.players[player_id] = (player, None) self.playerids.append(player_id) await self.announce_message.edit( content='{}\n{} joined'.format(self.announce_message.content, ctx.author.nick or ctx.author.name)) await self.post_players() async def post_players(self, new=False): if new: self.system_message = await self.bot.get_channel(374691520055345162).send('placeholder') # self.system_message = await self.bot.get_channel(config.ADMINISTRATION_CHANNEL).send('placeholder') message = '```\n' message += "{:^35}|{:^25}\n".format('Player', 'Action') message += "{:*^35}|{:*^25}\n".format('', '') for player_id, (player, action) in self.players.items(): player = self.system_message.guild.get_member(player_id) message += "{:<35}|{:<25}\n".format(player.nick or player.name, action or '<no action set>') message += '```' await self.system_message.edit(content=message) @_rp.command(name='use') async def _use(self, ctx, *, what=None): """ Queues action f
rwightman/pytorch-image-models
tests/test_optim.py
Python
apache-2.0
24,464
0.001635
""" Optimzier Tests These tests were adapted from PyTorch' optimizer tests. """ import math import pytest import functools from copy import deepcopy import torch from torch.testing._internal.common_utils import TestCase from torch.autograd import Variable from timm.scheduler import PlateauLRScheduler from timm.optim import create_optimizer_v2 # HACK relying on internal PyTorch test functionality for comparisons that I don't want to write torch_tc = TestCase() def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors): weight = Variable(weight, requires_grad=True) bias = Variable(bias, requires_grad=True) input = Variable(input) optimizer = constructor(weight, bias) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) # to check if the optimizer can be printed as a string optimizer.__repr__() def fn(): optimizer.zero_grad() y = weight.mv(input) if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): y = y.cuda(bias.get_device()) loss = (y + bias).pow(2).sum() loss.backward() return loss initial_value = fn().item() for _i in range(200): for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): val_loss = fn() scheduler.step(val_loss) else: scheduler.step() optimizer.step(fn) assert fn().item() < initial_value def _test_state_dict(weight, bias, input, constructor): weight = Variable(weight, requires_grad=True) bias = Variable(bias, requires_grad=True) input = Variable(input) def fn_base(optimizer, weight, bias): optimizer.zero_grad() i = input_cuda if weight.is_cuda else input loss = (weight.mv(i) + bias).pow(2).sum() loss.backward() return loss optimizer = constructor(weight, bias) fn = functools.partial(fn_base, optimizer, weight, bias) # Prime the optimizer for _i in range(20): optimizer.step(fn) # Clone the weights and construct new optimizer for them weight_c = Variable(weight.data.clone(), requires_grad=True) bias_c = Variable(bias.data.clone(), requires_grad=True) optimizer_c = constructor(weight_c, bias_c) fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) # Load state dict state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_c.load_state_dict(state_dict_c) # Run both optimizations in parallel for _i in range(20): optimizer.step(fn) optimizer_c.step(fn_c) #ass
ert torch.equal(weight, weight_c) #assert torch.equal(bias, bias_c) torch_tc.assertEqual(weight, weight_c) torch_tc.assertEqual(bias, bias_c) # Make sure state dict wasn't modified torch_tc.assertEqual(state_dict, state_dict_c) # Make sure state dict is deterministic with equal but not identical parameters
torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict()) # Make sure repeated parameters have identical representation in state dict optimizer_c.param_groups.extend(optimizer_c.param_groups) torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1]) # Check that state dict can be loaded even when we cast parameters # to a different type and move to a different device. if not torch.cuda.is_available(): return input_cuda = Variable(input.data.float().cuda()) weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True) bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True) optimizer_cuda = constructor(weight_cuda, bias_cuda) fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda) state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_cuda.load_state_dict(state_dict_c) # Make sure state dict wasn't modified torch_tc.assertEqual(state_dict, state_dict_c) for _i in range(20): optimizer.step(fn) optimizer_cuda.step(fn_cuda) torch_tc.assertEqual(weight, weight_cuda) torch_tc.assertEqual(bias, bias_cuda) # validate deepcopy() copies all public attributes def getPublicAttr(obj): return set(k for k in obj.__dict__ if not k.startswith('_')) assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer)) def _test_basic_cases(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] _test_state_dict( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor ) _test_basic_cases_template( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor, scheduler_constructors ) # non-contiguous parameters _test_basic_cases_template( torch.randn(10, 5, 2)[..., 0], torch.randn(10, 2)[..., 0], torch.randn(5), constructor, scheduler_constructors ) # CUDA if not torch.cuda.is_available(): return _test_basic_cases_template( torch.randn(10, 5).cuda(), torch.randn(10).cuda(), torch.randn(5).cuda(), constructor, scheduler_constructors ) def _test_model(optimizer, params, device=torch.device('cpu')): weight = torch.tensor( [[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]], device=device, requires_grad=True) bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True) weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True) bias2 = torch.tensor([-0.0711], device=device, requires_grad=True) input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2) model = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Sigmoid(), torch.nn.Linear(3, 1), torch.nn.Sigmoid()) model.to(device) pretrained_dict = model.state_dict() pretrained_dict['0.weight'] = weight pretrained_dict['0.bias'] = bias pretrained_dict['2.weight'] = weight2 pretrained_dict['2.bias'] = bias2 model.load_state_dict(pretrained_dict) optimizer = create_optimizer_v2(model, opt=optimizer, **params) prev_loss = float('inf') for i in range(20): optimizer.zero_grad() output = model(input) loss = output.sum() loss.backward() loss = loss.item() assert loss < prev_loss prev_loss = loss optimizer.step() def rosenbrock(tensor): x, y = tensor return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 def drosenbrock(tensor): x, y = tensor return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2))) def _test_rosenbrock(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] params_t = torch.tensor([1.5, 1.5]) 
params = Variable(params_t, requires_grad=True) optimizer = constructor([params]) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) solution = torch.tensor([1, 1]) initial_dist = params.data.dist(solution) def eval(params, w): # Depending on w, provide only the x or y gradient optimizer.zero_grad() loss = rosenbrock(params) loss.backward() grad = drosenbrock(params.data) # NB: We torture test the optimizer by returning an # uncoalesced sparse tensor if w: i = torch.LongTensor([[0, 0]]) x = grad[0] v = torch.tensor([x / 4., x - x / 4.]) else: i = torch.LongTensor([[1, 1]]) y = grad[1] v = torch.tensor([y - y / 4., y
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/aio/operations/_load_balancers_operations.py
Python
mit
23,464
0.004773
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class LoadBalancersOperations: """LoadBalancersOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2016_09_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, load_balancer_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-09-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore async def begin_delete( self, resource_group_name: str, load_balancer_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified load balancer. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param load_balancer_name: The name of the load balancer. :type load_balancer_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, load_balancer_name=load_balancer_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore async def get( self, resource_group_name: str, load_balancer_name: str, expand: Optional[
str] = None, **kwargs: Any ) -> "_models.LoadBalancer": """Ge
ts the specified load balancer. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param load_balancer_name: The name of the load balancer. :type load_balancer_name: str :param expand: Expands referenced resources. :type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: LoadBalancer, or the result of cls(response) :rtype: ~azure.mgmt.network.v2016_09_01.models.LoadBalancer :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.
krzyzacy/test-infra
triage/summarize.py
Python
apache-2.0
21,103
0.002606
#!/usr/bin/env python3 # Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Summarize groups failed tests together by finding edit distances between their failure strings, and emits JSON for rendering in a browser. """ # pylint: disable=invalid-name,missing-docstring import argparse import functools import hashlib import json import logging import os import re import sys import time import zlib import berghelroach editdist = berghelroach.dist flakeReasonDateRE = re.compile( r'[A-Z][a-z]{2}, \d+ \w+ 2\d{3} [\d.-: ]*([-+]\d+)?|' r'\w{3}\s+\d{1,2} \d+:\d+:\d+(\.\d+)?|(\d{4}-\d\d-\d\d.|.\d{4} )\d\d:\d\d:\d\d(.\d+)?') # Find random noisy strings that should be replaced with renumbered strings, for more similarity. flakeReasonOrdinalRE = re.compile( r'0x[0-9a-fA-F]+' # hex constants r'|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?' # IPs + optional port r'|[0-9a-fA-F]{8}-\S{4}-\S{4}-\S{4}-\S{12}(-\d+)?' # UUIDs + trailing digits r'|[0-9a-f]{12,32}' # hex garbage r'|(?<=minion-group-|default-pool-)[-0-9a-z]{4,}' # node names ) def normalize(s): """ Given a traceback or error message from a text, reduce excess entropy to make clustering easier. This includes: - blanking dates and timestamps - renumbering unique information like - pointer addresses - UUIDs - IP addresses - sorting randomly ordered map[] strings. """ # blank out dates s = flakeReasonDateRE.sub('TIME', s) # do alpha conversion-- rename random garbage strings (hex pointer values, node names, etc) # into 'UNIQ1', 'UNIQ2', etc. matches = {} def repl(m): s = m.group(0) if s not in matches: matches[s] = 'UNIQ%d' % (len(matches) + 1) return matches[s] if 'map[' in s: # Go's maps are in a random order. Try to sort them to reduce diffs. s = re.sub(r'map\[([^][]*)\]', lambda m: 'map[%s]' % ' '.join(sorted(m.group(1).split())), s) s = flakeReasonOrdinalRE.sub(repl, s) if len(s) > 10000: # for long strings, remove repeated lines! s = re.sub(r'(?m)^(.*\n)\1+', r'\1', s) if len(s) > 10000: # ridiculously long test output s = s[:5000] + '\n...[truncated]...\n' + s[-5000:] return s def normalize_name(name): """ Given a test name, remove [...]/{...}. Matches code in testgrid and kubernetes/hack/update_owners.py. """ name = re.sub(r'\[.*?\]|{.*?\}', '', name) name = re.sub(r'\s+', ' ', name) return name.strip() def make_ngram_counts(s, ngram_counts={}): """ Convert a string into a histogram of frequencies for different byte combinations. This can be used as a heuristic to estimate edit distance between two strings in constant time. Instead of counting each ngram individually, they are hashed into buckets. This makes the output count size constant. """ # Yes, I'm intentionally memoizing here. # pylint: disable=dangerous-default-value size = 64 if s not in ngram_counts: counts = [0] * size for x in range(len(s)-3): counts[zlib.crc32(s[x:x+4].encode('utf8')) & (size - 1)] += 1 ngram_counts[s] = counts # memoize return ngram_counts[s] def ngram_editdist(a, b): """ Compute a heuristic lower-bound edit distance using ngram counts. 
An insert/deletion/substitution can cause up to 4 ngrams to differ: abcdefg => abcefg (abcd, bcde, cdef, defg) => (abce, bcef, cefg) This will underestimate the edit distance in many cases: - ngrams hashing into the same bucket will get confused - a large-scale transposition will barely disturb ngram frequencies, but will have a very large effect on edit distance. It is useful to avoid more expensive precise computations when they are guaranteed to exceed some limit (being a lower bound), or as a proxy when the exact edit distance computation is too expensive (for long inputs). """ counts_a = make_ngram_counts(a) counts_b = make_ngram_counts(b) return sum(abs(x-y) for x, y in zip(counts_a, counts_b))//4 def make_ngram_counts_digest(s): """ Returns a hashed version of the ngram counts. """ return hashlib.sha1(str(make_ngram_counts(s)).encode()).hexdigest()[:20] def file_memoize(description, name): """ Decorator to save a function's results to a file. """ def inner(func): @functools.wraps(func) def wrapper(*args, **kwargs): if os.path.exists(name): with open(name) as f: data = json.load(f) logging.info('done (cached) %s', description) return data data = func(*args, **kwargs) with open(name, 'w') as f: json.dump(data, f) logging.info('done %s', description) return data wrapper.__wrapped__ = func return wrapper return inner @file_memoize('loading failed tests', 'memo_load_failures.json') def load_failures(builds_file, tests_files): """ Load builds and failed tests files. Group builds by path, group test failures by test name. Args: filenames Returns: { build_path: [{ path: build_path, started: 12345, ...} ...], ...}, { test_name: [{build: gs://foo/bar, name: test_name, failure_text: xxx}, ...], ...} """ builds = {} with open(builds_file) as f: for build in json.load(f): if not build['started'] or not build['number']: continue for attr in ('started', 'tests_failed', 'number', 'tests_run'): build[attr] = int(build[attr]) build['elapsed'] = int(float(build['elapsed'])) if 'pr-logs' in build['path']: buil
d['pr'] = build['path'].split('/')[-3]
builds[build['path']] = build failed_tests = {} for tests_file in tests_files: with open(tests_file) as f: for line in f: test = json.loads(line) failed_tests.setdefault(test['name'], []).append(test) for tests in failed_tests.values(): tests.sort(key=lambda t: t['build']) return builds, failed_tests def find_match(fnorm, clusters): for ngram_dist, other in sorted((ngram_editdist(fnorm, x), x) for x in clusters): # allow up to 10% differences limit = int((len(fnorm)+len(other))/2.0 * 0.10) if ngram_dist > limit: continue if limit <= 1 and other != fnorm: # no chance continue dist = editdist(fnorm, other, limit) if dist < limit: return other return None def cluster_test(tests): """ Compute failure clusters given a list of failures for one test. Normalize the failure text prior to clustering to avoid needless entropy. Args: [{name: test_name, build: gs://foo/bar, failure_text: xxx}, ...] Returns: {cluster_text_1: [test1, test2, ...]} """ clusters = {} start = time.time() for test in tests: ftext = test['failure_text'] fnorm = normalize(ftext) if fnorm in clusters: clusters[fnorm].append(test) else: other = find_match(fnorm, clusters) if other: clusters[other].append(test) else: clusters[fnorm] = [test] if time.time() > start + 60: logging.info('bailing early, taking too long!') break return clusters
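The krzyzacy/test-infra triage/summarize.py entry above bounds edit distance cheaply by hashing every 4-gram of a string into one of 64 buckets and comparing the two histograms; since a single insertion, deletion, or substitution can change at most four 4-grams, a quarter of the absolute histogram difference never exceeds the true edit distance. A minimal standalone sketch of that heuristic (function names here are illustrative, not taken from the file):

import zlib

def ngram_counts(s, size=64):
    # Histogram of 4-grams, hashed into `size` buckets via CRC32.
    counts = [0] * size
    for i in range(len(s) - 3):
        counts[zlib.crc32(s[i:i + 4].encode("utf8")) & (size - 1)] += 1
    return counts

def ngram_editdist_lower_bound(a, b):
    # Each edit disturbs at most 4 ngrams, so this can only underestimate.
    return sum(abs(x - y) for x, y in zip(ngram_counts(a), ngram_counts(b))) // 4

if __name__ == "__main__":
    a = "connection refused to 10.0.0.1:8080"
    b = "connection refused to 10.0.0.2:9090"
    print(ngram_editdist_lower_bound(a, b))  # small value: the strings are near-identical

The original uses this bound only to skip the exact Berghel-Roach edit-distance computation when two failure texts cannot possibly fall within its 10% similarity limit.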
Tiendil/deworld
deworld/layers/vegetation_layer.py
Python
bsd-2-clause
5,066
0.003948
# coding: utf-8 import random from deworld.layers.base_layer import BaseLayer class VEGETATION_TYPE: DESERT = 0 GRASS = 1 FOREST = 2 class VegetationLayer(BaseLayer): MIN = 0.0 MAX = 1.0 HEIGHT_FOREST_BARIER_START = None HEIGHT_FOREST_BARIER_END = None HEIGHT_GRASS_BARIER_START = None HEIGHT_GRASS_BARIER_END = None TEMPERATURE_FOREST_BARIER_START = None TEMPERATURE_FOREST_BARIER_END = None TEMPERATURE_GRASS_BARIER_START = None TEMPERATURE_GRASS_BARIER_END = None WETNESS_FOREST_BARIER_START = None WETNESS_FOREST_BARIER_END = None WETNESS_GRASS_BARIER_START = None WETNESS_GRASS_BARIER_END = None FOREST_BORDER = None GRASS_BORDER = None SPAWN_PROBABILITY = None CURRENT_GRASS_POWER_BONUS = None CURRENT_FOREST_POWER_BONUS = None def __init__(self, **kwargs): super(VegetationLayer, self).__init__(default=VEGETATION_TYPE.DESERT, default_power=(0.0, 0.0), **kwargs) self._merge_config(self.config.LAYERS.VEGETATION) def serialize(self): return super(VegetationLayer, self).serialize() @classmethod def deserialize(cls, world, data): return cls(world=world, data=data['data'], power=data.get('power')) def add_power(self, x, y, power): old_power = self.power[y][x] self.power[y][x] = (old_power[0] + power[0], old_power[1] + power[1]) def _border_right_power(self, power, value, border_start, border_end): if value > border_start: if value > border_end: power = 0 else: power *= 1 - float(value - border_start) / (border_end - border_start) return power def _border_left_power(self, power, value
, border_start, border_end): if value < border_start: if value < border_end: power = 0 else: power *= 1 - float(border_start - value) / (border_start
- border_end) return power def can_spawn(self, x, y, type_): for y in range(y-1, y+1+1): for x in range(x-1, x+1+1): if not (0 <= y < self.h and 0 <= x < self.w): continue if self.data[y][x] in type_: return True return random.uniform(0, 1) < self.SPAWN_PROBABILITY def power_from_current_situation(self, x, y): grass, forest = 0.0, 0.0 for y in range(y-1, y+1+1): for x in range(x-1, x+1+1): if not (0 <= y < self.h and 0 <= x < self.w): continue if self.data[y][x] == VEGETATION_TYPE.GRASS: grass += self.CURRENT_GRASS_POWER_BONUS elif self.data[y][x] == VEGETATION_TYPE.FOREST: forest += self.CURRENT_FOREST_POWER_BONUS return random.uniform(0, grass), random.uniform(0, forest) def sync(self): for y in range(0, self.h): for x in range(0, self.w): power_points = self.power[y][x] power_grass, power_forest = power_points if self.data[y][x] == VEGETATION_TYPE.DESERT: power_grass = max(power_grass, power_forest) power_forest = max(power_grass, power_forest) height = self.world.layer_height.data[y][x] power_forest = self._border_right_power(power_forest, height, self.HEIGHT_FOREST_BARIER_START, self.HEIGHT_FOREST_BARIER_END) power_grass = self._border_right_power(power_grass, height, self.HEIGHT_GRASS_BARIER_START, self.HEIGHT_GRASS_BARIER_END) temperature = self.world.layer_temperature.data[y][x] power_forest = self._border_right_power(power_forest, temperature, self.TEMPERATURE_FOREST_BARIER_START, self.TEMPERATURE_FOREST_BARIER_END) power_grass = self._border_right_power(power_grass, temperature, self.TEMPERATURE_GRASS_BARIER_START, self.TEMPERATURE_GRASS_BARIER_END) wetness = self.world.layer_wetness.data[y][x] power_forest = self._border_left_power(power_forest, wetness, self.WETNESS_FOREST_BARIER_START, self.WETNESS_FOREST_BARIER_END) power_grass = self._border_left_power(power_grass, wetness, self.WETNESS_GRASS_BARIER_START, self.WETNESS_GRASS_BARIER_END) bonus_grass, bonus_forest = self.power_from_current_situation(x, y) power_grass += bonus_grass power_forest += bonus_forest if power_forest > power_grass and power_forest > self.FOREST_BORDER and self.can_spawn(x, y, [VEGETATION_TYPE.FOREST]): self.next_data[y][x] = VEGETATION_TYPE.FOREST elif power_grass > self.GRASS_BORDER and self.can_spawn(x, y, [VEGETATION_TYPE.GRASS, VEGETATION_TYPE.FOREST]): self.next_data[y][x] = VEGETATION_TYPE.GRASS else: self.next_data[y][x] = VEGETATION_TYPE.DESERT self.power[y][x] = (power_grass, power_forest)
huhongbo/dd-agent
resources/processes.py
Python
bsd-3-clause
3,086
0.00162
# stdlib from collections import namedtuple # project from resources import ( agg, ResourcePlugin, SnapshotDescriptor, SnapshotField, ) from utils.subprocess_output import get_subprocess_output class Processes(ResourcePlugin): RESOURCE_KEY = "processes" FLUSH_INTERVAL = 1 # in minutes def describe_snapshot(self): return SnapshotDescriptor( 1, SnapshotField("user", 'str', aggregator=agg.append, temporal_aggregator=agg.append), SnapshotField("pct_cpu", 'float'), SnapshotField("pct_mem", 'float'), SnapshotField("vsz", 'int'), SnapshotField("rss", 'int'), SnapshotField("family", 'str', aggregator=None, temporal_aggregator=None, group_on=True, temporal_group_on=True), SnapshotField("ps_count", 'int')) def _get_proc_list(self): # Get output from ps try: process_exclude_args = self.config.get('exclude_process_args', False) if process_exclude_args: ps_arg = 'aux' else: ps_arg = 'auxww' output, _, _ = get_subprocess_output(['ps', ps_arg], self.log) processLines = output.splitlines() # Also removes a trailing empty line except Exception: self.log.exception('Cannot get process list') raise del processLines[0] # Removes the headers processes = [] for line in processLines: line = line.split(None, 10) processes.append(map(lambda s: s.strip(), line)) return processes @staticmethod def group_by_family(o): return o[5] @staticmethod def filter_by_usage(o): # keep everything over 1% (cpu or ram) return o[0] > 1 or o[1] > 1 def _parse_proc_list(self, processes): def _compute_family(command): if command.startswith('['): return 'kernel' else: return (command.split()[0]).split('/')[-1] PSLine = namedtuple("PSLine", "user,pid,pct_cpu,pct_mem,vsz,rss,tty,stat,started,time,command") self.start_snapshot() for line in processes: t
ry: psl = PSLine(*line) self.add_to_snapshot([psl.user, float(psl.pct_cpu), float(psl.pct_mem), int(psl.vsz), int(psl.rss),
_compute_family(psl.command), 1]) except Exception: pass self.end_snapshot(group_by=self.group_by_family) def flush_snapshots(self, snapshot_group): self._flush_snapshots(snapshot_group=snapshot_group, group_by=self.group_by_family, filter_by=self.filter_by_usage) def check(self): self._parse_proc_list(self._get_proc_list())
blossomica/airmozilla
airmozilla/closedcaptions/views.py
Python
bsd-3-clause
2,235
0
import pycaption from django import http from django.shortcuts import get_object_or_404 from airmozilla.closedcaptions.models import ClosedCaptions class TxtWriter(pycaption.base.BaseWriter): def write(self, caption_set): lang = caption_set.get_languages()[0] captions = caption_set.get_captions(lang) output = 'Language: {}\n\n'.format(lang
) for caption in captions: line = caption.get_text().replace('\n', ' ') if line.startswith('- '): output += '\n\n' output += line
+ ' ' return output SUPPORTED_WRITERS = { 'dfxp': pycaption.DFXPWriter, 'ttml': pycaption.DFXPWriter, 'sami': pycaption.SAMIWriter, 'srt': pycaption.SRTWriter, 'scc': pycaption.SCCWriter, 'webvtt': pycaption.WebVTTWriter, 'txt': TxtWriter, } FILE_EXTENSIONS = { 'dfxp': 'dfxp', 'ttml': 'dfxp', 'sami': 'sami', 'srt': 'srt', 'scc': 'scc', 'webvtt': 'vtt', 'txt': 'txt', } CONTENT_TYPES = { 'txt': 'text/plain', 'sami': 'text/xml', 'dfxp': 'application/ttml+xml; charset=utf-8', 'vtt': 'text/vtt', } def download(request, filename_hash, id, slug, extension): closedcaptions = get_object_or_404( ClosedCaptions, id=id, event__slug__iexact=slug, ) if extension not in FILE_EXTENSIONS.values(): raise http.Http404('Unrecognized extension') if closedcaptions.filename_hash != filename_hash: raise http.Http404('Unrecognized hash') for key, ext in FILE_EXTENSIONS.items(): if ext == extension: output_writer = SUPPORTED_WRITERS[key] content = closedcaptions.file.read() if not ( closedcaptions.file.name.lower().endswith('.ttml') or closedcaptions.file.name.lower().endswith('.dfxp') ): content = content.decode('utf-8') reader = pycaption.detect_format(content) assert reader converter = pycaption.CaptionConverter() converter.read(content, reader()) response = http.HttpResponse() response['Content-Type'] = CONTENT_TYPES.get(extension, 'text/plain') response.write(converter.write(output_writer())) return response
wmayner/pyphi
pyphi/exceptions.py
Python
gpl-3.0
653
0
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # exceptions.py """PyPhi exceptions.""" class StateUnreachableError(ValueError): """The current state cannot be reached from any previous state.""" def __init__(self, state): self.state = state msg = "The state {} cannot be reached in the given TPM." super().__init__(msg.format(state)) class ConditionallyDependentError(ValueError)
: """The TPM is conditionally dependent.""" class JSONVersionError(ValueError): """JSON was serialized with a different version of PyPhi.""" class WrongDirectionError(ValueError):
"""The wrong direction was provided."""
jerrylei98/Dailydos
wsgi.py
Python
mit
77
0.025974
fr
om app import application if __name__=="__main__": application.run()
mattr555/AtYourService-school
main/__init__.py
Python
mit
1,906
0.005771
from django.contrib.contenttypes.mo
dels import ContentType from django.contrib.auth.models import Permission, Group from django.contrib.sites.models import Site from django.db.models.signals import post_save, post_migrate from django.core.exceptions import ObjectDoesNotExist from main.models impor
t SiteSettings, UserProfile, User # custom user related permissions def add_user_permissions(sender, **kwargs): pass def add_groups(sender, **kwargs): ct = ContentType.objects.get(app_label='auth', model='user') perm, created = Permission.objects.get_or_create(codename='can_view', name='Can View Users', content_type=ct) group, created = Group.objects.get_or_create(name='Volunteer') if created: p = Permission.objects.get(codename='add_userevent') group.permissions.add(p) group, created = Group.objects.get_or_create(name='Org_Admin') if created: p = Permission.objects.get(codename='add_organization') group.permissions.add(p) group, created = Group.objects.get_or_create(name='NHS_Admin') if created: p = Permission.objects.get(codename='can_view') group.permissions.add(p) if not SiteSettings.objects.exists(): settings = SiteSettings(site=Site.objects.get(pk=1), candidate_leadership_hours=50, candidate_service_hours=100, member_service_hours=6).save() def create_userprof(sender, instance, created, **kwargs): """for when the user is created on the first syncdb""" if created and instance.is_superuser: try: up = instance.user_profile except ObjectDoesNotExist: UserProfile(user=instance, email_valid=True, grad_class=2000, membership_status='MEM').save() #post_migrate.connect(add_user_permissions, sender=auth_models) # post_migrate.connect(add_groups) post_save.connect(create_userprof, sender=User, dispatch_uid="create_userprof")
hackerspace-ntnu/website
userprofile/migrations/0030_skill.py
Python
mit
955
0.004188
# Generated by Django 3.0.2 on 2020-03-04 20:08 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('files', '0004_image_compressed'), ('userprofile', '0029_auto_20200304_2007'), ] operations = [ migrations.CreateModel( name='Skill', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize
=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.TextField(
)), ('category', models.ManyToManyField(to='userprofile.Category')), ('prerequisites', models.ManyToManyField(blank=True, to='userprofile.Skill')), ('thumb', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='files.Image')), ], ), ]
chrisidefix/visvis
functions/volread.py
Python
bsd-3-clause
1,322
0.008321
# -*- coding: utf-8 -*- # Copyright (C) 2012, Almar Klein # # Visvis is distributed under the terms of the (new) BSD License. # The full license can be found in 'license.txt'. import visvis as vv import numpy as np import os # Try importing imageio imageio = None try: import imageio except ImportError: pass def volread(filename): """ volread(filename) Read volume from a file. If filename is 'stent', read a dedicated test dataset. For reading any other kind of volume, the imageio package is required. """ if filename == 'stent': # Get full filename path = vv.misc.getResourceDir() filename2 = os.path.join(path, 'stent_vol.ssdf') if os.path.isfile(filename2): filename = filename2 else: raise IOError("File '%s' does not exist." % filename) # Load s = vv.ssdf.load(filename) return s.vol.astype('int16') * s.colorscale elif imageio is not None: return imageio.volread(filename) else: raise RuntimeError("visvis.volread needs the imageio package to read arbitrary files.") if __name__ == '__main__': vol = vv.volread('stent') t = vv.volshow(vol) t.renderStyle = 'mip' # maximum intensity projection (is the default)
paulovn/aiml-chatbot-kernel
aimlbotkernel/__init__.py
Python
bsd-3-clause
97
0.010309
__version__ = '1.0.4' KERNEL_NAME = 'aimlbot' LANGUAGE = 'chatbot' DISPLAY_NAME = 'AIML Chatbot'
coolman565/blu_two
commander/commander_impl.py
Python
mit
1,705
0.001173
import logging import os import re import subprocess import time from ctypes.wintypes import MAX_PATH from commander import Commander, Response logger = logging.get
Logger(__name__) class Com
manderImpl(Commander): def __init__(self): super().__init__() def call(self, cmd: str) -> Response: logger.info('Executing command, command: %s', cmd) start: int = time.time() completed = subprocess.run( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) end: int = time.time() duration: int = end - start response: Response = Response( completed.returncode, completed.stdout.decode('utf-8').split(os.linesep), completed.stderr.decode('utf-8').split(os.linesep) ) self.log_output(cmd=cmd, response=response) logger.info('Finished, execution_time: %d, return_code: %d', duration, response.return_code) return response def log_output(self, cmd: str, response: Response) -> None: dir: str = os.path.join('F:\\workspace\\blu_two\\log', self.slugify(cmd)) if len(dir) > MAX_PATH: dir = dir[0:MAX_PATH] os.makedirs(dir, exist_ok=True) with open('{}/return_code'.format(dir), 'w') as file: file.write(str(response.return_code)) with open('{}/std_out'.format(dir), 'w') as file: for line in response.std_out: file.write("{line}\n".format(line=line)) with open('{}/std_err'.format(dir), 'w') as file: for line in response.std_err: file.write("{line}\n".format(line=line))
ajoshiusc/brainsuite-workflows
utility_scripts/main_check_remaining.py
Python
mit
1,120
0.000893
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Thu Apr 27 12:16:26 2017 @author: Anand A Joshi, Divya Varadarajan """ import glob from os.path import isfile, split import configparser config_file = u'/big_disk/ajoshi/ABIDE2/study.cfg' Config = configparser.ConfigParser() Config.read(config_file) Config.sections() STUDY_DIR = Config.get('CSESVREG', 'STUDY_DIR') NPROC = int(Config.get('CSESVREG', 'NPROC')) BST_INSTALL = Config.get('CSESVREG', 'BST_INSTALL') SVREG_ATLAS = Config.get('CSESVREG', 'SVREG_ATLAS') SVREG_FLAGS = Config.get('CSESVREG', 'SVREG_FLAGS') CSE_EXE = Config.get('CSESVREG', 'CSE_EXE') SVREG_EXE = Config.get('CSESVREG', 'SVREG_EXE') sublist = lst = glob.glob(STUDY_DIR+'/*') SMOOTHNESS = '6' ind = 0 cmdln1 = [] cmdln2 = [] incom = 0 com = 0 for sub in sublist: img = sub + '/anat/t1.roiwise.stats.txt' subpath, filename = split(img) outsurfname = subpath + '/t1.heat_sol_comp.mat' # print img if not isfile(outsurfname): incom += 1 print outsurfname continue com += 1 print str(incom) + ' remaining ' + str(com) + ' done'
checkr/fdep
fdep/backends/http.py
Python
mit
750
0
import requests from fdep.backends import StorageBackend class HTTPBackend(StorageBackend): """Implement HTTP/HTTPS.""" SCHEME_NAME = 'http' def get_to(self, local_path): r = requests.get(self.url, stream=True) total_length = int(r.headers.get('content-length', 0)) self.progressbar.start_progress(total_length) with open(local_path, 'wb') as f: for chunk in r.iter_content(10240): f.write(chunk) self.progressbar.progress_callback(len(chunk)) self.progressbar.end_progress() def put_from(self, local_path): raise NotImplementedError("HTTP backend does not support uploading") class HTTPSBackend(HTTPBackend): SCHEME_NAME = 'https'
Reality9/spiderfoot
modules/sfp_intfiles.py
Python
gpl-2.0
6,481
0.003395
# -*- coding: utf-8 -*- # ---------------------------------
---------------------------------------------- # Name: sfp_intfiles # Purpose:
From Spidering and from searching search engines, identifies # files of potential interest. # # Author: Steve Micallef <steve@binarypool.com> # # Created: 06/04/2014 # Copyright: (c) Steve Micallef 2014 # Licence: GPL # ------------------------------------------------------------------------------- import re import urllib from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent class sfp_intfiles(SpiderFootPlugin): """Interesting Files:Footprint:Identifies potential files of interest, e.g. office documents, zip files.""" # Default options opts = { 'pages': 20, # Number of search results pages to iterate 'fileexts': ["doc", "docx", "ppt", "pptx", "pdf", 'xls', 'xlsx', 'zip'], 'usesearch': True, 'searchengine': "yahoo" } # Option descriptions optdescs = { 'pages': "Number of search engine results pages to iterate through if using one.", 'fileexts': "File extensions of files you consider interesting.", 'usesearch': "Use search engines to quickly find files. If false, only spidering will be used.", 'searchengine': "If using a search engine, which one? google, yahoo or bing." } results = list() def setup(self, sfc, userOpts=dict()): self.sf = sfc self.results = list() for opt in userOpts.keys(): self.opts[opt] = userOpts[opt] # What events is this module interested in for input def watchedEvents(self): return ["INTERNET_NAME", "LINKED_URL_INTERNAL"] # What events this module produces # This is to support the end user in selecting modules based on events # produced. def producedEvents(self): return ["SEARCH_ENGINE_WEB_CONTENT", "INTERESTING_FILE"] def yahooCleaner(self, string): return " url=\"" + urllib.unquote(string.group(1)) + "\" " # Handle events sent to this module def handleEvent(self, event): eventName = event.eventType srcModuleName = event.module eventData = event.data self.sf.debug("Received event, " + eventName + ", from " + srcModuleName) if eventName == "INTERNET_NAME" and not self.opts['usesearch']: self.sf.debug("Not using a search engine to find interesting files.") return None if eventData in self.results: return None else: self.results.append(eventData) if eventName == "LINKED_URL_INTERNAL": for fileExt in self.opts['fileexts']: if "." + fileExt.lower() in eventData.lower(): if eventData in self.results: continue else: self.results.append(eventData) evt = SpiderFootEvent("INTERESTING_FILE", eventData, self.__name__, event) self.notifyListeners(evt) return None # Handling INTERNET_NAME event.. 
for fileExt in self.opts['fileexts']: # Sites hosted on the domain if self.opts['searchengine'].lower() == "google": pages = self.sf.googleIterate("site:" + eventData + "+" + "%2Bext:" + fileExt, dict(limit=self.opts['pages'], useragent=self.opts['_useragent'], timeout=self.opts['_fetchtimeout'])) if self.opts['searchengine'].lower() == "bing": pages = self.sf.bingIterate("site:" + eventData + "+" + "%2Bext:" + fileExt, dict(limit=self.opts['pages'], useragent=self.opts['_useragent'], timeout=self.opts['_fetchtimeout'])) if self.opts['searchengine'].lower() == "yahoo": pages = self.sf.yahooIterate("site:" + eventData + "+" + "%2Bext:" + fileExt, dict(limit=self.opts['pages'], useragent=self.opts['_useragent'], timeout=self.opts['_fetchtimeout'])) if pages is None: self.sf.info("No results returned from " + self.opts['searchengine'] + " for " + fileExt + " files.") continue for page in pages.keys(): if page in self.results: continue else: self.results.append(page) # Check if we've been asked to stop if self.checkForStop(): return None # Submit the gresults for analysis evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", pages[page], self.__name__, event) self.notifyListeners(evt) if self.opts['searchengine'].lower() == "yahoo": res = re.sub("RU=(.[^\/]+)\/RK=", self.yahooCleaner, pages[page], 0) else: res = pages[page] links = self.sf.parseLinks(page, res, eventData) if len(links) == 0: continue for link in links: if link in self.results: continue else: self.results.append(link) if self.sf.urlFQDN(link).endswith(eventData) and \ "." + fileExt.lower() in link.lower(): self.sf.info("Found an interesting file: " + link) evt = SpiderFootEvent("INTERESTING_FILE", link, self.__name__, event) self.notifyListeners(evt) # End of sfp_intfiles class
estebistec/django-get-forms
examples/demo/demo/forms.py
Python
bsd-3-clause
127
0
# -*- coding: utf-8 -*- from django import forms class SearchForm(forms.Form): query = forms.CharField(required=False)
xtuyaowu/jtyd_python_spider
page_parse/user/public.py
Python
mit
6,454
0.000775
# -*-coding:utf-8 -*- import re import json from bs4 import BeautifulSoup from page_parse import status from decorators.decorator import parse_decorator from db.models import UserRelation from utils.filters import url_filter from db.user_relation import save_relations def get_userid(html): return status.get_userid(html) def get_username(html): return status.get_username(html) def get_userdomain(html): return status.get_userdomain(html) @parse_decorator(1) def _get_header(html): soup = BeautifulSoup(html, "html.parser") scripts = soup.find_all('script') pattern = re.compile(r'FM.view\((.*)\)') cont = '' for script in scripts: m = pattern.search(script.string) if m and 'pl.header.head.index' in script.string: all_info = m.group(1) cont = json.loads(all_info)['html'] return cont def get_verifytype(html): """ :param html: page source :return: 0 stands for unauthorized,1 stands for persional authentication,2 stands for enterprise authentication """ if 'icon_pf_approve_co' in html: return 2 elif 'icon_pf_approve' in html: return 1 else: return 0 @parse_decorator(1) def get_verifyreason(html, verify_type): """ details for authentication :param html: page source :param verify_type: authentication type :return: authentication info """ if verify_type == 1 or verify_type == 2: soup = BeautifulSoup(_get_header(html), 'html.parser') return soup.find(attrs={'class': 'pf_intro'})['title'] else: return '' @parse_decorator(1) def get_headimg(html): """ Get the head img url of current user :param html: page source :return: head img url """ soup = BeautifulSoup(_get_header(html), 'html.parser') try: headimg = url_filter(soup.find(attrs={'class': 'photo_wrap'}).find(attrs={'class': 'photo'})['src']) except AttributeError: headimg = '' return headimg @parse_decorator(1) def get_left(html): """ The left part of the page, which is public """ soup = BeautifulSoup(html, "html.parser") scripts = soup.find_all('script') pattern = re.compile(r'FM.view\((.*)\)') cont = '' l_id = '' # first ensure the left part for script in scripts: m = pattern.search(script.string) if m and 'WB_frame_b' in script.string: all_info = m.group(1) cont = json.loads(all_info)['html'] lsoup = BeautifulSoup(cont, 'html.parser') l_id = lsoup.find(attrs={'class': 'WB_frame_b'}).div['id'] for script in scripts: m = pattern.search(script.string) if m and l_id in script.string: all_info = m.group(1) try: cont = json.loads(all_info)['html'] except KeyError: return '' return cont @parse_decorator(1) def get_right(html): """ Parse the right part of user detail :param html: page source :return: the right part of user info page """ soup = BeautifulSoup(html, "html.parser") scripts = soup.find_all('script') pattern = re.compile(r'FM.view\((.*)\)') cont = '' # first ensure right part,enterprise users may have two r_id rids = [] for script in scripts: m = pattern.search(script.string) if m and 'WB_frame_c' in script.string: all_info = m.group(1) cont = json.loads(all_info).get('html', '') if not cont: return '' rsoup = BeautifulSoup(cont, 'html.parser') r_ids = rsoup.find(attrs={'class': 'WB_frame_c'}).find_all('div') for r in r_ids: rids.append(r['id']) for script in scripts: for r_id in rids: m = pattern.search(script.string) if m and r_id in script.string: all_info = m.group(1) cont += json.loads(all_info).get('html', '') return cont @parse_decorator(0) def get_level(html): """ Get the level of users """ pattern = '<span>Lv.(.*?)<\\\/span>' rs = re.search(pattern, html) if rs: return rs.group(1) else: return 0 @parse_decorator(2) def get_fans
_or_fol
lows(html, uid, type): """ Get fans or follows and store their relationships :param html: current page source :param uid: current user id :param type: type of relations :return: list of fans or followers """ if html == '': return list() pattern = re.compile(r'FM.view\((.*)\)') soup = BeautifulSoup(html, "html.parser") scripts = soup.find_all('script') user_ids = list() relations = list() for script in scripts: m = re.search(pattern, script.string) if m and 'pl.content.followTab.index' in script.string: all_info = m.group(1) cont = json.loads(all_info).get('html', '') soup = BeautifulSoup(cont, 'html.parser') follows = soup.find(attrs={'class': 'follow_box'}).find_all(attrs={'class': 'follow_item'}) pattern = 'uid=(.*?)&' for follow in follows: m = re.search(pattern, str(follow)) if m: r = m.group(1) # filter invalid ids if r.isdigit(): user_ids.append(r) relations.append(UserRelation(uid, r, type)) save_relations(relations) return user_ids def get_max_crawl_pages(html): """ Get the max page we can crawl :param html: current page source :return: max page number we can crawl """ if html == '': return 1 pattern = re.compile(r'FM.view\((.*)\)') soup = BeautifulSoup(html, "html.parser") scripts = soup.find_all('script') length = 1 for script in scripts: m = re.search(pattern, script.string) if m and 'pl.content.followTab.index' in script.string: all_info = m.group(1) cont = json.loads(all_info).get('html', '') soup = BeautifulSoup(cont, 'html.parser') pattern = 'uid=(.*?)&' if 'pageList' in cont: urls2 = soup.find(attrs={'node-type': 'pageList'}).find_all(attrs={ 'class': 'page S_txt1', 'bpfilter': 'page'}) length += len(urls2) return length
thelastpickle/python-driver
cassandra/io/asyncorereactor.py
Python
apache-2.0
13,797
0.001667
# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import atexit from collections import deque from functools import partial import logging import os import socket import sys from threading import Lock, Thread, Event import time import weakref import sys from six.moves import range try: from weakref import WeakSet except ImportError: from cassandra.util import WeakSet # noqa import asyncore try: import ssl except ImportError: ssl = None # NOQA from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager log = logging.getLogger(__name__) _dispatcher_map = {} def _cleanup(loop_weakref): try: loop = loop_weakref() except ReferenceError: return loop._cleanup() class WaitableTimer(Timer): def __init__(self, timeout, callback): Timer.__init__(self, timeout, callback) self.callback = callback self.event = Event() self.final_exception = None def finish(self, time_now): try: finished = Timer.finish(self, time_now) if finished: self.event.set() return True return False except Exception as e: self.final_exception = e self.event.set() return True def wait(self, timeout=None): self.event.wait(timeout) if self.final_exception: raise self.final_exception class _PipeWrapper(object): def __init__(self, fd): self.fd = fd def fileno(self): return self.fd def close(self): os.close(self.fd) def getsockopt(self, level, optname, buflen=None): # act like an unerrored socket for the asyncore error handling if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen: return 0 raise NotImplementedError() class _AsyncoreDispatcher(asyncore.dispatcher): def __init__(self, socket): asyncore.dispatcher.__init__(self, map=_dispatcher_map) # inject after to avoid base class validation self.set_socket(socket) self._notified = False def writable(self): return False def validate(self): assert not self._notified self.notify_loop() assert self._notified self.loop(0.1) assert not self._notified def loop(self, timeout): asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1) class _AsyncorePipeDispatcher(_AsyncoreDispatcher): def __init__(self): self.read_fd, self.write_fd = os.pipe() _AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd)) def writable(self): return False def handle_read(self): while len(os.read(self.read_fd, 4096)) == 4096: pass self._notified = False def notify_loop(self): if not self._notified: self._notified = True os.write(self.write_fd, b'x') class _AsyncoreUDPDispatcher(_AsyncoreDispatcher): """ Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because it relies on local port binding. Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per instance, or this could be specialized to scan until an address is found. 
To use:: from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher """ bind_address = ('localhost', 10000) def __init__(self): self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self._socket.bind(self.bind_
address) self._socket.setblocking(0) _AsyncoreDispatcher.__init__(self, self._socket) def handle_read(self): try: d = self._socket.recvfrom(1) while d and d[1]: d = s
elf._socket.recvfrom(1) except socket.error as e: pass self._notified = False def notify_loop(self): if not self._notified: self._notified = True self._socket.sendto(b'', self.bind_address) def loop(self, timeout): asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1) class _BusyWaitDispatcher(object): max_write_latency = 0.001 """ Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check if anything is writable. """ def notify_loop(self): pass def loop(self, timeout): if not _dispatcher_map: time.sleep(0.005) count = timeout // self.max_write_latency asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count) def validate(self): pass def close(self): pass class AsyncoreLoop(object): timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts _loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher def __init__(self): self._pid = os.getpid() self._loop_lock = Lock() self._started = False self._shutdown = False self._thread = None self._timers = TimerManager() try: dispatcher = self._loop_dispatch_class() dispatcher.validate() log.debug("Validated loop dispatch with %s", self._loop_dispatch_class) except Exception: log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class) dispatcher.close() dispatcher = _BusyWaitDispatcher() self._loop_dispatcher = dispatcher atexit.register(partial(_cleanup, weakref.ref(self))) def maybe_start(self): should_start = False did_acquire = False try: did_acquire = self._loop_lock.acquire(False) if did_acquire and not self._started: self._started = True should_start = True finally: if did_acquire: self._loop_lock.release() if should_start: self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop") self._thread.daemon = True self._thread.start() def wake_loop(self): self._loop_dispatcher.notify_loop() def _run_loop(self): log.debug("Starting asyncore event loop") with self._loop_lock: while not self._shutdown: try: self._loop_dispatcher.loop(self.timer_resolution) self._timers.service_timeouts() except Exception: log.debug("Asyncore event loop stopped unexepectedly", exc_info=True) break self._started = False log.debug("Asyncore event loop ended") def add_timer(self, timer): self._timers.add_timer(timer) # This function is called from a different thread than the event loop # thread, so for this call to be thread safe, we must wake up the loop # in case it's stuck at a select self.wake_loop() def _cleanup(self): global _dispatcher_map self._shutdown = True if not self._thread: return log.debug("Waiting for event loop thread to join...") self._thread.join(timeout=1.0) if self._thread.is_alive():
XeCycle/indico
indico/web/flask/blueprints/event/display/misc.py
Python
gpl-3.0
2,063
0.005332
# This file is part of Indico. # Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it
will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public Licens
e for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. from MaKaC.webinterface.rh import conferenceDisplay from indico.web.flask.blueprints.event.display import event # My conference event.add_url_rule('/my-conference/', 'myconference', conferenceDisplay.RHMyStuff) event.add_url_rule('/my-conference/contributions', 'myconference-myContributions', conferenceDisplay.RHConfMyStuffMyContributions) event.add_url_rule('/my-conference/sessions', 'myconference-mySessions', conferenceDisplay.RHConfMyStuffMySessions) event.add_url_rule('/my-conference/tracks', 'myconference-myTracks', conferenceDisplay.RHConfMyStuffMyTracks) # Other views event.add_url_rule('/other-view', 'conferenceOtherViews', conferenceDisplay.RHConferenceOtherViews) # EMail form event.add_url_rule('/email', 'EMail', conferenceDisplay.RHConferenceEmail, methods=('GET', 'POST')) event.add_url_rule('/email/send', 'EMail-send', conferenceDisplay.RHConferenceSendEmail, methods=('POST',)) # Participation invitation event.add_url_rule('/invitation/participant/<participantId>', 'confModifParticipants-invitation', conferenceDisplay.RHConfParticipantsInvitation, methods=('GET', 'POST')) event.add_url_rule('/invitation/participant/<participantId>/refuse', 'confModifParticipants-refusal', conferenceDisplay.RHConfParticipantsRefusal, methods=('GET', 'POST'))
ptomasroos/vitess
py/vtdb/keyrange_constants.py
Python
bsd-3-clause
450
0
# Copyright 2013, Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. # This is the shard name for when the keyrange covers the entire space # for unsharded database. SHARD_ZERO = '0' # Keyrange that spans the entire space, used # for unsharded database. NON_PARTIAL_KEYRANGE = '' MIN_KEY = '' MAX_KEY = '' KIT_UNSET = '' KIT_UINT64 = 'uint64' KIT_BYTES = 'bytes'
mlperf/training_results_v0.6
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/python/topi/nn/elemwise.py
Python
apache-2.0
1,936
0.002066
"""Elementwise operators""" from __future__ import absolute_import as _abs import tvm from .. import tag from ..util import get_const_int @tvm.tag_scope(tag=tag.ELEMWISE) def relu(x): """Take relu of input x. Parameters ---------- x : tvm.Tensor Input argument. Returns ------- y : tvm.Tensor The result. """ return tvm.compute(x.shape, lambda *i: tvm.max(x(*i), tvm.const(0, x.dtype))) @tvm.tag_scope(tag=tag.ELEMWISE) def leaky_r
elu(x, alpha): """Take leaky relu of input x. Parameters ---------- x : tvm.Tensor Input argument. alpha : float The slope for the small gradient when x < 0 Returns ------- y : tvm.Tensor The result. """ def _compute(*indices): value = x(*indices) calpha = tvm.const(alpha, value.dtype) return tvm.select(value > 0, value, value * calpha) return tvm.compute(x.shape, _compute) @tvm.tag_scope(tag=tag.BROADCAST) d
ef prelu(x, slope, axis=1): """ PReLU. It accepts two arguments: an input ``x`` and a weight array ``W`` and computes the output as :math:`PReLU(x) y = x > 0 ? x : W * x`, where :math:`*` is an elementwise multiplication for each sample in the batch. Arguments: x : tvm.Tensor Input argument. slope : tvm.Tensor Channelised slope tensor for prelu axis : int The axis where the channel data needs to be applied Returns: y : tvm.Tensor The result. Links: [http://arxiv.org/pdf/1502.01852v1.pdf] """ assert len(x.shape) == 4 and len(slope.shape) == 1 assert axis < len(x.shape) assert get_const_int(slope.shape[0]) == get_const_int(x.shape[axis]) def _compute_channelwise(*indices): return tvm.select(x(*indices) > 0, x(*indices), x(*indices) * slope(indices[axis])) return tvm.compute(x.shape, _compute_channelwise)
vkryachko/Vase
tests/test_multidict.py
Python
bsd-2-clause
640
0
import unittest from vase.util import MultiDict class MultiDictTests(unittest.TestCase): def test_setitem(self): md = MultiDict() key = 'hello' value = 'world' md[key] = value self.assertEqual(md[key], value) self.assertEqual(md.get(key), value) self.assertEqual(md.getlist(key), [value]) self.assertRaises(KeyError, md.__getitem__, "vasya") self.assertEqual(md.get("vasya"), None) self.assertEqual(list(md.items()), [(key, value)]) self.assertEqual(list(md.lists()), [(key, [value])]) self.assertEqual(list(md.values()), [value])
nagyistoce/netzob
src/netzob/UI/Vocabulary/Views/MessageTableView.py
Python
gpl-3.0
29,245
0.003112
# -*- coding: utf-8 -*- #+---------------------------------------------------------------------------+ #| 01001110 01100101 01110100 01111010 01101111 01100010 | #| | #| Netzob : Inferring communication protocols | #+---------------------------------------------------------------------------+ #| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry | #| This program is free software: you can redistribute it and/or modify | #| it under the terms of the GNU General Public License as published by | #| the Free Software Foundation, either version 3 of the License, or | #| (at your option) any later version. | #| | #| This program is distributed in the hope that it will be useful, | #| but WITHOUT ANY WARRANTY; without even the implied warranty of | #| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | #| GNU General Public License for more details. | #| | #| You should have received a copy of the GNU General Public License | #| along with this program. If not, see <http://www.gnu.org/licenses/>. | #+---------------------------------------------------------------------------+ #| @url : http://www.netzob.org | #| @contact : contact@netzob.org
| #| @sponsors : Amossys, http://www.amossys.fr | #| Supélec, http://www.rennes.supelec.
fr/ren/rd/cidre/ | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Standard library imports #+---------------------------------------------------------------------------+ import os import logging #+---------------------------------------------------------------------------+ #| Related third party imports #+---------------------------------------------------------------------------+ from gi.repository import Gtk, Gdk, GLib, Pango import gi from netzob.Common.SignalsManager import SignalsManager gi.require_version('Gtk', '3.0') #+---------------------------------------------------------------------------+ #| Local application imports #+---------------------------------------------------------------------------+ from netzob.Common.NetzobException import NetzobException from netzob.Common.ResourcesConfiguration import ResourcesConfiguration from netzob.Common.Type.Format import Format class MessageTableView(object): MAX_DISPLAYED_FIELDS = 200 def __init__(self, controller): self.controller = controller self.builder = Gtk.Builder() self.builder.add_from_file(os.path.join( ResourcesConfiguration.getStaticResources(), "ui", "vocabulary", "messageTable.glade")) self._getObjects(self.builder, ["messageTableBox", "fieldNameLabel", "messageTableScrolledWindow"]) self.builder.connect_signals(self.controller) self.displayedField = None # Make an empty treeview self.messageTableTreeView = self.__makeMessageTreeView() self.messageTableScrolledWindow.add(self.messageTableTreeView) self.messageTableTreeView.show() self.treeViewHeaderGroup = TreeViewHeaderWidgetGroup() def _getObjects(self, builder, objectsList): for object in objectsList: setattr(self, object, builder.get_object(object)) def __makeMessageTreeView(self): # Instanciate treeview messageTableTreeView = Gtk.TreeView() messageTableTreeView.connect("enter-notify-event", self.controller.messageTableTreeView_enter_notify_event_cb) messageTableTreeView.connect("leave-notify-event", self.controller.messageTableTreeView_leave_notify_event_cb) messageTableTreeView.connect("button-press-event", self.controller.messageTableTreeView_button_press_event_cb) messageTableTreeView.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE) messageTableTreeView.get_selection().connect("changed", self.controller.messageTableTreeView_changed_event_cb) messageTableTreeView.set_rules_hint(True) messageTableTreeView.set_grid_lines(Gtk.TreeViewGridLines.BOTH) # Configures it as a Drag Source messageTableTreeView.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [], Gdk.DragAction.MOVE) messageTableTreeView.connect("drag-data-get", self.__drag_data_get_event) messageTableTreeView.drag_source_add_text_targets() # Create columns if self.displayedField is None or len(self.displayedField.getExtendedFields()) < 1: return messageTableTreeView startOfColumns = 1 + self.displayedField.getExtendedFields()[0].getIndex() numOfColumns = startOfColumns + min(self.MAX_DISPLAYED_FIELDS, len(self.displayedField.getExtendedFields())) self.treeViewHeaderGroup.clear() for colIdx in range(startOfColumns, numOfColumns): (tvc, head) = self.__makeTreeViewColumn(startOfColumns, colIdx) #tvc.set_clickable(True) messageTableTreeView.append_column(tvc) but = tvc.get_button() box = but.get_children()[0] align = box.get_children()[0] align.connect("size-allocate", propagate_size_allocation) self.treeViewHeaderGroup.add(head) # Setup column headers. 
columns = messageTableTreeView.get_columns() for column in columns: column_widget = column.get_widget() column_header = find_closest_ancestor(column_widget, Gtk.Button) if column_header: column_header.connect('button-press-event', propagate_button_press_event) column_header.set_focus_on_click(False) return messageTableTreeView def refreshProperties(self): """refresh the properties like background color""" self.messageTableTreeView.queue_draw() def __makeTreeViewColumn(self, startOfColumns, i): i = i - 1 markupCellRenderer = Gtk.CellRendererText() treeViewColumn = Gtk.TreeViewColumn() field = self.displayedField.getFieldByIndex(i) headerWidget = TreeViewHeaderWidget(field, treeViewColumn, self) treeViewColumn.set_widget(headerWidget) treeViewColumn.set_resizable(True) treeViewColumn.pack_start(markupCellRenderer, True) treeViewColumn.add_attribute(markupCellRenderer, "markup", i + 2 - startOfColumns) markupCellRenderer.set_property("font", "monospace") return (treeViewColumn, headerWidget) def setDisplayedField(self, field): """Memorizes field as the displayed field in this message table and updates itself to display this field.""" self.displayedField = field self.update() def getDisplayedField(self): """Returns the currently displayed field in this message table""" return self.displayedField def updateMessageTableTreeView(self): """Performs a full update on the treeview displaying messages. You should call this method only if you need a full update of the table""" logging.debug("Start to update the message table") ## Remove former TreeView if necessary if self.messageTableTreeView is not None: self.messageTableScrolledWindow.remove(self.messageTableTreeView) if self.displayedField is None: return ## Create a new treeview self.messageTableTreeView = self.__makeMessageTreeView() ## Create and fill store for the create tree view
cliftonmcintosh/openstates
openstates/sd/__init__.py
Python
gpl-3.0
3,672
0.000272
from pupa.scrape import Jurisdiction, Organization import scrapelib import lxml.html from .people import SDLegislatorScraper from .bills import SDBillScraper class SouthDakota(Jurisdiction): division_id = "ocd-division/country:us/state:sd" classification = "government" name = "South Dakota" url = "http://www.sdlegislature.gov/" scrapers = { 'people': SDLegislatorScraper, 'bills': SDBillScraper } parties = [ {'name': 'Republican'}, {'name': 'Dem
ocratic'} ] legislative_sessions = [ { "_scraped_name": "2009 (84th) Session", "identifier": "2009",
"name": "2009 Regular Session" }, { "_scraped_name": "2010 (85th) Session", "identifier": "2010", "name": "2010 Regular Session" }, { "_scraped_name": "2011 (86th) Session", "identifier": "2011", "name": "2011 Regular Session", "start_date": "2011-01-11" }, { "_scraped_name": "2011 (86th) Special Session", "identifier": "2011s", "name": "2011 Special Session" }, { "_scraped_name": "2012 (87th) Session", "identifier": "2012", "name": "2012 Regular Session" }, { "_scraped_name": "2013 (88th) Session", "identifier": "2013", "name": "2013 Regular Session" }, { "_scraped_name": "2014 (89th) Session", "identifier": "2014", "name": "2014 Regular Session" }, { "_scraped_name": "2015 (90th) Session", "identifier": "2015", "name": "2015 Regular Session" }, { "_scraped_name": "2016 (91st) Session", "identifier": "2016", "name": "2016 Regular Session" }, { "_scraped_name": "2017 (92nd) Session", "identifier": "2017", "name": "2017 Regular Session" } ] ignored_scraped_sessions = [ "Previous Years" ] def get_organizations(self): legislature_name = "South Dakota State Legislature" lower_chamber_name = "House" lower_seats = 0 lower_title = "Representative" upper_chamber_name = "Senate" upper_seats = 0 upper_title = "Senator" legislature = Organization(name=legislature_name, classification="legislature") upper = Organization(upper_chamber_name, classification='upper', parent_id=legislature._id) lower = Organization(lower_chamber_name, classification='lower', parent_id=legislature._id) for n in range(1, upper_seats + 1): upper.add_post( label=str(n), role=upper_title, division_id='{}/sldu:{}'.format(self.division_id, n)) for n in range(1, lower_seats + 1): lower.add_post( label=str(n), role=lower_title, division_id='{}/sldl:{}'.format(self.division_id, n)) yield legislature yield upper yield lower def get_session_list(self): html = scrapelib.Scraper().get('http://www.sdlegislature.gov/' 'Legislative_Session/Menu.aspx').text doc = lxml.html.fromstring(html) sessions = doc.xpath('//div[@id="ctl00_ContentPlaceHolder1_BlueBoxLeft"]//ul/li' '/a/div/text()') return sessions
dmartin35/pronosfoot
tools/utils.py
Python
mit
1,623
0.02711
""" UTILITIES """ def distinct(inlist): """ returns a list of distinct values (no duplicated values) """ outlist = [] for elem in inlist: if not elem in outlist: outlist.append(elem) return outlist def list_evolution(list1,list2): """ returns the index evolution of each element of the list 1 compared to the index within list 2 NB: if lists length do not match, place None value at missing index """ # return [list2.index(x) - list1.index(x) for x in list1 if x in list2] evo = [] for x in list1: if x in list2: evo.append(list2.index(x) - list1.index(x)) el
se: evo.append(None) return evo def list_to_dict(keyslist,valueslist): """ convert lists of keys and values to a dict
""" if len(keyslist) != len(valueslist): return {} mydict = {} for idx in range(0,len(keyslist)): mydict[keyslist[idx]] = valueslist[idx] return mydict #evo_progress: return the evolution of each item of a list compared to its left value evo_progress = lambda pos: [] if pos == [] else [j for i in [[0],[pos[i-1]-pos[i] for i in range(1,len(pos))]] for j in i] if __name__ == '__main__': mylist = ['A','B','A','C','A','A','D'] print('ORIGINAL:', mylist) print('DISTINCT', distinct(mylist)) mylist1 = ['A','B','C','D'] mylist2 = ['A','D','B','C'] print(list_evolution(mylist2,mylist1)) print(list_to_dict(['a','b','c'], [1,2,3])) print(evo_progress([])) print(evo_progress([1])) print(evo_progress([1,4,2,4,8,5,5,3]))
Erotemic/local
depricated/speech/Examples/_tortoisesvn.py
Python
gpl-3.0
6,848
0.003505
# coding=utf-8 # # (c) Copyright 2008 by Daniel J. Rocco # Licensed under the Creative Commons Attribution- # Noncommercial-Share Alike 3.0 United States License, see # <http://creativecommons.org/licenses/by-nc-sa/3.0/us/> # """ Command-module for controlling **TortoiseSVN** from Windows Explorer ============================================================================ This module implements various voice-commands for using the Windows Explorer extensions of the TortoiseSVN subversion client. (c) Copyright 2008 by Daniel J. Rocco Licensed under the Creative Commons Attribution- Noncommercial-Share Alike 3.0 United States License, see <http://creativecommons.org/licenses/by-nc-sa/3.0/us/> """ import os.path import subprocess import os import win32gui import urllib #from subprocess import Popen from dragonfly import (Grammar, ConnectionGrammar, AppContext, CompoundRule, Choice, Window, Config, Section, Item) #--------------------------------------------------------------------------- # Set up this module's configuration. config = Config("TortoiseSVN") config.tortoisesvn = Section("TortoiseSVN configuration") config.tortoisesvn.path = Item(r'C:\Program Files\TortoiseSVN\bin\TortoiseProc.exe') config.tortoisesvn.command = Item("(tortoise | subversion) <command>") config.tortoisesvn.global_command = Item("(tortoise | subversion) <command> <predef>") config.tortoisesvn.actions = Item({ "add": "add", "checkout": "checkout", "commit": "commit", "revert": "revert", "merge": "merge", "delete": "delete", "diff": "diff", "log": "log", "import": "import", "update": "update", "revert": "revert", "ignore": "ignore", "rename": "rename", "properties": "properties", "reposit
ory": "repobrowser", "edit conflict": "conflicteditor", }, ) config.tortoisesvn.predef = Item({ "dragonfly | dee fly": r"C:\data\projects\Dra
gonfly\work dragonfly", }, ) #config.generate_config_file() config.load() #--------------------------------------------------------------------------- # Utility generator function for iterating over COM collections. def collection_iter(collection): for index in xrange(collection.Count): yield collection.Item(index) #--------------------------------------------------------------------------- # This module's grammar for use within Windows Explorer. class ExplorerGrammar(ConnectionGrammar): def __init__(self): ConnectionGrammar.__init__( self, name="Explorer subversion", context=AppContext(executable="explorer"), app_name="Shell.Application" ) def get_active_explorer(self): handle = Window.get_foreground().handle for window in collection_iter(self.application.Windows()): if window.HWND == handle: return window self._log.warning("%s: no active explorer." % self) return None def get_current_directory(self): window = self.get_active_explorer() path = urllib.unquote(window.LocationURL[8:]) if path.startswith("file:///"): path = path[8:] return path def get_selected_paths(self): window = self.get_active_explorer() items = window.Document.SelectedItems() paths = [] for item in collection_iter(items): paths.append(item.Path) return paths def get_selected_filenames(self): paths = self.get_selected_paths() return [os.path.basename(p) for p in paths] #--------------------------------------------------------------------------- # Create the rule from which the other rules will be derived. # This rule implements the method to execute TortoiseSVN. class TortoiseRule(CompoundRule): def _execute_command(self, path_list, command): # Construct arguments and spawn TortoiseSVN. path_arg = '/path:"%s"' % str('*'.join(path_list)) command_arg = "/command:" + command os.spawnv(os.P_NOWAIT, config.tortoisesvn.path, [config.tortoisesvn.path, command_arg, path_arg]) # For some reason the subprocess module gives quote-related errors. #Popen([tortoise_path, command_arg, path_arg]) #--------------------------------------------------------------------------- # Create the rule for controlling TortoiseSVN from Windows Explorer. class ExplorerCommandRule(TortoiseRule): spec = config.tortoisesvn.command extras = [ Choice("command", config.tortoisesvn.actions), ] def _process_recognition(self, node, extras): selection = self.grammar.get_selected_paths() if not selection: selection = [self.grammar.get_current_directory()] self._execute_command(selection, extras["command"]) #--------------------------------------------------------------------------- # Create the rule for controlling TortoiseSVN from anywhere. class GlobalCommandRule(TortoiseRule): spec = config.tortoisesvn.global_command extras = [ Choice("command", config.tortoisesvn.actions), Choice("predef", config.tortoisesvn.predef), ] def _process_recognition(self, node, extras): path_list = [extras["predef"]] command = extras["command"] self._execute_command(path_list, command) #--------------------------------------------------------------------------- # Load the grammar instance and define how to unload it. explorer_grammar = ExplorerGrammar() explorer_grammar.add_rule(ExplorerCommandRule()) global_grammar = Grammar("TortoiseSVN global") global_grammar.add_rule(GlobalCommandRule()) explorer_grammar.load() global_grammar.load() # Unload function which will be called by natlink at unload time. 
def unload(): global explorer_grammar, global_grammar if explorer_grammar: explorer_grammar.unload() explorer_grammar = None if global_grammar: global_grammar.unload() global_grammar = None
antoinecarme/sklearn2sql_heroku
tests/classification/FourClass_100/ws_FourClass_100_AdaBoostClassifier_db2_code_gen.py
Python
bsd-3-clause
144
0.013889
from sklearn2sql_heroku.tests.classification import generic as class_gen class_gen.test_model("AdaBoostClassifier", "FourClass_100" , "db2")
sternshus/arelle2.7
svr-2.7/arelle/plugin/profileFormula.py
Python
apache-2.0
5,608
0.008559
''' Profile Formula Validation is an example of a plug-in to GUI menu that will profile formula execution. (c) Copyright 2012 Mark V Systems Limited, All rights reserved. ''' import os from tkinter import simpledialog, messagebox def profileFormulaMenuEntender(cntlr, menu): # Extend menu with an item for the profile formula plugin menu.add_command(label="Profile formula validation", underline=0, command=lambda: profileFormulaMenuCommand(cntlr) ) def profileFormulaMenuCommand(cntlr): # save DTS menu item has been invoked if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None: cntlr.addToLog("No taxonomy loaded.") return # get file name into which to save log file while in foreground thread p
rofileReportFile = cntlr.uiFileDialog("save", title=_("arelle - Save Formula Profile Report"), initialdir=cntlr.config.setdefault("formulaProfileReportDir","."), filetypes=[(_("Profile report file .log"), "*.log")], defaultextension=".log") if not profileReport
File: return False errMsg = "" maxRunTime = 0 while (1): timeout = simpledialog.askstring(_("arelle - Set formula run time limit"), _("{0}You may enter the maximum number of minutes to run formulas.\n" "(Leave empty for no run time limitation.)".format(errMsg)), parent=cntlr.parent) if timeout: try: maxRunTime = float(timeout) break except ValueError as err: errMsg = str(err) + "\n\n" excludeCompileTime = messagebox.askyesno(_("arelle - Exclude formula compile statistics"), _("Should formula compiling be excluded from the statistics?\n" "(Yes will make a separate compiling \"pass\" so that statistics include execution only.)".format(errMsg)), parent=cntlr.parent) cntlr.config["formulaProfileReportDir"] = os.path.dirname(profileReportFile) cntlr.saveConfig() # perform validation and profiling on background thread import threading thread = threading.Thread(target=lambda c=cntlr, f=profileReportFile, t=maxRunTime, e=excludeCompileTime: backgroundProfileFormula(c,f,t,e)) thread.daemon = True thread.start() def backgroundProfileFormula(cntlr, profileReportFile, maxRunTime, excludeCompileTime): from arelle import Locale, XPathParser, ValidateXbrlDimensions, ValidateFormula # build grammar before profiling (if this is the first pass, so it doesn't count in profile statistics) XPathParser.initializeParser(cntlr.modelManager) # load dimension defaults ValidateXbrlDimensions.loadDimensionDefaults(cntlr.modelManager) import cProfile, pstats, sys, time # a minimal validation class for formula validator parameters that are needed class Validate: def __init__(self, modelXbrl, maxRunTime): self.modelXbrl = modelXbrl self.parameters = None self.validateSBRNL = False self.maxFormulaRunTime = maxRunTime def close(self): self.__dict__.clear() val = Validate(cntlr.modelManager.modelXbrl, maxRunTime) formulaOptions = val.modelXbrl.modelManager.formulaOptions if excludeCompileTime: startedAt = time.time() cntlr.addToLog(_("pre-compiling formulas before profiling")) val.validateFormulaCompileOnly = True ValidateFormula.validate(val) del val.validateFormulaCompileOnly cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale, _("formula pre-compiling completed in %.2f secs"), time.time() - startedAt)) cntlr.addToLog(_("executing formulas for profiling")) else: cntlr.addToLog(_("compiling and executing formulas for profiling")) startedAt = time.time() statsFile = profileReportFile + ".bin" cProfile.runctx("ValidateFormula.validate(val)", globals(), locals(), statsFile) cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale, _("formula profiling completed in %.2f secs"), time.time() - startedAt)) # dereference val val.close() # specify a file for log priorStdOut = sys.stdout sys.stdout = open(profileReportFile, "w") statObj = pstats.Stats(statsFile) statObj.strip_dirs() statObj.sort_stats("time") statObj.print_stats() statObj.print_callees() statObj.print_callers() sys.stdout.flush() sys.stdout.close() del statObj sys.stdout = priorStdOut os.remove(statsFile) __pluginInfo__ = { 'name': 'Profile Formula Validation', 'version': '1.0', 'description': "This plug-in adds a profiled formula validation. " "Includes XPath compilation in the profile if it is the first validation of instance; " "to exclude XPath compile statistics, validate first the normal way (e.g., toolbar button) " "and then validate again using this profile formula validation plug-in. 
", 'license': 'Apache-2', 'author': 'Mark V Systems Limited', 'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.', # classes of mount points (required) 'CntlrWinMain.Menu.Validation': profileFormulaMenuEntender, }