| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| bburan/psiexperiment | psi/experiment/workbench.py | Python | mit | 4,641 | 0.001077 |
import logging
log = logging.getLogger(__name__)
import enaml
from enaml.application import deferred_call
from enaml.workbench.api import Workbench
with enaml.imports():
from enaml.stdlib.message_box import critical
from . import error_style
from psi import set_config
from psi.core.enaml.api import load_manifest, load_manifest_from_file
class PSIWorkbench(Workbench):
def register_core_plugins(self, io_manifest, controller_manifests):
# Note, the get_plugin calls appear to be necessary to properly
# initialize parts of the application before new plugins are loaded.
# This is likely some sort of bug or poor design on my part.
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from enaml.workbench.ui.ui_manifest import UIManifest
from psi.experiment.manifest import ExperimentManifest
self.register(ExperimentManifest())
self.register(CoreManifest())
self.register(UIManifest())
self.get_plugin('enaml.workbench.ui')
self.get_plugin('enaml.workbench.core')
manifest_class = load_manifest_from_file(io_manifest, 'IOManifest')
self.register(manifest_class())
manifests = []
for manifest in controller_manifests:
manifest_class = load_manifest(manifest)
manifest = manifest_class()
manifests.append(manifest)
self.register(manifest)
from psi.context.manifest import ContextManifest
from psi.data.manifest import DataManifest
from psi.token.manifest import TokenManifest
from psi.controller.calibration.manifest import CalibrationManifest
self.register(ContextManifest())
self.register(DataManifest())
self.register(TokenManifest())
self.register(CalibrationManifest())
# Required to bootstrap plugin loading
self.get_plugin('psi.controller')
self.get_plugin('psi.controller.calibration')
context = self.get_plugin('psi.context')
# Now, bind context to any manifests that want it (TODO, I should
# have a core PSIManifest that everything inherits from so this
# check isn't necessary).
for manifest in manifests:
if hasattr(manifest, 'C'):
manifest.C = context.lookup
def start_workspace(self,
experiment_name,
base_path=None,
workspace='psi.experiment.workspace',
commands=None,
load_preferences=True,
load_layout=True,
preferences_file=None,
layout_file=None,
calibration_file=None):
# TODO: Hack alert ... don't store this information in a shared config
# file. It's essentially a global variable.
set_config('EXPERIMENT', experiment_name)
ui = self.get_plugin('enaml.workbench.ui')
core = self.get_plugin('enaml.workbench.core')
# Load preferences
if load_preferences and preferences_file is not None:
deferred_call(core.invoke_command, 'psi.load_preferences',
{'filename': preferences_file})
elif load_preferences and preferences_file is None:
deferred_call(core.invoke_command, 'psi.get_default_preferences')
# Load layout
if load_layout and layout_file is not None:
deferred_call(core.invoke_command, 'psi.load_layout',
{'filename': layout_file})
elif load_layout and layout_file is None:
deferred_call(core.invoke_command, 'psi.get_default_layout')
# Exec commands
if commands is not None:
for command in commands:
deferred_call(core.invoke_command, command)
controller = self.get_plugin('psi.controller')
if base_path is not None:
controller.register_action('experiment_prepare',
'psi.data.set_base_path',
{'base_path': base_path})
if calibration_file is not None:
controller.load_calibration(calibration_file)
# Now, open workspace
ui.select_workspace(workspace)
ui.show_window()
if base_path is None:
ui.workspace.dock_area.style = 'error'
ui.start_application()
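A minimal, hypothetical call sequence for the class above, based only on the signatures shown in this sample; the manifest file names, experiment name, and path are placeholders:
# Illustrative sketch, not part of the dataset row above; names are placeholders.
workbench = PSIWorkbench()
workbench.register_core_plugins('io_manifest.enaml', ['controller_manifest.enaml'])
workbench.start_workspace('example_experiment', base_path='/tmp/example_data')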
| radio-astro/radiopadre | radiopadre/js9/__init__.py | Python | mit | 2,243 | 0.004904 |
import os
import os.path
import traceback
# init JS9 configuration
# js9 source directory
DIRNAME = os.path.dirname(__file__)
JS9_ERROR = os.environ.get("RADIOPADRE_JS9_ERROR") or None
def init_js9():
global radiopadre
import radiopadre
from radiopadre.render import render_status_message
global JS9_ERROR
if JS9_ERROR:
return
global _prefix
_prefix = radiopadre.SHADOW_URL_PREFIX
global RADIOPADRE_INSTALL_PREFIX
global RADIOPADRE_LOCAL_PREFIX
global JS9_INSTALL_PREFIX
global JS9_HELPER_PORT
global JS9_INIT_HTML_HTTP
global JS9_SCRIPT_PREFIX
global JS9_LOCAL_SETTINGS
RADIOPADRE_INSTALL_PREFIX = _prefix + "/radiopadre-www" # URL used to access radiopadre code
RADIOPADRE_LOCAL_PREFIX = os.path.join(_prefix, radiopadre.ABSROOTDIR, ".radiopadre") # URL used to access radiopadre aux dir
JS9_INSTALL_PREFIX = _prefix+"/js9-www" # URL used to access JS9 code
JS9_SCRIPT_PREFIX = _prefix
JS9_LOCAL_SETTINGS = os.environ["RADIOPADRE_JS9_SETTINGS"]
try:
JS9_HELPER_PORT = int(os.environ["RADIOPADRE_JS9_HELPER_PORT"])
except:
JS9_ERROR = "invalid RADIOPADRE_JS9_HELPER_PORT setting, integer value expected"
# get init code, substitute global variables into it
if not JS9_ERROR:
try:
with open(os.path.join(DIRNAME, "js9-init-template.html")) as inp:
source = inp.read()
JS9_INIT_HTML_HTTP = source.format(**globals())
except Exception, exc:
traceback.print_exc()
JS9_ERROR = "Error reading init templates: {}".format(str(exc))
# on error, init code replaced by error message
if JS9_ERROR:
JS9_INIT_HTML_HTTP = render_status_message("Error initializing JS9: {}".format(JS9_ERROR), bgcolor='yellow')
radiopadre.add_startup_warning("""Warning: the JS9 FITS viewer is not functional ({}). Live FITS file viewing
will not be available in this notebook. You probably want to fix this problem (missing libcfitsio-dev and/or nodejs
packages, typically), then reinstall the radiopadre environment on this system ({}).
""".format(JS9_ERROR, os.environ['HOSTNAME']))
| dan4ik95dv/housemanagement | tsj/fixtures/origins/export_companies.py | Python | mit | 2,898 | 0.002761 |
#!/usr/bin/python
from json import dumps as jdumps
from sys import stdin, argv
from itertools import count
from pprint import PrettyPrinter
from collections import OrderedDict
def order_dict(keys, d):
return OrderedDict([(k, d[k]) for k in keys])
ENTITY_ORDER = ("model", "pk", "fields")
USER_ORDER = (
"username",
"first_name",
"last_name",
"is_active",
"is_superuser",
"is_staff",
"last_login",
"groups",
"user_permissions",
"password",
"email",
"date_joined")
COMPANY_ORDER = (
"name",
"full_name",
"workgraph",
"company_type",
"post_address",
"legal_address",
"kpp",
"bank_name",
"kor_schet",
"orgn_date",
"bik",
"boss_fio",
"phone",
"inn",
"proof",
"user",
"orgn",
"orgn_emitter",
"email",
"bill_numb")
j = []
lines = stdin.readlines()
for s, i in zip(lines, count(1)):
vals = s.strip()[1:-1].split('","')
user = order_dict(ENTITY_ORDER, {
"model": "auth.user",
"pk": i,
"fields": order_dict(USER_ORDER, {
"username": "testcom"+str(i),
"first_name": "",
"last_name": "",
"is_active": True,
"is_superuser": True,
"is_staff": True,
"last_login": "2014-10-17T19:40:23.371Z",
"groups": [],
"user_permissions": [],
"password": "pbkdf2_sha256$12000$vxQhU3S5NDLt$SxI8swzmISd0c2lN2wyDrE4pt72s8ZB1NqHwQOf7uNU=",
"email": "testcom@test.test",
"date_joined": "2014-10-
|
17T19:40:08.944Z"
})
})
entity = order_dict(ENTITY_ORDER, {
"model": "tsj.company",
"pk": i,
"fields": {
"kpp": "12345654321",
"bank_name": "12343543",
"kor_schet": "2342342343",
"orgn_date": "2014-10-10",
"bik": "3424234324",
"boss_fio": "\u0419\u0446\u0443\u043a \u0415\u043d\u0433 \u0418\u0447\u0435\u0448\u0443\u0442\u0441\u044f",
"phone": "777771",
"inn": "123123213123213",
"proof": "scans/\u043f\u0430\u0441\u043f\u043e\u0440\u0442_HZzLWaq.png",
"user": i,
"orgn": "123213213213123",
"orgn_emitter": "\u0439\u0446\u0443\u043a\u0435\u043d\u043a\u0443\u0446\u0439",
"email": "takie@dela.test",
"bill_numb": "1123213123"
}
})
entity['fields'] = order_dict(COMPANY_ORDER, dict({
'name': vals[0],
'full_name': vals[0],
'workgraph': vals[1],
'company_type': 0 if int(vals[2]) <800 else 1,
'post_address': vals[3],
'legal_address': vals[3]
}, **entity['fields']))
j += [user, entity]
pp = PrettyPrinter(indent=4)
print('s = ')
pp.pprint(j)
if(len(argv) > 1):
jf = open(argv[1], 'w')
jf.writelines(jdumps(j, indent=4))
jf.close()
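For clarity, a standalone illustration of the `order_dict` helper defined above: it rebuilds a dict with its keys in the requested order (the values here are invented):
# Illustrative sketch of order_dict, not part of the dataset row above.
from collections import OrderedDict

def order_dict(keys, d):
    return OrderedDict([(k, d[k]) for k in keys])

print(order_dict(("pk", "model"), {"model": "auth.user", "pk": 1}))
# OrderedDict([('pk', 1), ('model', 'auth.user')])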
| JoProvost/calbum | calbum/sources/exiftool.py | Python | apache-2.0 | 3,192 | 0.000627 |
# Copyright 2015 Jonathan Provost.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
from calbum.core.model import Media, string_to_datetime
exiftool_path = 'exiftool'
class ExifToolMedia(Media):
file_extensions = ()
timestamp_tags = (
'Creation Date',
'Date/Time Original',
'Track Create Date',
'Media Create Date',
'Create Date',
)
def exif(self):
if not hasattr(self, '_exif'):
self._exif = {}
try:
for line in subprocess.check_output(
[exiftool_path, self.path()]).splitlines():
key, value = line.split(':', 1)
self._exif[key.strip()] = value.strip()
except OSError as e:
logging.warning('Metadata processing with "{}" '
'failed for "{}": {}'.format(
exiftool_path, self.path(), repr(e)))
except subprocess.CalledProcessError as e:
logging.warning('Metadata processing with "{}" '
'failed for "{}": {}'.format(
exiftool_path, self.path(), repr(e)))
return self._exif
def timestamp(self):
"""
Return the creation timestamp of the media as defined in the EXIF
metadata ('Creation Date', 'Date/Time Original', 'Track Create Date',
'Media Create Date', 'Create Date')
:rtype: datetime
"""
try:
exif = self.exif()
d = next((
string_to_datetime(str(exif[tag]), self.time_zone)
for tag in self.timestamp_tags if tag in exif), None)
if d:
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=self.time_zone)
if d.year < 1970:
logging.info('Patched incorrect time offset, for {} '
'see https://trac.ffmpeg.org/ticket/1471'
.format(self.path()))
# See https://trac.ffmpeg.org/ticket/1471
d = d.replace(year=d.year+66)
return d
except ValueError:
pass
return super(ExifToolMedia, self).timestamp()
def location(self):
raise NotImplemented()
class JpegPicture(ExifToolMedia):
file_extensions = ('.jpeg', '.jpg')
class TiffPicture(ExifToolMedia):
file_extensions = ('.tiff', '.tif')
class VideoMP4Media(ExifToolMedia):
file_extensions = ('.mp4',)
class Video3GPMedia(ExifToolMedia):
file_extensions = ('.3gp', '.3g2')
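The `exif()` method above shells out to exiftool and splits each `Tag Name: value` output line on the first colon; a self-contained sketch of the same parsing idea, assuming an `exiftool` binary is available on PATH:
# Illustrative sketch mirroring the parsing loop in ExifToolMedia.exif(); not part of the row above.
import subprocess

def read_exif(path, exiftool_path='exiftool'):
    exif = {}
    for line in subprocess.check_output([exiftool_path, path]).splitlines():
        key, value = line.decode().split(':', 1)
        exif[key.strip()] = value.strip()
    return exif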
| priyankarani/trytond-shipping-dpd | carrier.py | Python | bsd-3-clause | 4,946 | 0.000202 |
# -*- coding: utf-8 -*-
"""
carrier
"""
from decimal import Decimal
from suds import WebFault
from trytond.transaction import Transaction
from trytond.pool import PoolMeta, Pool
from trytond.model import fields, ModelView
from trytond.pyson import Eval
from trytond.wizard import Wizard, StateView, Button
from dpd_client import DPDClient
__all__ = ['Carrier', 'TestConnectionStart', 'TestConnection']
__metaclass__ = PoolMeta
STATES = {
'required': Eval('carrier_cost_method') == 'dpd',
'invisible': Eval('carrier_cost_method') != 'dpd'
}
class Carrier:
"Carrier"
__name__ = 'carrier'
dpd_url = fields.Char(
'Base URL', help="Ex. https://public-ws-stage.dpd.com",
states=STATES, depends=['carrier_cost_method']
)
dpd_login_service_wsdl = fields.Char(
'Login Service URL', states=STATES, depends=['carrier_cost_method']
)
dpd_shipment_service_wsdl = fields.Char(
'Shipment Service URL', states=STATES, depends=['carrier_cost_method']
)
dpd_depot_data_service_wsdl = fields.Char(
'Depot Data Service URL', states=STATES, depends=['carrier_cost_method']
)
dpd_parcel_shop_finder_service_wsdl = fields.Char(
'Parcel Shop Finder Service URL', states=STATES,
depends=['carrier_cost_method']
)
dpd_username = fields.Char(
'Username/DelisID', states=STATES, depends=['carrier_cost_method']
)
dpd_password = fields.Char(
'Password', states=STATES, depends=['carrier_cost_method']
)
dpd_depot = fields.Char(
'Depot', states=STATES, depends=['carrier_cost_method']
)
@classmethod
def __setup__(cls):
super(Carrier, cls).__setup__()
selection = ('dpd', 'DPD')
if selection not in cls.carrier_cost_method.selection:
cls.carrier_cost_method.selection.append(selection)
cls._buttons.update({
'test_dpd_credentials': {},
})
@fields.depends('carrier_cost_method', 'dpd_url')
def on_change_dpd_url(self):
"""
Set the login_service and shipment_service URL on change of dpd_url
"""
if self.carrier_cost_method != 'dpd':
return {}
if not self.dpd_url:
return {}
return {
'dpd_login_service_wsdl': (
self.dpd_url + '/services/LoginService/V2_0?wsdl'),
'dpd_shipment_service_wsdl': (
self.dpd_url + '/services/ShipmentService/V3_2?wsdl'),
'dpd_depot_data_service_wsdl': (
self.dpd_url + '/services/DepotDataService/V1_0?wsdl'),
'dpd_parcel_shop_finder_service_wsdl': (
self.dpd_url + '/services/DepotDataService/V1_0?wsdl'),
}
def get_dpd_client(self):
"""
Return the DPD client with the username and password set
"""
return DPDClient(
self.dpd_login_service_wsdl,
self.dpd_shipment_service_wsdl,
self.dpd_depot_data_service_wsdl,
self.dpd_parcel_shop_finder_service_wsdl,
self.dpd_username,
self.dpd_password,
message_language=Transaction().context.get('language', 'en_US')
)
@classmethod
@ModelView.button_action('shipping_dpd.wizard_test_connection')
def test_dpd_credentials(cls, carriers):
"""
Tests the connection. If there is a WebFault, raises an UserError
"""
if len(carriers) != 1:
cls.raise_user_error('Only one carrier can be tested at a time.')
client = carriers[0].get_dpd_client()
try:
client.get_auth()
except WebFault, exc:
cls.raise_user_error(exc.fault)
def get_sale_price(self):
"""Estimates the shipment rate for the current shipment
DPD doesn't provide a shipping cost, so here shipping_cost will be 0
returns a tuple of (value, currency_id)
:returns: A tuple of (value, currency_id which in this case is USD)
"""
Currency = Pool().get('currency.currency')
Company = Pool().get('company.company')
if self.carrier_cost_method != 'dpd':
return super(Carrier, self).get_sale_price() # pragma: no cover
currency, = Currency.search([('code', '=', 'USD')])
company = Transaction().context.get('company')
if company:
currency = Company(company).currency
return Decimal('0'), currency.id
class TestConnectionStart(ModelView):
"Test Connection"
__name__ = 'shipping_dpd.wizard_test_connection.start'
class TestConnection(Wizard):
"""
Test Connection Wizard
"""
__name__ = 'shipping_dpd.wizard_test_connection'
start = StateView(
'shipping_dpd.wizard_test_connection.start',
'shipping_dpd.wizard_test_connection_view_form',
[
Button('Ok', 'end', 'tryton-ok'),
]
)
| cloudbase/lis-tempest | tempest/api/compute/admin/test_simple_tenant_usage_negative.py | Python | apache-2.0 | 2,648 | 0 |
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class TenantUsagesNegativeTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setUpClass(cls):
super(TenantUsagesNegativeTestJSON, cls).setUpClass()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
cls.identity_client = cls._get_identity_admin_client()
now = datetime.datetime.now()
cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
@classmethod
def _parse_strtime(cls, at):
# Returns formatted datetime
return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
@test.attr(type=['negative', 'gate'])
def test_get_usage_tenant_with_empty_tenant_id(self):
# Get usage for a specific tenant empty
params = {'start': self.start,
'end': self.end}
self.assertRaises(exceptions.NotFound,
self.adm_client.get_tenant_usage,
'', params)
@test.attr(type=['negative', 'gate'])
def test_get_usage_tenant_with_invalid_date(self):
# Get usage for tenant with invalid date
params = {'start': self.end,
'end': self.start}
self.assertRaises(exceptions.BadRequest,
self.adm_client.get_tenant_usage,
self.client.tenant_id, params)
@test.attr(type=['negative', 'gate'])
def test_list_usage_all_tenants_with_non_admin_user(self):
# Get usage for all tenants with non admin user
params = {'start': self.start,
'end': self.end,
'detailed': int(bool(True))}
self.assertRaises(exceptions.Unauthorized,
self.client.list_tenant_usages, params)
class TenantUsagesNegativeTestXML(TenantUsagesNegativeTestJSON):
_interface = 'xml'
| stsievert/swix | swix/speed/speed.py | Python | mit | 1,179 | 0.017812 |
from __future__ import division
from pylab import *
from timeit import timeit
def pe1():
N = 1e6
x = arange(N)
i = argwhere((abs(x%3) < 1e-9) * (abs(x%5) < 1e-9))
def pe10():
N = 2e6
primes = arange(N)
for i in arange(2, sqrt(N)):
j = arange(2, N/i) * i
j = asarray(j, dtype=int)
primes[j] *= 0.0
def pe73():
N = 1e3
d = arange(N)+1
n = arange(N)+1
n, d = meshgrid(n, d)
# setting the upper triangular to 0
#il1 = triu_indices(n.shape[0])
#n[il1] = 0
f = n / d
f = unique(f)
i = (f > 1/3) & (f < 1/2)
def soft_threshold():
N = 1e2
j = linspace(-1, 1, num=N)
(x, y) = meshgrid(j, j)
z = pow(x, 2) + pow(y, 2)
i = abs(z) < 0.5
z[argwhere(i)] *= 0
z[argwhere(~i)] -= 0.5
def pi_approx():
N = 1e6
k = arange(N, dtype=int)
pi_approx = 1 / (2*k + 1)
pi_approx[2*k[:N/2]+1] *= -1
print "pe1_time : ", timeit(pe1 ,number=10)
print "pe10_time : ", timeit(pe10 ,number=3)
print "pe73_time : ", timeit(pe73 ,number=1)
print "st_time : ", timeit(soft_threshold ,number=3)
print "pi_approx time : ", timeit(pi_approx ,number=10)
| cs-hse-projects/profanity-filter | profanity_filter/ui_progress_bar.py | Python | mit | 1,153 | 0.001735 |
"""
Progress bar encapsulation
"""
# Copyright (c) Timur Iskhakov.
# Distributed under the terms of the MIT License.
import progressbar
class UIProgressBar:
__widgets = [' ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
def __init__(self, message):
"""
:param message: :class:`str` Message to print
"""
self.message = message
self.progress_bar = None
self.value = 0
def init(self, max_value):
"""Initiates and starts the progress bar.
:param max_value: :class:`int`, Number of steps for bar
"""
self.progress_bar = progressbar.ProgressBar(widgets=[self.message] + UIProgressBar.__widgets,
maxval=max_value)
self.progress_bar.start()
def step(self, value=1):
"""Updates the progress bar.
:param value: Value to increment the process
"""
self.value += value
self.progress_bar.update(self.value)
def finish(self):
"""Finishes the progress bar."""
self.progress_bar.finish()
self.value = 0
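The docstrings above describe the intended call order (init, then step, then finish); a minimal usage sketch, assuming the `progressbar` package is installed and the class is importable from the module path listed for this row:
# Illustrative usage sketch, not part of the dataset row above.
from profanity_filter.ui_progress_bar import UIProgressBar

bar = UIProgressBar('Filtering')
bar.init(max_value=100)   # create and start a 100-step bar
for _ in range(100):
    bar.step()            # advance one step at a time
bar.finish()              # close the bar and reset the internal counter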
| lhupfeldt/jenkinsflow | demo/jobs/calculated_flow_jobs.py | Python | bsd-3-clause | 1,296 | 0.003086 |
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from collections import OrderedDict
from framework import api_select
def create_jobs(api_type):
g1_components = range(1)
g2_components = range(2)
g3_components = range(2)
component_groups = OrderedDict((('g1', g1_components), ('g2', g2_components), ('g3', g3_components)))
api = api_select.api(__file__, api_type)
def job(name, expect_order, params=None):
api.job(name, exec_time=0.5, max_fails=0, expect_invocations=1, expect_order=expect_order, params=params)
api.flow_job()
job('prepare', 1)
for gname, group in component_groups.items():
for component in group:
job('deploy_component_' + gname + '_' + str(component), 2)
job('report_deploy', 3)
job('prepare_tests', 3)
job('test_ui', 4)
job('test_x', 4)
for gname, group in component_groups.items():
for component in group:
job('test_component_' + gname + '_' + str(component), 5)
job('report', 6, params=(('s1', 'tst_regression', 'desc'), ('c1', ('complete', 'partial'), 'desc')))
job('promote', 7)
return api
if __name__ == '__main__':
create_jobs(api_select.ApiType.JENKINS)
| bruecksen/isimip | isi_mip/sciencepaper/admin.py | Python | mit | 107 | 0.009346 |
from django.contrib import admin
from isi_mip.sciencepaper.models import Paper
admin.site.register(Paper)
| killabytenow/chirribackup | chirribackup/storage/Local.py | Python | gpl-3.0 | 5,284 | 0.006625 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
###############################################################################
# chirribackup/storage/Local.py
#
# Local backup storage -- backup is organized in a local folder
#
# -----------------------------------------------------------------------------
# Chirri Backup - Cheap and ugly backup tool
# Copyright (C) 2016 Gerardo Garcia Peña <killabytenow@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from chirribackup.exceptions import ChirriException
from chirribackup.Logger import logger
import chirribackup.input
import chirribackup.storage.BaseStorage
import shutil
import os
import stat
class DirectoryNotFoundLocalStorageException(ChirriException):
"""Exception launched when directory is not found"""
class NotDirectoryLocalStorageException(ChirriException):
"""Exception launched when directory is not found"""
class Local(chirribackup.storage.BaseStorage.BaseStorage):
# class attributes
name = "Local storage"
storage_status_keys = {
"sm_local_storage_dir": { "save": 1, "type": "str", "value": None },
}
# object attributes
ls_path = None
def __init__(self, ldb, config = False):
super(Local, self).__init__(ldb, config)
if not config:
self.ls_path = os.path.realpath(self.ldb.sm_local_storage_dir)
if not os.path.isdir(self.ls_path):
os.makedirs(self.ls_path, 0770)
def __build_ls_path(self, remote_file, create_dir = False):
if isinstance(remote_file, list):
remote_file = os.path.join(*remote_file)
target_file = os.path.realpath(os.path.join(self.ls_path, remote_file))
if not target_file.startswith(os.path.join(self.ls_path, "")) \
and target_file != self.ls_path:
raise ChirriException("Target file '%s' outside of localstorage dir '%s'." % (remote_file, self.ls_path))
target_dir = os.path.dirname(target_file)
if not os.path.exists(target_dir):
if create_dir:
os.makedirs(target_dir, 0770)
else:
raise DirectoryNotFoundLocalStorageException("Directory %s not found." % target_dir)
elif not os.path.isdir(target_dir):
raise NotDirectoryLocalStorageException("%s is not a directory." % target_dir)
return target_file
def ask_config(self, config):
ok = False
while not ok:
config["sm_local_storage_dir"] = os.path.realpath(os.path.expanduser(chirribackup.input.ask("storage directory", config["sm_local_storage_dir"])))
if os.path.exists(config["sm_local_storage_dir"]) \
and not os.path.isdir(config["sm_local_storage_dir"]):
logger.error("Path '%s' is not a directory.")
else:
ok = True
def upload_file(self, remote_file, local_file):
"""upload a file"""
target_file = self.__build_ls_path(remote_file, True)
shutil.copyfile(local_file, target_file)
def upload_data(self, remote_file, data):
"""post a file"""
target_file = self.__build_ls_path(remote_file, True)
with open(target_file, "wb", 0660) as ofile:
ofile.write(data)
def __get_listing(self, path = ""):
l = []
try:
for f in os.listdir(self.__build_ls_path(path)):
if os.path.isdir(self.__build_ls_path([ path, f ])):
l.extend(self.get_listing(self.path_join(path, f)))
else:
statinfo = os.lstat(self.__build_ls_path([ path, f ]))
l.append({
"name" : self.path_join(path, f),
"size" : statinfo.st_size,
})
except DirectoryNotFoundLocalStorageException, ex:
logger.warning("No snapshots found.")
return l
def get_listing(self):
return self.__get_listing()
def get_listing_snapshots(self):
return self.__get_listing("snapshots")
def get_listing_chunks(self):
return self.__get_listing("chunks")
def download_file(self, remote_file, local_path):
"""download a file to disk"""
shutil.copyfile(self.__build_ls_path(remote_file, False), local_path)
def download_data(self, remote_file):
with open(self.__build_ls_path(remote_file, False), "r") as f:
data = f.read()
return data
def delete_file(self, remote_file):
os.unlink(self.__build_ls_path(remote_file, False))
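`__build_ls_path` above guards against path traversal by resolving the requested file with `realpath` and requiring the result to stay under the storage root; the same check in isolation (directory names are examples):
# Illustrative sketch of the realpath containment check; not part of the dataset row above.
import os

def is_inside(base, relative_path):
    base = os.path.realpath(base)
    target = os.path.realpath(os.path.join(base, relative_path))
    return target == base or target.startswith(os.path.join(base, ''))

print(is_inside('/tmp/storage', 'chunks/abc'))        # True
print(is_inside('/tmp/storage', '../../etc/passwd'))  # False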
| Zlash65/erpnext | erpnext/projects/doctype/project_template_task/project_template_task.py | Python | gpl-3.0 | 287 | 0.006969 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ProjectTemplateTask(Document):
pass
| Brainbuster/openpli-buildumgebung | bitbake/lib/toaster/bldcontrol/localhostbecontroller.py | Python | gpl-2.0 | 14,870 | 0.005783 |
#
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2014 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import re
from django.db import transaction
from django.db.models import Q
from bldcontrol.models import BuildEnvironment, BRLayer, BRVariable, BRTarget, BRBitbake
import subprocess
from toastermain import settings
from bbcontroller import BuildEnvironmentController, ShellCmdException, BuildSetupException
import logging
logger = logging.getLogger("toaster")
from pprint import pprint, pformat
class LocalhostBEController(BuildEnvironmentController):
""" Implementation of the BuildEnvironmentController for the localhost;
this controller manages the default build directory,
the server setup and system start and stop for the localhost-type build environment
"""
def __init__(self, be):
super(LocalhostBEController, self).__init__(be)
self.dburl = settings.getDATABASE_URL()
self.pokydirname = None
self.islayerset = False
def _shellcmd(self, command, cwd = None):
if cwd is None:
cwd = self.be.sourcedir
#logger.debug("lbc_shellcmmd: (%s) %s" % (cwd, command))
p = subprocess.Popen(command, cwd = cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out,err) = p.communicate()
p.wait()
if p.returncode:
if len(err) == 0:
err = "command: %s \n%s" % (command, out)
else:
err = "command: %s \n%s" % (command, err)
#logger.warn("localhostbecontroller: shellcmd error %s" % err)
raise ShellCmdException(err)
else:
#logger.debug("localhostbecontroller: shellcmd success")
return out
def _createdirpath(self, path):
from os.path import dirname as DN
if path == "":
raise Exception("Invalid path creation specified.")
if not os.path.exists(DN(path)):
self._createdirpath(DN(path))
if not os.path.exists(path):
os.mkdir(path, 0755)
def _setupBE(self):
assert self.pokydirname and os.path.exists(self.pokydirname)
self._createdirpath(self.be.builddir)
self._shellcmd("bash -c \"source %s/oe-init-build-env %s\"" % (self.pokydirname, self.be.builddir))
# delete the templateconf.cfg; it may come from an unsupported layer configuration
os.remove(os.path.join(self.be.builddir, "conf/templateconf.cfg"))
def writeConfFile(self, file_name, variable_list = None, raw = None):
filepath = os.path.join(self.be.builddir, file_name)
with open(filepath, "w") as conffile:
if variable_list is not None:
for i in variable_list:
conffile.write("%s=\"%s\"\n" % (i.name, i.value))
if raw is not None:
conffile.write(raw)
def startBBServer(self):
assert self.pokydirname and os.path.exists(self.pokydirname)
assert self.islayerset
# find our own toasterui listener/bitbake
from toaster.bldcontrol.management.commands.loadconf import _reduce_canon_path
own_bitbake = _reduce_canon_path(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../bin/bitbake"))
assert os.path.exists(own_bitbake) and os.path.isfile(own_bitbake)
logger.debug("localhostbecontroller: running the listener at %s" % own_bitbake)
toaster_ui_log_filepath = os.path.join(self.be.builddir, "toaster_ui.log")
# get the file length; we need to detect the _last_ start of the toaster UI, not the first
toaster_ui_log_filelength = 0
if os.path.exists(toaster_ui_log_filepath):
with open(toaster_ui_log_filepath, "r") as f:
f.seek(0, 2) # jump to the end
toaster_ui_log_filelength = f.tell()
cmd = "bash -c \"source %s/oe-init-build-env %s 2>&1 >toaster_server.log && bitbake --read %s/conf/toaster-pre.conf --postread %s/conf/toaster.conf --server-only -t xmlrpc -B 0.0.0.0:0 2>&1 >>toaster_server.log \"" % (self.pokydirname, self.be.builddir, self.be.builddir, self.be.builddir)
port = "-1"
logger.debug("localhostbecontroller: starting builder \n%s\n" % cmd)
cmdoutput = self._shellcmd(cmd)
with open(self.be.builddir + "/toaster_server.log", "r") as f:
for i in f.readlines():
if i.startswith("Bitbake server address"):
port = i.split(" ")[-1]
logger.debug("localhostbecontroller: Found bitbake server port %s" % port)
cmd = "bash -c \"source %s/oe-init-build-env-memres -1 %s && DATABASE_URL=%s %s --observe-only -u toasterui --remote-server=0.0.0.0:-1 -t xmlrpc\"" % (self.pokydirname, self.be.builddir, self.dburl, own_bitbake)
with open(toaster_ui_log_filepath, "a+") as f:
p = subprocess.Popen(cmd, cwd = self.be.builddir, shell=True, stdout=f, stderr=f)
def _toaster_ui_started(filepath, filepos = 0):
if not os.path.exists(filepath):
return False
with open(filepath, "r") as f:
f.seek(filepos)
for line in f:
if line.startswith("NOTE: ToasterUI waiting for events"):
return True
return False
retries = 0
started = False
while not started and retries < 10:
started = _toaster_ui_started(toaster_ui_log_filepath, toaster_ui_log_filelength)
import time
logger.debug("localhostbecontroller: Waiting bitbake server to start")
time.sleep(0.5)
retries += 1
if not started:
toaster_ui_log = open(os.path.join(self.be.builddir, "toaster_ui.log"), "r").read()
toaster_server_log = open(os.path.join(self.be.builddir, "toaster_server.log")
|
, "r").read()
raise BuildSetupException("localhostbecontroller: Bitbake server did not start in 5 seconds, aborting (Error: '%s' '%s')" % (toaster_ui_log, toaster_server_log))
logger.debug("localhostbecontroller: Started bitbake server")
while port == "-1":
# the port specification is "autodetect"; read the bitbake.lock file
with open("%s/bitbake.lock" % self.be.builddir, "r") as f:
for line in f.readlines():
if ":" in line:
port = line.split(":")[1].strip()
logger.debug("localhostbecontroller: Autodetected bitbake port %s", port)
break
assert self.be.sourcedir and os.path.exists(self.be.builddir)
self.be.bbaddress = "localhost"
self.be.bbport = port
self.be.bbstate = BuildEnvironment.SERVER_STARTED
self.be.save()
def stopBBServer(self):
assert self.pokydirname and os.path.exists(self.pokydirname)
assert self.islayerset
self._shellcmd("bash -c \"source %s/oe-init-build-env %s && %s source toaster stop\"" %
(self.pokydirname, self.be.builddir, (lambda: "" if self.be.bbtoken is None else "BBTOKEN=%s" % self.be.bbtoken)()))
self.be.bbstate = BuildEnvironment.SERVER_STOPPED
self.be.save()
logger.debug("localhostbecontroller: Stopped bitbake server")
def getGitCloneDirectory(self, url, branch):
""" Utility that returns the last componen
| YosefLab/scVI | scvi/core/models/__init__.py | Python | bsd-3-clause | 207 | 0 |
from .archesmixin import ArchesMixin
from .base import BaseModelClass
from .rnamixin import RNASeqMixin
from .vaemixin import VAEMixin
__all__ = ["ArchesMixin", "BaseModelClass", "RN
|
ASeqMixi
|
n", "VAEMixin"]
| craig5/python-samples | root_script/{{cookiecutter.proj_name}}/setup.py | Python | gpl-3.0 | 158 | 0 |
#!/usr/bin/env python3
"""
Setup for root_script.
"""
# core python libraries
import setuptools
# third party libraries
# custom libraries
setuptools.setup()
| litoeknee/byteNet-tensorflow | utils.py | Python | mit | 271 | 0.03321 |
import numpy as np
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1)*s)))
def list_to_string(ascii_list):
res = u""
for a in ascii_list:
if a >= 0 and a < 256:
res += unichr(a)
return res
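`weighted_pick` above draws an index with probability proportional to its weight, using a cumulative sum and `searchsorted`; a quick empirical check of that behaviour (sampled frequencies are approximate):
# Illustrative sketch of the cumulative-sum sampling in weighted_pick; not part of the dataset row above.
import numpy as np

def weighted_pick(weights):
    t = np.cumsum(weights)
    s = np.sum(weights)
    return int(np.searchsorted(t, np.random.rand() * s))

draws = [weighted_pick([0.1, 0.3, 0.6]) for _ in range(10000)]
print(np.bincount(draws) / len(draws))  # roughly [0.1, 0.3, 0.6]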
| voidrank/django-chunked-upload | chunked_upload/exceptions.py | Python | mit | 262 | 0 |
"""
Exceptions raised by django-chunked-upload.
"""
class ChunkedUploadError(Exception):
"""
Exception raised if errors in the request/process.
"""
def __init__(self, status, **data):
self.status_code = status
self.data = data
| ghchinoy/tensorflow | tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py | Python | apache-2.0 | 5,407 | 0.002404 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReshapeTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
outputs = []
# Here we test two types of reshapes, one changes the batch dimension and
# the other does not. Note that we're not able to test reshaping to
# scalar, since TRT requires input tensor to be of rank at least 2, so a
# reshape with scalar input will be filtered out of the segment before
# conversion.
#
# These reshapes happen at batch dimension, thus conversion should fail.
for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2], [2, 50, -1, 24, 2]]:
incompatible_reshape = array_ops.reshape(inp, shape)
reshape_back = array_ops.reshape(incompatible_reshape, [-1, 24, 24, 2])
outputs.append(self.trt_incompatible_op(reshape_back))
# Add another block with many reshapes that don't change the batch
# dimension.
compatible_reshape = array_ops.reshape(
inp, [-1, 24 * 24, 2], name="reshape-0")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24, -1], name="reshape-1")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24 * 2, 24], name="reshape-2")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24 * 2], name="reshape-3")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 24, 2], name="reshape-4")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 6, 4, 2, 1], name="reshape-5")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24, 2], name="reshape-6")
outputs.append(self.trt_incompatible_op(compatible_reshape))
return math_ops.add_n(outputs, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 24, 24, 2]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["reshape-%d" % i for i in range(7)] +
["reshape-%d/shape" % i for i in range(7)]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
class TransposeTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
# Add a block with compatible transposes.
compatible_transpose = array_ops.transpose(
inp, [0, 3, 1, 2], name="transpose-1")
compatible_transpose = array_ops.transpose(
compatible_transpose, [0, 2, 3, 1], name="transposeback")
# Add an incompatible op so the first block will not be in the same
# subgraph where the following block belongs.
bridge = self.trt_incompatible_op(compatible_transpose)
# Add a block with incompatible transposes.
#
# Note: by default Grappler will run the TRT optimizer twice. At the
# first time it will group the two transpose ops below to same segment
# then fail the conversion due to the expected batch dimension problem.
# At the second time, since the input of bridge op is TRTEngineOp_0, it
# will fail to do shape inference which then cause conversion to fail.
# TODO(laigd): support shape inference, make TRT optimizer run only
# once, and fix this.
incompatible_transpose = array_ops.transpose(
bridge, [2, 1, 0, 3], name="transpose-2
|
")
excluded_transpose = array_ops.transpose(
incompatible_transpose, [0, 2, 3, 1], name="transpose-3")
return array_ops.identity(excluded_transpose, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[24, 100, 2, 24]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"transpose-1", "transpose-1/perm", "transposeback",
"transposeback/perm"
]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
if __name__ == "__main__":
test.main()
| indianajohn/ycmd | ycmd/tests/clang/diagnostics_test.py | Python | gpl-3.0 | 7,250 | 0.026207 |
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from ...server_utils import SetUpPythonPath
SetUpPythonPath()
from hamcrest import ( assert_that, contains, contains_string, has_entries,
has_entry, has_items, empty, equal_to )
from .clang_handlers_test import Clang_Handlers_test
from ycmd.utils import ReadFile
from pprint import pprint
class Clang_Diagnostics_test( Clang_Handlers_test ):
def ZeroBasedLineAndColumn_test( self ):
contents = """
void foo() {
double baz = "foo";
}
// Padding to 5 lines
// Padding to 5 lines
"""
event_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
event_name = 'FileReadyToParse',
contents = contents,
filetype = 'cpp' )
results = self._app.post_json( '/event_notification', event_data ).json
assert_that( results,
contains(
has_entries( {
'kind': equal_to( 'ERROR' ),
'text': contains_string( 'cannot initialize' ),
'ranges': contains( has_entries( {
'start': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
'end': has_entries( {
'line_num': 3,
'column_num': 21,
} ),
} ) ),
'location': has_entries( {
'line_num': 3,
'column_num': 10
} ),
'location_extent': has_entries( {
'start': has_entries( {
'line_num': 3,
'column_num': 10,
} ),
'end': has_entries( {
'line_num': 3,
'column_num': 13,
} ),
} )
} ) ) )
def SimpleLocationExtent_test( self ):
contents = """
void foo() {
baz = 5;
}
// Padding to 5 lines
// Padding to 5 lines
"""
event_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
event_name = 'FileReadyToParse',
contents = contents,
filetype = 'cpp' )
results = self._app.post_json( '/event_notification', event_data ).json
assert_that( results,
contains(
has_entries( {
'location_extent': has_entries( {
'start': has_entries( {
'line_num': 3,
'column_num': 3,
} ),
'end': has_entries( {
'line_num': 3,
'column_num': 6,
} ),
} )
} ) ) )
def PragmaOnceWarningIgnored_test( self ):
contents = """
#pragma once
struct Foo {
int x;
int y;
int c;
int d;
};
"""
event_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
event_name = 'FileReadyToParse',
contents = contents,
filepath = '/foo.h',
filetype = 'cpp' )
response = self._app.post_json( '/event_notification', event_data ).json
assert_that( response, empty() )
def Works_test( self ):
contents = """
struct Foo {
int x // semicolon missing here!
int y;
int c;
int d;
};
"""
diag_data = self._BuildRequest( compilation_flags = ['-x', 'c++'],
line_num = 3,
contents = contents,
filetype = 'cpp' )
event_data = diag_data.copy()
event_data.update( {
'event_name': 'FileReadyToParse',
} )
self._app.post_json( '/event_notification', event_data )
results = self._app.post_json( '/detailed_diagnostic', diag_data ).json
assert_that( results,
has_entry( 'message', contains_string( "expected ';'" ) ) )
def Multiline_test( self ):
contents = """
struct Foo {
Foo(int z) {}
};
int main() {
Foo foo("goo");
}
"""
diag_data = self._BuildRequest( compilation_flags = [ '-x', 'c++' ],
line_num = 7,
contents = contents,
filetype = 'cpp' )
event_data = diag_data.copy()
event_data.update( {
'event_name': 'FileReadyToParse',
} )
self._app.post_json( '/event_notification', event_data )
results = self._app.post_json( '/detailed_diagnostic', diag_data ).json
assert_that( results,
has_entry( 'message', contains_string( "\n" ) ) )
def FixIt_Available_test( self ):
contents = ReadFile( self._PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) )
event_data = self._BuildRequest( contents = contents,
event_name = 'FileReadyToParse',
filetype = 'cpp',
compilation_flags = [ '-x', 'c++',
'-std=c++03',
'-Wall',
'-Wextra',
'-pedantic' ] )
response = self._app.post_json( '/event_notification', event_data ).json
pprint( response )
assert_that( response, has_items(
has_entries( {
'location': has_entries( { 'line_num': 16, 'column_num': 3 } ),
'text': equal_to( 'switch condition type \'A\' '
'requires explicit conversion to \'int\''),
'fixit_available': True
} ),
has_entries( {
'location': has_entries( { 'line_num': 11, 'column_num': 3 } ),
'text': equal_to(
'explicit conversion functions are a C++11 extension' ),
'fixit_available': False
} ),
) )
| ysung-pivotal/incubator-hawq | tools/bin/gppylib/pgconf.py | Python | apache-2.0 | 10,433 | 0.001821 |
#!/usr/bin/env python
# $Id: $
"""
postgresql.conf configuration file reader
Module contents:
readfile() - Read postgresql.conf file
class gucdict - Container for postgresql.conf settings
class setting - Holds one setting
class ConfigurationError - a subclass of EnvironmentError
Example:
import lib.pgconf as pgconf
d = pgconf.readfile()
port = d.int('port', 5432)
pe = d.bool('password_encryption', False)
sb = d.kB('shared_buffers')
at = d.time('authentication_timeout', 'ms', 2500)
"""
import os
import os.path
import re
# Max recursion level for postgresql.conf include directives.
# The max value is 10 in the postgres code, so it's the same here.
MAX_RECURSION_LEVEL=10
def readfile(filename='postgresql.conf', defaultpath=None):
"""
Read postgresql.conf file and put the settings into a dictionary.
Returns the dictionary: a newly created pgconf.gucdict object.
If filename does not specify an absolute path, it is treated as relative
to defaultpath, or to the current working directory.
"""
if not os.path.isabs(filename):
if defaultpath is None:
defaultpath = os.getcwd()
filename = os.path.normpath(os.path.join(defaultpath, filename))
fp = open(filename)
try:
dictionary = gucdict()
dictionary.populate(fp, filename)
return dictionary
except Exception:
raise
finally:
fp.close()
class gucdict(dict):
"""
A container for settings from a postgresql.conf file.
Behaves as an ordinary dictionary, with a few added methods.
The keys of the dictionary are GUC names in lower case, and the
values are instances of the pgconf.setting class.
The populate() method loads the dictionary with settings from a file.
The str(), bool(), int(), float(), kB(), and time() methods return a
value from the dictionary, converted to internal form.
"""
def populate(self, lines, filename='', recurLevel=0):
'''
Given a postgresql.conf input file (or a list of strings, or some
iterable object yielding lines), look for lines of the form
name[=][value][#comment]
For each one found, construct a pgconf.setting object and put it
into our dictionary.
'''
if recurLevel == MAX_RECURSION_LEVEL:
raise Exception('could not open configuration file "%s": maximum nesting depth exceeded' % filename)
linenumber = 0
for line in lines:
linenumber += 1
m = _setpat.match(line)
if m:
name, value, pos = m.group(1), m.group(3), m.start(3)
if name == 'include':
try:
# Remove the ' from the filename and then convert to abspath if needed.
incfilename = value.strip("'")
if not incfilename.startswith('/') and filename != '':
incfilename = '%s/%s' % (filename[0:filename.rfind('/')], incfilename)
fp = open(incfilename)
self.populate(fp, incfilename, recurLevel+1)
fp.close()
except IOError:
raise Exception('File %s included from %s:%d does not exist' % (incfilename, filename, linenumber))
else:
self[name.lower()] = setting(name, value, filename, linenumber, pos)
def str(self, name, default=None):
"""
Return string setting, or default if absent.
"""
v = self.get(name)
if v:
return v.str()
else:
return default
def bool(self, name, default=None):
"""
Return Boolean setting, or default if absent.
"""
v = self.get(name)
if v:
return v.bool()
else:
return default
def int(self, name, default=None):
"""
Return integer setting, or default if absent.
"""
v = self.get(name)
if v:
return v.int()
else:
return default
def float(self, name, default=None):
"""
Return floating-point setting, or default if absent.
"""
v = self.get(name)
if v:
return v.float()
else:
return default
def kB(self, name, default=None):
"""
Return memory setting in units of 1024 bytes, or default if absent.
"""
v = self.get(name)
if v:
return v.kB()
else:
return default
def time(self, name, unit='s', default=None):
"""
Return time setting, or default if absent.
Specify desired unit as 'ms', 's', or 'min'.
"""
v = self.get(name)
if v:
return v.time(unit)
else:
return default
class setting(object):
"
|
""
Holds a GUC setting from a postgresql.conf file.
The str(), bool(), int(), float(), kB(), and time() methods return the
value converted to the requested internal form. pgconf.ConfigurationError
is raised if the conversion fails, i.e. the value does not conform to the
expected syntax.
"""
def __init__(self, name, value, filename='', linenumber=0, pos=0):
self.name = name
self.value = value
self.filename = filename
self.linenumber = linenumber
self.pos = pos # starting offset of value within the input line
def __repr__(self):
return repr(self.value)
def str(self):
"""
Return the value as a string.
"""
v = self.value
if v and v.endswith("'"):
# Single-quoted string. Remove the opening and closing quotes.
# Replace each escape sequence with the character it stands for.
i = v.index("'") + 1
v = _escapepat.sub(_escapefun, v[i:-1])
return v
def bool(self):
"""
Interpret the value as a Boolean. Returns True or False.
"""
s = self.value
if s:
s = s.lower()
n = len(s)
if (s == '1' or
s == 'on' or
s == 'true'[:n] or
s == 'yes'[:n]):
return True
if (s == '0' or
s == 'off'[:n] or
s == 'false'[:n] or
s == 'no'[:n]):
return False
raise self.ConfigurationError('Boolean value should be one of: 1, 0, '
'on, off, true, false, yes, no.')
def int(self):
"""
Interpret the value as an integer. Returns an int or long.
"""
try:
return int(self.value, 0)
except ValueError:
raise self.ConfigurationError('Value should be integer.')
def float(self):
"""
Interpret the value as floating point. Returns a float.
"""
try:
return float(self.value)
except ValueError:
raise self.ConfigurationError('Value should be floating point.')
def kB(self):
"""
Interpret the value as an amount of memory. Returns an int or long,
in units of 1024 bytes.
"""
try:
m = 1
t = re.split('(kB|MB|GB)', self.value)
if len(t) > 1:
i = ['kB', 'MB', 'GB'].index(t[1])
m = (1, 1024, 1024*1024)[i]
try:
return int(t[0], 0) * m
except ValueError:
pass
return int(float(t[0]) * m)
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer or float '
'with optional suffix kB, MB, or GB '
'(kB is default).')
def time(self, unit='s'):
"""
Interpret the value as a time. Returns an int or long.
Specify desired unit as 'ms', 's', or 'min'.
"""
u = ['ms', 's', 'min'].i
| shirleyChou/zhihu_crawler | zhihu.py | Python | mit | 19,263 | 0.000537 |
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
import re
import time
import json
import functools
import requests
from bs4 import BeautifulSoup
import html2text
reload(sys)
sys.setdefaultencoding('utf8')
# global var
_cookies_name = 'cookies.json'
_session = None
_headers = {'Host': 'www.zhihu.com',
'Referer': 'http://www.zhihu.com/',
'User-Agent': 'Mozilla/5.0',
'X-Requested-With': 'XMLHttpRequest'}
# Zhihu login URL
_zhihu_url = 'http://www.zhihu.com'
_zhihu_login_url = _zhihu_url + '/login'
_captcha_url_prefix = _zhihu_url + '/captcha.gif?r='
# zhihu column URL
_column_prefix = 'http://zhuanlan.zhihu.com/'
_column_GET_user = _column_prefix + 'api/columns/{0}'
_column_GET_posts = _column_GET_user + '/posts/{1}'
_column_GET_posts_limit = _column_GET_posts[:-4] + '?limit=10&offset={1}'
# regex
_re_question_url = re.compile(r'http://www\.zhihu\.com/question/\d+/?$')
_re_author_url = re.compile(r'http://www\.zhihu\.com/people/[^/]+/?$')
_re_column_url = re.compile(r'http://zhuanlan\.zhihu\.com/([^/]+)/?$')
_re_column_article_url = re.compile(
r'http://zhuanlan\.zhihu\.com/([^/]+)/(\d+)/?$')
_re_collection_url = re.compile(r'http://www\.zhihu\.com/collection/\d+/?$')
def _init():
global _session
if _session is None:
_session = requests.Session()
_session.headers.update(_headers)
if os.path.isfile(_cookies_name):
with open(_cookies_name, 'r') as f:
cookies_dict = json.load(f)
_session.cookies.update(cookies_dict)
else:
print 'Please run "zhihu.create_cookies()" for further operation.'
else:
raise Exception('Please don\'t call function _init() manually.')
def create_cookies():
if not os.path.isfile(_cookies_name):
email = raw_input('email:')
password = raw_input('password:')
captcha_url = get_captcha_url()
save_captcha(captcha_url)
print 'Please check "captcha.gif" for captcha'
captcha = raw_input('captcha:')
os.remove('captcha.gif')
r, msg = login(email, password, captcha)
if r == 0:
print 'cookies file created!'
elif r == 1:
print 'Failed to login. Error message is:' + msg
print 'Please check the error message and try again.'
else:
print '%s has been created! Please delete it first.' % _cookies_name
def get_captcha_url():
return _captcha_url_prefix + str(int(time.time() * 1000))
def save_captcha(url):
global _session
r = _session.get(url)
with open('captcha.gif', 'w') as f:
f.write(r.content)
def login(email=None, password=None, captcha=None):
global _session
data = {'email': email, 'password': password,
'captcha': captcha, 'rememberme': 'y'}
r = _session.post(_zhihu_login_url, data)
j = r.json()
status = int(j['r'])
msg = j['msg']
if status == 0:
with open(_cookies_name, 'wb') as f:
data = json.dumps(_session.cookies.get_dict())
f.write(data)
cookies_dict = json.loads(data)
_session.cookies.update(cookies_dict)
return status, msg
def set_class_variable(attr):
def decorator(func):
@functools.wraps(func)
def wrapper(self):
value = getattr(self, attr) if hasattr(self, attr) else None
if value is None:
self.make_soup()
value = func(self)
setattr(self, attr, value)
return value
else:
return value
return wrapper
return decorator
def valid_name(text):
invalid_char = ['\\', '/', ':', '*', '?', '<', '>', '|', '"']
valid = ''
for char in text:
if char not in invalid_char:
valid += char
return valid
def create_file(path, filename, mode, defaultpath, defaultname):
if path is None or '.':
path = os.path.join(os.getcwd(), valid_name(defaultpath))
if os.path.isdir(path) is False:
os.makedirs(path)
if filename is None:
filename = valid_name(defaultname)
temp = filename
i = 0
while os.path.isfile(os.path.join(path, filename) + '.' + mode):
i += 1
temp = filename + str(i)
return os.path.join(path, temp) + '.' + mode
class Question:
def __init__(self, url):
if not _re_question_url.match(str(url)):
raise Exception('Hmmm.. Unvalid URL! Please change.')
else:
self.url = str(url)
r = _session.get(self.url)
self.soup = BeautifulSoup(r.content)
# 获取问题标签
@property
def get_tags(self):
items = self.soup.find_all('a', class_='zm-item-tag')
tags = [unicode(item.string.strip()) for item in items]
return tags
# 获取问题标题
@property
def get_question(self):
raw_question = self.soup.find(
'h2',
class_='zm-item-title zm-editable-content')
return raw_question.string.strip()
# 获取问题描述
@property
def ques_description(self):
data = self.soup.find('div', class_='zm-editable-content')
return html2text.html2text(str(data))
# 获取问题关注者人数
@property
def ques_followers(self):
num = self.soup.find('div', class_='zg-gray-normal').a.strong.string
return '关注者人数为' + num
# 获取问题回答数
@property
def answer_num(self):
raw_html = self.soup.find('h3', id='zh-question-answer-num')
num = raw_html.get('data-num')
self.num = num
return '回答人数为:' + num
# 获取排在最前面的回答
@property
def top_answer(self):
self.answer_num
top = self.soup.find('div', class_=' zm-editable-content clearfix')
answer = html2text.html2text(str(top))
return answer
# 获取排名前几位的回答
def top_i_answers(self, num):
self.answer_num
if not isinstance(num, int) or abs(num) != num:
print 'Ohh! Please enter positive integer:'
elif num > int(self.num):
print 'Sorry, The number of answers for' \
'this question is %s. Please enter again.' % self.num
elif num == 1:
return self.top_answer
elif num > 1:
find = self.soup.find_all(class_=' zm-editable-content clearfix',
limit=num)
for index, answer in enumerate(find):
print '第%d个答案:\n' % (index+1)
print html2text.html2text(str(answer))
# 获取所有回答
@property
def all_answers(self):
self.answer_num
find = self.soup.find_all(class_=' zm-editable-content clearfix',
limit=self.num)
for index, answer in enumerate(find):
print '第%d个答案:\n' % (index+1)
print html2text.html2text(str(answer))
class Author:
def __init__(self, url):
if not _re_author_url.match(str(url)):
raise Exception('Hmmm.. Unvalid URL! Please change.')
else:
if not url.endswith('/'):
url += '/'
self.url = str(url)
r = _session.get(self.url)
self.soup = BeautifulSoup(r.content)
#with open('author.html', 'wb') as f:
#f.write(r.content)
# 获取用户名字
@property
def get_people_name(self):
name = self.soup.find('div', class_='zm-profile-header').span.string
#self_intro = self.soup.find('span', class_='bio').get('title')
#return name + ':' + self_intro
return name
# 获取用户所在地点
@property
def get_people_location(self):
locate = self.soup.find('span', class_='location item').get('title')
return locate
    # Get the user's occupation details
@property
    def get_people_career(self):
        profession = self.soup.find('span', class_='business item') \
            .get('title')
employment = self.soup.find('span', class_='employment item') \
.get('title')
position = self.soup.find('span', class_='position item').get('title')
return '行业: ' + profession \
            + '\n' + '公司: ' + employment \
            + '\n' + '职位: ' + position
|
deliarusu/text-annotation
|
knowledgebase/kbgraph.py
|
Python
|
apache-2.0
| 8,665
| 0.013964
|
'''
Graph representation of the knowledge base
'''
import networkx as nx
import math
import sys
from util import exception
MAXLOGDEG = 'maxlogdeg'
SQRTLOGDEG = 'sqrtlogdeg'
MAX_LEVEL = 5
EQUAL_WEIGHT = 'equal_weight'
LEVEL_WEIGHT = 'level_weight'
EPSILON = sys.float_info.epsilon
class Graph(object):
'''
Graph representation for a generic lexical database
'''
def __init__(self):
'''
Class constructor
'''
# Networkx graph
self.G = nx.Graph()
# all shortest path distances in the graph
self.all_dist = {}
# maximum distance between nodes in the graph
self.max_dist = None
def node_degree(self, node):
'''
Degree of a node in the WordNet graph
'''
return self.G.degree(node)
def weight_graph(self, weight):
'''
Add weights to the node edges
:param weight: weight type 'maxlogdeg' or 'sqrtlogdeg'
'''
if weight != MAXLOGDEG and weight != SQRTLOGDEG:
raise exception.GraphException(weight, 'Undefined graph weight')
for edge in self.G.edges_iter():
#print edge, edge[0], edge[1]
deg_node1 = self.node_degree(edge[0])
deg_node2 = self.node_degree(edge[1])
if deg_node1 == 0:
deg_node1 = EPSILON
if deg_node2 == 0:
deg_node2 = EPSILON
if weight == MAXLOGDEG:
self.G.edge[edge[0]][edge[1]]['weight'] = \
max(math.log(deg_node1), math.log(deg_node2))
            elif weight == SQRTLOGDEG:
                # Assumption: use the larger square-root degree here; the
                # original call passed two arguments to math.sqrt().
                self.G.edge[edge[0]][edge[1]]['weight'] = \
                    max(math.sqrt(deg_node1), math.sqrt(deg_node2))
#print edge, self.G.edge[edge[0]][edge[1]]['weight']
def weighted_concept_path(self, node1, node2):
'''
Shortest path between two nodes
:param node1: id of node 1
:param node2: id of node 2
:return: shortest path between node1 and node2
'''
spath = 0
if self.all_dist:
try:
spath = self.all_dist[node1][node2]
except:
raise exception.GraphException((node1, node2), \
'No path for this node pair')
else:
try:
spath = nx.dijkstra_path_length(self.G, node1, node2)
except:
raise exception.GraphException((node1, node2), \
'No path for this node pair')
return spath
def all_distances(self):
'''
All distances between nodes in the graph
'''
try:
self.all_dist = nx.all_pairs_dijkstra_path_length(self.G)
except:
raise exception.GraphException(self.all_dist, \
'Error computing all pairs path length')
def find_max_distance(self):
'''
Find the maximum distance between nodes in the graph
'''
if not self.all_dist:
self.all_distances()
try:
maxd = -1
for d1 in self.all_dist.itervalues():
for d2 in d1.itervalues():
                    if d2 > maxd:
                        maxd = d2
            self.max_dist = maxd
        except:
            raise exception.GraphException(self.max_dist, \
                'Error computing maximum distance')
def connected_concepts(self, node, level, weight_type = None):
'''
Connected concepts for a node in the graph
:param node: the node for which connected concepts are retrieved
:param level: distance between node and connected concepts
:param weight_type: type of weighting for connected concepts
:return: dictionary of connected concepts and their weight
'''
if level > MAX_LEVEL or level < 0:
raise exception.GraphException(level, \
'Level should be greater than 0 and less than %s' %(MAX_LEVEL))
if weight_type == None:
weight_type = EQUAL_WEIGHT
if weight_type != EQUAL_WEIGHT and weight_type != LEVEL_WEIGHT:
raise exception.GraphException(weight_type, \
'Unsupported weight type')
res_nodes = {}
weight = 0
if weight_type == EQUAL_WEIGHT:
weight = 1.0
# find connected concepts and weight them
while level > 0:
nodes = nx.single_source_shortest_path(self.G, node, level)
snodes = set(nodes.keys())
if weight_type == LEVEL_WEIGHT:
weight = 1/float(level)
for c in snodes:
res_nodes[c] = weight
level -= 1
return res_nodes
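# Hedged usage sketch (node names and edges below are hypothetical, not part
# of the original module): build a tiny graph, weight its edges, then query a
# weighted distance and the concepts connected to a node.
def _graph_usage_example():
    g = Graph()
    g.G.add_edge('a', 'b')
    g.G.add_edge('b', 'c')
    g.weight_graph(MAXLOGDEG)   # edge weight = max of the log node degrees
    g.all_distances()           # cache all-pairs Dijkstra path lengths
    dist = g.weighted_concept_path('a', 'c')
    nearby = g.connected_concepts('a', 2, LEVEL_WEIGHT)
    return dist, nearby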
class WnGraph(Graph):
'''
Graph representation of the WordNet lexical database
'''
def __init__(self, wordnet):
'''
Class constructor
'''
Graph.__init__(self)
self.wordnet = wordnet
def add_edges(self, synset, csynsets):
'''
Add edges between a synset and connected synsets
:param synset: synset representing a concept
:param csynsets: synsets related to synset
'''
for cs in csynsets:
self.G.add_node(cs.name)
self.G.add_edge(synset.name, cs.name)
def build_graph(self):
'''
Build a networkx graph from WordNet
'''
for synset in list(self.wordnet.all_synsets()):
#for synset in list(self.wordnet.all_synsets('n'))[:10]:
self.G.add_node(synset.name)
self.add_edges(synset, synset.hypernyms())
self.add_edges(synset, synset.hyponyms())
self.add_edges(synset, synset.instance_hypernyms())
self.add_edges(synset, synset.instance_hyponyms())
self.add_edges(synset, synset.member_holonyms())
self.add_edges(synset, synset.substance_holonyms())
self.add_edges(synset, synset.part_holonyms())
self.add_edges(synset, synset.member_meronyms())
self.add_edges(synset, synset.substance_meronyms())
self.add_edges(synset, synset.part_meronyms())
self.add_edges(synset, synset.attributes())
self.add_edges(synset, synset.entailments())
self.add_edges(synset, synset.causes())
self.add_edges(synset, synset.also_sees())
self.add_edges(synset, synset.verb_groups())
self.add_edges(synset, synset.similar_tos())
print nx.info(self.G)
def wn_pos(self, pos):
'''
Convert to WordNet part-of-speech
'''
if len(pos) < 1:
return None
if pos[0] == 'N':
return self.wordnet.NOUN
elif pos[0] == 'V':
return self.wordnet.VERB
elif pos[0] == 'J':
return self.wordnet.ADJ
elif pos[0] == 'R':
return self.wordnet.ADV
else:
return None
def concepts(self, token):
'''
Find concepts for a given token
:param token: token for which concepts are to be found
:return list of concepts for the token
'''
concepts = []
word = token.word_str.lower()
word = word.replace(' ', '_')
pos = self.wn_pos(token.pos)
lemma = token.lemma
lemma = lemma.replace(' ', '_')
prin
|
hideoussquid/aureus-12-bitcore
|
qa/rpc-tests/getblocktemplate_longpoll.py
|
Python
|
mit
| 3,620
| 0.005249
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
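# Hedged usage sketch (the field values below are hypothetical): assert that
# the entry whose txid is "abc" reports category "send".
#
#   check_array_result(node.listtransactions(),
#                      {"txid": "abc"},
#                      {"category": "send"})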
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(AureusTestFramework):
'''
Test longpolling with getblocktemplate.
'''
def run_test(self):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that the thread has exited now that a new block has arrived
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
gwpy/gwpy.github.io
|
docs/0.9.0/examples/frequencyseries/rayleigh-3.py
|
Python
|
gpl-3.0
| 409
| 0.002445
|
asd = gwdata.asd(2, 1)
plot = asd.plot(figsize=(8, 6))
plot.add_frequencyseries(rayleigh, newax=True, sharex=plot.axes[0])
asdax, rayax = plot.axes
asdax.set_xlabel('')
asdax.set_xlim(30, 1500)
asdax.set_ylim(5e-24, 1e-21)
asdax.set_ylabel(r'[strain/\rtHz]')
rayax.set_ylim(0, 2)
rayax.set_ylabel('Rayleigh statistic')
asdax.set_title('Sensitivity of LIGO-Livingston around GW151226', fontsize=20)
plot.show()
|
JarbasAI/JarbasAI
|
jarbas_skills/service_client_manager/__init__.py
|
Python
|
gpl-3.0
| 21,611
| 0.000555
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
__author__ = 'jarbas'
LOGGER = getLogger(__name__)
from mycroft.messagebus.message import Message
from mycroft.skills.settings import SkillSettings
from mycroft.configuration import ConfigurationManager
from os.path import dirname, exists
import time, os
from os import mkdir
from jarbas_utils.skill_dev_tools import ResponderBackend
server_config = ConfigurationManager.get().get("jarbas_server", {})
class ClientUser():
def __init__(self, id, name, emitter, reset=False):
self.client_id = id
self.name = name
self.emitter = emitter
default_forbidden_messages = server_config.get("forbidden_messages",
[])
default_forbidden_skills = server_config.get("forbidden_skills", [])
default_forbidden_intents = server_config.get("forbidden_intents", [])
self.default_forbidden_messages = default_forbidden_messages
self.default_forbidden_skills = default_forbidden_skills
# TODO get parser skills names, not ids
self.default_forbidden_intents = default_forbidden_intents
self.init_user_settings()
if reset:
self.reset()
self.load_user()
self.save_user()
# session data
self.session_key = None # encrypt everything with this shared key
self.current_sock = None
self.current_ip = None
self.status = "offline"
self.user_type = "client"
def init_user_settings(self, path=None):
if path is None:
path = dirname(__file__) + "/users"
# check if folders exist
if not os.path.exists(path):
os.makedirs(path)
path += "/" + str(self.client_id) + ".json"
self.settings = SkillSettings(path, autopath=False)
if self.client_id not in self.settings.keys():
self.settings[self.client_id] = {}
def load_user(self):
self.name = self.settings[self.client_id].get("name", "user")
self.nicknames = self.settings[self.client_id].get("nicknames", [])
self.public_key = self.settings[self.client_id].get("public_key")
self.security_level = self.settings[self.client_id].get(
"security_level", 0)
self.forbidden_skills = self.settings[self.client_id].get(
"forbidden_skills", self.default_forbidden_skills)
        self.forbidden_messages = self.settings[self.client_id].get(
"forbidden_messages", self.default_forbidden_messages)
self.forbidden_intents = self.settings[self.client_id].get(
"forbidden_intents", self.default_forbidden_intents)
self.last_seen = self.settings[self.client_id].get("last_seen",
"never")
        self.last_timestamp = self.settings[self.client_id].get("last_ts", 0)
self.known_ips = self.settings[self.client_id].get("known_ips", [])
self.timestamp_history = self.settings[self.client_id].get(
"timestamp_history", [])
self.photo = self.settings[self.client_id].get("photo")
self.user_type = self.settings[self.client_id].get("user_type",
"client")
def save_user(self):
self.settings[self.client_id]["name"] = self.name
self.settings[self.client_id]["nicknames"] = self.nicknames
self.settings[self.client_id]["public_key"] = self.public_key
self.settings[self.client_id]["security_level"] = self.security_level
self.settings[self.client_id][
"forbidden_skills"] = self.forbidden_skills
self.settings[self.client_id][
"forbidden_intents"] = self.forbidden_intents
self.settings[self.client_id]["last_seen"] = self.last_seen
self.settings[self.client_id]["last_ts"] = self.last_timestamp
self.settings[self.client_id]["known_ips"] = self.known_ips
self.settings[self.client_id][
"timestamp_history"] = self.timestamp_history
self.settings[self.client_id]["photo"] = self.photo
self.settings[self.client_id]["user_type"] = self.user_type
self.settings[self.client_id][
"forbidden_messages"] = self.forbidden_messages
self.settings.store()
def add_new_ip(self, ip, emit=True):
if ip not in self.known_ips:
self.known_ips.append(ip)
if emit:
self.emitter.emit(Message("user.new_ip", {"ts": time.time(),
"name": self.name,
"last_seen": self.last_seen}))
def update_timestamp(self):
self.last_timestamp = time.time()
self.timestamp_history.append(self.last_timestamp)
def update_last_seen(self, last_seen):
self.last_seen = last_seen
def set_key(self, key):
self.public_key = key
def authenticate(self):
pass
def add_nicknames(self, names):
if self.name == "user":
self.name = names[0]
for name in names:
if name not in self.nicknames:
self.nicknames.append(name)
def reset(self):
self.name = "user"
self.nicknames = []
self.public_key = "todo"
self.security_level = 0
self.forbidden_messages = self.default_forbidden_messages
self.forbidden_skills = self.default_forbidden_skills
self.forbidden_intents = self.default_forbidden_intents
self.last_seen = "never"
self.last_timestamp = 0
self.known_ips = []
self.timestamp_history = []
self.photo = None
self.user_type = "client"
self.save_user()
class ClientManagerSkill(MycroftSkill):
def __init__(self):
super(ClientManagerSkill, self).__init__()
self.reload_skill = False
self.user_list = {} # id, sock
self.users = {} # id, user object
self.facebook_users = {} # fb_id, user object
def initialize(self):
# listen for status updates
self.emitter.on("user.connect", self.handle_user_connect)
self.emitter.on("user.names", self.handle_user_names)
self.emitter.on("user.request", self.handle_user_request)
self.emitter.on("user.disconnect", self.handle_user_disconnect)
self.emitter.on("fb.chat.message", self.handle_fb_message_received)
self.emitter.on("fb.chat.message.seen", self.handle_fb_message_seen)
self.emitter.on("fb.last.seen.timestamps", self.handle_fb_timestamp)
# build users id list db from disk
if not exists(dirname(__file__) + "/users"):
mkdir(dirname(__file__) + "/users")
user_files = os.listdir(dirname(__file__) + "/users")
for file in user_files:
if ".json" in file:
user_id = file.replace(".json", "")
self.user_list[user_id] = None
# build user objects
for user_id in self.user_list.keys():
user = ClientUser(id=user_id, emitter=self.emitter, name="user")
user.status = "offline"
self.users[user_id] = user
# sort facebook users
for user_id in self.users:
user = self.users[user_id]
if user.user_type == "facebo
|
alxgu/ansible
|
lib/ansible/modules/packaging/os/dpkg_selections.py
|
Python
|
gpl-3.0
| 2,174
| 0.00276
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dpkg_selections
short_description: Dpkg package selections
description:
- Change dpkg package selection state via --get-selections and --set-selections.
version_added: "2.0"
author:
- Brian Brazil (@brian-brazil) <brian.brazil@boxever.com>
options:
name:
description:
- Name of the package
required: true
selection:
description:
- The selection state to set the package to.
choices: [ 'install', 'hold', 'deinstall', 'purge' ]
required: true
notes:
- This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that.
'''
EXAMPLES = '''
# Prevent python from being upgraded.
- dpkg_selections:
name: python
selection: hold
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
selection=dict(choices=['install', 'hold', 'deinstall', 'purge'])
),
supports_check_mode=True,
)
dpkg = module.get_bin_path('dpkg', True)
name = module.params['name']
selection = module.params['selection']
# Get current settings.
rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
if not out:
current = 'not present'
else:
        current = out.split()[1]
changed = current != selection
if module.check_mode or not changed:
        module.exit_json(changed=changed, before=current, after=selection)
module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
module.exit_json(changed=changed, before=current, after=selection)
if __name__ == '__main__':
main()
|
e-koch/ewky_scripts
|
multiprocess.py
|
Python
|
mit
| 606
| 0.00165
|
'''
Wrap a given function into a pool
'''
from multiprocessing import Pool
from itertools import izip, repeat
def _single_input(func_and_args):
    # Module-level helper so multiprocessing can pickle it (a closure defined
    # inside pool_process cannot be sent to the worker processes).
    func, a = func_and_args
    return func(*a)

def pool_process(func, args=None, ncores=1):
    # Check for inputs which need to be repeated: length-1 sequences and
    # scalars are broadcast against the other argument sequences.
    fixed_args = []
    for arg in args:
        try:
            repeat_it = len(arg) == 1
        except TypeError:
            repeat_it = True

        fixed_args.append(repeat(arg) if repeat_it else arg)

    pool = Pool(processes=ncores)

    output = pool.map(_single_input, izip(repeat(func), izip(*fixed_args)))

    pool.close()
    pool.join()

    return output
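# Hedged usage sketch (the function and values below are hypothetical, not
# part of this module). `add` must itself live at module level so the worker
# processes can pickle it:
#
#     def add(x, y):
#         return x + y
#
#     pool_process(add, args=([1, 2, 3], 10), ncores=2)
#     # -> [11, 12, 13]; the scalar 10 is broadcast via repeat()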
|
boutiques/schema
|
tools/python/boutiques/bosh.py
|
Python
|
gpl-2.0
| 26,811
| 0.000298
|
#!/usr/bin/env python
import jsonschema
import json
import os
import sys
import os.path as op
import tempfile
import pytest
from argparse import ArgumentParser, RawTextHelpFormatter
from jsonschema import ValidationError
from boutiques.validator import DescriptorValidationError
from boutiques.publisher import ZenodoError
from boutiques.invocationSchemaHandler import InvocationValidationError
from boutiques.localExec import ExecutorOutput
from boutiques.localExec import ExecutorError
from boutiques.exporter import ExportError
from boutiques.importer import ImportError
from boutiques.localExec import loadJson, addDefaultValues
from boutiques.logger import raise_error
from tabulate import tabulate
def prettyprint(*params):
parser = ArgumentParser("Boutiques pretty-print for generating help text")
parser.add_argument("descriptor", action="store",
help="The Boutiques descriptor.")
results = parser.parse_args(params)
from boutiques.prettyprint import PrettyPrinter
desc = loadJson(results.descriptor)
prettyclass = PrettyPrinter(desc)
return prettyclass.docstring
def create(*params):
parser = ArgumentParser("Boutiques descriptor creator")
parser.add_argument("descriptor", action="store",
help="Output file to store descriptor in.")
parser.add_argument("--docker-image", '-d', action="store",
help="Name of Docker image on DockerHub.")
parser.add_argument("--use-singularity", '-u', action="store_true",
help="When --docker-image is used. Specify to "
"use singularity to run it.")
results = parser.parse_args(params)
from boutiques.creator import CreateDescriptor
new = CreateDescriptor(parser=None,
docker_image=results.docker_image,
use_singularity=results.use_singularity)
new.save(results.descriptor)
return None
def validate(*params):
parser = ArgumentParser("Boutiques descriptor validator")
parser.add_argument("descriptor", action="store",
help="The Boutiques descriptor as a JSON file, JSON "
"string or Zenodo ID (prefixed by 'zenodo.').")
parser.add_argument("--bids", "-b", action="store_true",
help="Flag indicating if descriptor is a BIDS app")
parser.add_argument("--format", "-f", action="store_true",
help="If descriptor is valid, rewrite it with sorted"
" keys.")
results = parser.parse_args(params)
from boutiques.validator import validate_descriptor
descriptor = validate_descriptor(results.descriptor,
format_output=results.format)
if results.bids:
from boutiques.bids import validate_bids
validate_bids(descriptor, valid=True)
def execute(*params):
parser = ArgumentParser("Boutiques local executor", add_help=False)
parser.add_argument("mode", action="store",
help="Mode of operation to use. Launch: takes a "
"set of inputs compliant with invocation schema "
"and launches the tool. Simulate: shows sample "
"command-lines based on the provided descriptor"
" based on provided or randomly generated inputs. "
"Prepare: pulls the Docker or Singularity container "
"image for a given descriptor. ",
choices=["launch", "simulate", "prepare"])
parser.add_argument("--help", "-h", action="store_true",
help="show this help message and exit")
helps = any([True for ht in ["--help", "-h"] if ht in params])
if len(params) <= 1 and helps:
parser.print_help()
raise SystemExit
args, params = parser.parse_known_args(params)
mode = args.mode
params += ["--help"] if args.help is True else []
if mode == "launch":
parser = ArgumentParser("Launches an invocation.")
parser.add_argument("descriptor", action="store",
help="The Boutiques descriptor as a JSON file, "
"JSON string or Zenodo ID (prefixed by 'zenodo.').")
parser.add_argument("invocation", action="store",
help="Input JSON complying to invocation.")
parser.add_argument("-v", "--volumes", action="append", type=str,
                            help="Volumes to mount when launching the "
                                 "container. Format consistent with the following:"
" /a:/b will mount local directory /a to "
"container directory /b.")
parser.add_argument("-x", "--debug", action="store_true",
help="Keeps temporary scripts used during "
"execution, and prints additional debug "
"messages.")
parser.add_argument("-u", "--user", action="store_true",
help="Runs the container as local user ({0})"
" instead of root.".format(os.getenv("USER")))
parser.add_argument("-s", "--stream", action="store_true",
help="Streams stdout and stderr in real time "
"during execution.")
parser.add_argument("--imagepath", action="store",
help="Path to Singularity image. "
"If not specified, will use current directory.")
results = parser.parse_args(params)
descriptor = results.descriptor
inp = results.invocation
# Validate invocation and descriptor
valid = invocation(descriptor, '-i', inp)
# Generate object that will perform the commands
from boutiques.localExec import LocalExecutor
executor = LocalExecutor(descriptor, inp,
{"forcePathType": True,
"debug": results.debug,
"changeUser": results.user,
"stream": results.stream,
"imagePath": results.imagepath})
# Execute it
return executor.execute(results.volumes)
if mode == "simulate":
parser = ArgumentParser("Simulates an invocation.")
parser.add_argument("descriptor", action="store",
help="The Boutiques descriptor as a JSON file, "
"JSON string or Zenodo ID (prefixed by 'zenodo.').")
parser.add_argument("-i", "--input", action="store",
help="Input JSON complying to invocation.")
parser.add_argument("-j", "--json", action="store_true",
help="Flag to generate invocation in JSON format.")
results = parser.parse_args(params)
descriptor = results.descriptor
# Do some basic input scrubbing
inp = results.input
valid = invocation(descriptor, '-i', inp) if inp else\
invocation(descriptor)
# Generate object that will perform the commands
from boutiques.localExec import LocalExecutor
        executor = LocalExecutor(descriptor, inp,
                                 {"forcePathType": True,
"destroyTempScripts": True,
"changeUser": True})
if not inp:
executor.generateRandomParams(1)
if results.json:
sout = [json.dumps(executor.in_dict, indent=4, sort_keys=True)]
print(sout[0])
else:
|
executor.printCmdLine()
sout = executor.cmd_line
# for consistency with execute
# Adding hide to "container location" field since it's an invalid
# value, can parse that to hide the summary print
return ExecutorOutput(os.linesep.join(sout), "",
0, "", [], [], os.linesep.join(sout), ""
|
scottrice/Ice
|
ice/backups.py
|
Python
|
mit
| 1,971
| 0.014206
|
# encoding: utf-8
import datetime
import os
from pysteam import shortcuts
import paths
from logs import logger
def default_backups_directory():
return os.path.join(paths.application_data_directory(), 'Backups')
def backup_filename(user, timestamp_format):
  timestamp = datetime.datetime.now().strftime(timestamp_format)
  return "shortcuts." + timestamp + ".vdf"

def shortcuts_backup_path(directory, user, timestamp_format="%Y%m%d%H%M%S"):
"""
Returns the path for a shortcuts.vdf backup file.
This path is in the designated backup directory, and includes a timestamp
before the extension to allow many backups to exist at once.
"""
assert(directory is not None)
return os.path.join(
directory,
str(user.user_id),
backup_filename(user, timestamp_format)
)
def backup_directory(config):
backup_dir = config.backup_directory
if backup_dir is None:
return None
if backup_dir == "":
backup_dir = default_backups_directory()
logger.debug("Specified empty string as backup directory. Defaulting to %s" % backup_dir)
return backup_dir
def create_backup_of_shortcuts(config, user, dry_run=False):
def _create_directory_if_needed(directory):
if os.path.exists(directory):
return
logger.debug("Creating directory: %s" % directory)
os.makedirs(directory)
backup_dir = backup_directory(config)
if backup_dir is None:
logger.info("No backups directory specified, so not backing up shortcuts.vdf before overwriting. See config.txt for more info")
return
_create_directory_if_needed(backup_dir)
if not os.path.isdir(backup_dir):
logger.warning("Backup directory path is something other than a directory. Skipping backups")
return
backup_path = shortcuts_backup_path(backup_dir, user)
# Make sure the user-specific backups dir exists
_create_directory_if_needed(os.path.dirname(backup_path))
shortcuts.write_shortcuts(backup_path, shortcuts.get_shortcuts(user))
|
jatinmistry13/BasicNeuralNetwork
|
two_layer_neural_network.py
|
Python
|
mit
| 877
| 0.023945
|
import sys
import numpy as np
# sigmoid function
def nonlin(x, deriv=False):
if(deriv==True):
return x*(1-x)
    return 1/(1+np.exp(-x))

# input dataset
X = np.array([[0,0,1],
              [0,1,1],
              [1,0,1],
              [1,1,1]])
# output dataset
y = np.array([[0,0,1,1]]).T
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
syn0 = 2*np.random.random((3,1)) - 1
for iter in xrange(10000):
# forward propagation
l0 = X
l1 = nonlin(np.dot(l0,syn0))
# how much did we miss?
l1_error = y - l1
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1,True)
# update weights
syn0 += np.dot(l0.T,l1_delta)
print "Output After Training:"
print l1
|
elioth010/lugama
|
venv/lib/python2.7/site-packages/mongoengine/queryset/visitor.py
|
Python
|
gpl-2.0
| 4,434
| 0
|
import copy
from mongoengine.errors import InvalidQueryError
from mongoengine.queryset import transform
__all__ = ('Q',)
class QNodeVisitor(object):
"""Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""Called by (New)Q objects.
"""
return query
class DuplicateQueryConditionsError(InvalidQueryError):
pass
class SimplificationVisitor(QNodeVisitor):
"""Simplifies query trees by combining unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# The simplification only applies to 'simple' queries
if all(isinstance(node, Q) for node in combination.children):
queries = [n.query for n in combination.children]
try:
return Q(**self._query_conjunction(queries))
except DuplicateQueryConditionsError:
# Cannot be simplified
pass
return combination
def _query_conjunction(self, queries):
"""Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops.intersection(query_ops)
if intersection:
raise DuplicateQueryConditionsError()
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
class QueryCompilerVisitor(QNodeVisitor):
"""Compiles the nodes in a query tree to a PyMongo-compatible query
dictionary.
"""
def __init__(self, document):
self.document = document
def visit_combination(self, combination):
operator = "$and"
if combination.operation == combination.OR:
operator = "$or"
return {operator: combination.children}
def visit_query(self, query):
return transform.query(self.document, **query.query)
class QNode(object):
"""Base class for nodes in query trees.
"""
AND = 0
OR = 1
def to_query(self, document):
query = self.accept(SimplificationVisitor())
query = query.accept(QueryCompilerVisitor(document))
return query
def accept(self, visitor):
raise NotImplementedError
def _combine(self, other, operation):
"""Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty', True):
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
class QCombination(QNode):
"""Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
    def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
|
@property
def empty(self):
return not bool(self.children)
class Q(QNode):
"""A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not bool(self.query)
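# Hedged usage sketch (the document class and field names below are
# hypothetical): Q objects combine with & and |, get simplified, and compile
# to a PyMongo-style dict via to_query().
#
#   q = (Q(age__gte=18) & Q(country='US')) | Q(vip=True)
#   mongo_query = q.to_query(User)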
|
laysakura/relshell
|
relshell/test/test_record.py
|
Python
|
apache-2.0
| 461
| 0
|
# -*- coding: utf-8 -*-
from nose.tools import *
from relshell.record import Record
def test_record_usage():
rec = Record('good evening')
eq_(len(rec), 1)
    rec = Record('Hello', 'World')
eq_(len(rec), 2)
rec = Record('lucky', 777, 'number')
eq_(len(rec), 3)
# get column by index
eq_(rec[0], 'lucky')
# iterate all columns
cols = []
for col in rec:
cols.append(col)
eq_(cols, ['lucky', 777, 'number'])
|
Lind-Project/native_client
|
build/directory_storage_test.py
|
Python
|
bsd-3-clause
| 3,028
| 0.010568
|
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests of directory storage adapter."""
import os
import unittest
import directory_storage
import fake_storage
import gsd_storage
import hashing_tools
import hashing_tools_test
import working_directory
class TestDirectoryStorage(unittest.TestCase):
def setUp(self):
storage = fake_storage.FakeStorage()
self._dir_storage = directory_storage.DirectoryStorageAdapter(storage)
def test_WriteRead(self):
# Check that a directory can be written and then read back.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
temp2 = os.path.join(work_dir, 'temp2')
hashing_tools_test.GenerateTestTree('write_read', temp1)
self._dir_storage.PutDirectory(temp1, 'foo')
self._dir_storage.GetDirectory('foo', temp2)
self.assertEqual(hashing_tools.StableHashPath(temp1),
hashing_tools.StableHashPath(temp2))
def test_InputUntouched(self):
# Check that PutDirectory doesn't alter its inputs.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
hashing_tools_test.GenerateTestTree('input_untouched', temp1)
h1 = hashing_tools.StableHashPath(temp1)
self._dir_storage.PutDirectory(temp1, 'hello')
h2 = hashing_tools.StableHashPath(temp1)
self.assertEqual(h1, h2)
def test_URLsPropagate(self):
# Check that consistent non-None URLs come from get and put.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
temp2 = os.path.join(work_dir, 'temp2')
hashing_tools_test.GenerateTestTree('url_propagate', temp1)
url1 = self._dir_storage.PutDirectory(temp1, 'me')
url2 = self._dir_storage.GetDirectory('me', temp2)
self.assertEqual(url1, url2)
      self.assertNotEqual(None, url1)
def test_BadWrite(self):
def call(cmd):
return 1
storage = gsd_storage.GSDStorage(
gsutil=['mygsutil'],
write_bucket='mybucket',
read_buckets=[],
call=call)
dir_storage = directory_storage.DirectoryStorageAdapter(storage)
|
# Check that storage exceptions come thru on failure.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
hashing_tools_test.GenerateTestTree('bad_write', temp1)
self.assertRaises(gsd_storage.GSDStorageError,
dir_storage.PutDirectory, temp1, 'bad')
def test_BadRead(self):
# Check that storage exceptions come thru on failure.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
self.assertEqual(None, self._dir_storage.GetDirectory('foo', temp1))
if __name__ == '__main__':
unittest.main()
|
GabrielNicolasAvellaneda/dd-agent
|
tests/checks/mock/test_wmi_check.py
|
Python
|
bsd-3-clause
| 6,955
| 0.00115
|
# project
from tests.checks.common import AgentCheckTest
Win32_OperatingSystem_attr = {
'BootDevice': "\\Device\\HarddiskVolume1",
'BuildNumber': "9600",
'BuildType': "Multiprocessor Free",
'Caption': "Microsoft Windows Server 2012 R2 Standard Evaluation",
'CodeSet': "1252",
'CountryCode': "1",
'CreationClassName': "Win32_OperatingSystem",
'CSCreationClassName': "Win32_ComputerSystem",
'CSName': "WIN-7022K3K6GF8",
'CurrentTimeZone': -420,
'DataExecutionPrevention_32BitApplications': True,
'DataExecutionPrevention_Available': True,
'DataExecutionPrevention_Drivers': True,
'DataExecutionPrevention_SupportPolicy': 3,
'Debug': False,
'Description': "",
'Distributed': False,
'EncryptionLevel': 256,
'ForegroundApplicationBoost': 2,
'FreePhysicalMemory': "3238796",
'FreeSpaceInPagingFiles': "720896",
'FreeVirtualMemory': "3936028",
'InstallDate': "20140729152415.000000-420",
'LastBootUpTime': "20150331151024.957920-420",
'LocalDateTime': "20150331152210.670000-420",
'Locale': "0409",
'Manufacturer': "Microsoft Corporation",
'MaxNumberOfProcesses': 4294967295,
'MaxProcessMemorySize': "137438953344",
'MUILanguages': "en-US",
'Name': "Microsoft Windows Server 2012 R2 Standard Evaluation"
"|C:\\Windows|\\Device\\Harddisk0\\Partition2",
'NumberOfProcesses': 60,
'NumberOfUsers': 2,
'OperatingSystemSKU': 79,
'Organization': "",
'OSArchitecture': "64-bit",
'OSLanguage': 1033,
'OSProductSuite': 272,
'OSType': 18,
'PortableOperatingSystem': False,
'Primary': True,
'ProductType': 3,
'RegisteredUser': "Windows User",
'SerialNumber': "00252-10000-00000-AA228",
'ServicePackMajorVersion': 0,
'ServicePackMinorVersion': 0,
'SizeStoredInPagingFiles': "720896",
'Status': "OK",
'SuiteMask': 272,
'SystemDevice': "\\Device\\HarddiskVolume2",
'SystemDirectory': "C:\\Windows\\system32",
'SystemDrive': "C:",
'TotalVirtualMemorySize': "4914744",
'TotalVisibleMemorySize': "4193848",
'Version': "6.3.9600",
'WindowsDirectory': "C:\\Windows",
}
Win32_PerfFormattedData_PerfProc_Process_attr = {
'CreatingProcessID': 2976,
'ElapsedTime': "2673",
'HandleCount': 461,
'IDProcess': 4036,
'IODataBytesPersec': "219808",
'IODataOperationsPersec': "1049",
'IOOtherBytesPersec': "0",
'IOOtherOperationsPersec': "1699",
'IOReadBytesPerSec': "20455",
'IOReadOperationsPersec': "505",
'IOWriteBytesPersec': "199353",
'IOWriteOperationsPersec': "544",
'Name': "chrome",
'PageFaultsPersec': 3,
'PageFileBytes': "98619392",
'PageFileBytesPeak': "98619392",
'PercentPrivilegedTime': "12",
'PercentProcessorTime': "18",
'PercentUserTime': "6",
'PoolNonpagedBytes': 28128,
'PoolPagedBytes': 325216,
'PriorityBase': 8,
'PrivateBytes': "98619392",
'ThreadCount': 9,
'VirtualBytes': "303472640",
'VirtualBytesPeak': "304521216",
'WorkingSet': "112803840",
'WorkingSetPeak': "112803840",
'WorkingSetPrivate': "82731008",
}
Win32_Process_attr = {
'CommandLine': "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe\"",
'Handle': "3264"
}
class Mocked_Win32_Service(object):
"""
Generate Mocked Win32 Service from given attributes
"""
def __init__(self, **entries):
self.__dict__.update(entries)
def query(self, q):
if q == "SELECT CommandLine FROM Win32_Process WHERE Handle = 4036":
return [Mocked_Win32_Service(**Win32_Process_attr)]
else:
return []
class Mocked_WMI(object):
"""
Mock WMI methods for test purpose
"""
def __init__(self, mocked_wmi_classes):
# Make WMI classes callable
def get_wmi_obj(wmi_obj):
return lambda Name=None: [wmi_obj] if not Name or wmi_obj.Name == Name else []
for wmi_class, wmi_obj in mocked_wmi_classes.iteritems():
mocked_wmi_classes[wmi_class] = get_wmi_obj(wmi_obj)
self._mocked_classes = mocked_wmi_classes
def WMI(self, host, user, password):
"""
Return a mock WMI object with a mock class
"""
return Mocked_Win32_Service(**self._mocked_classes)
class WMITestCase(AgentCheckTest):
CHECK_NAME = 'wmi_check'
CONFIG = {
'class': "Win32_OperatingSystem",
'metrics': [["NumberOfProcesses", "system.proc.count", "gauge"],
["NumberOfUsers", "system.users.count", "gauge"]],
'constant_tags': ["mytag"]
}
FILTER_CONFIG = {
'class': "Win32_PerfFormattedData_PerfProc_Process",
'metrics': [["ThreadCount", "my_app.threads.count", "gauge"],
["VirtualBytes", "my_app.mem.virtual", "gauge"]],
'filters': [{'Name': "chrome"}],
'tag_by': "Name"
}
TAG_QUERY_CONFIG = {
'class': "Win32_PerfFormattedData_PerfProc_Process",
'metrics': [["IOReadBytesPerSec", "proc.io.bytes_read", "gauge"]],
'filters': [{'Name': "chrome"}],
'tag_queries': [["IDProcess", "Win32_Process", "Handle", "CommandLine"]]
}
def setUp(self):
# Mocking `wmi` Python package
import sys
sys.modules['wmi'] = Mocked_WMI(
{
'Win32_OperatingSystem': Mocked_Win32_Service(**Win32_OperatingSystem_attr),
'Win32_PerfFormattedData_PerfProc_Process':
Mocked_Win32_Service(**Win32_PerfFormattedData_PerfProc_Process_attr),
})
def test_check(self):
"""
Collect WMI metrics + `constant_tags`
"""
# Run check
config = {
            'instances': [self.CONFIG]
}
self.run_check(config)
# Test metrics
for _, mname, _ in self.CONFIG['metrics']:
self.assertMetric(mname, tags=self.CONFIG['constant_tags'], count=1)
self.coverage_report()
def test_filter_and_tagging(self):
"""
Test `filters` and `tag_by` parameters
"""
# Run check
config = {
            'instances': [self.FILTER_CONFIG]
}
self.run_check(config)
# Test metrics
for _, mname, _ in self.FILTER_CONFIG['metrics']:
self.assertMetric(mname, tags=["name:chrome"], count=1)
self.coverage_report()
def test_tag_queries(self):
"""
Test `tag_queries` parameter
"""
# Run check
config = {
'instances': [self.TAG_QUERY_CONFIG]
}
self.run_check(config)
# Test metrics
for _, mname, _ in self.TAG_QUERY_CONFIG['metrics']:
self.assertMetric(mname, tags=['commandline:c:\\program_files_(x86)\\google'
'\\chrome\\application\\chrome.exe"'], count=1)
self.coverage_report()
|
penzance/canvas_python_sdk
|
static_methods/users.py
|
Python
|
mit
| 2,009
| 0.001991
|
def list_users_in_account(request_ctx, account_id, search_term=None,
include=None, per_page=None, **request_kwargs):
"""
Retrieve the list of users associated with this account.
@example_request
curl https://<canvas>/api/v1/accounts/self/users?search_term=<search value> \
-X GET \
-H 'Authorization: Bearer <token>'
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param search_term: (optional) The partial name or full ID of the users to match and return in the
results list. Must be at least 3 characters.
        Note that the API will prefer matching on canonical user ID if the ID has
a numeric form. It will only search against other fields if non-numeric
in form, or if the numeric value doesn't yield any matches. Queries by
administrative users will search on SIS ID, name, or email address; non-
administrative queries will only be compared against name.
:type search_term: string or None
:param include: (optional) One of (avatar_url, email, last_login, time_zone)
:type include: array or None
    :param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List users in account
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
include_types = ('avatar_url', 'email', 'last_login', 'time_zone')
utils.validate_attr_is_acceptable(include, include_types)
path = '/v1/accounts/{account_id}/users'
payload = {
'include[]': include,
'search_term': search_term,
'per_page': per_page,
}
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
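# Hedged usage sketch (the request context construction is assumed, not shown
# in this file; `request_ctx`, `client` and `utils` come from the SDK): list
# users in the 'self' account whose names match "smith", including email and
# last login.
#
#   response = list_users_in_account(request_ctx, 'self',
#                                    search_term='smith',
#                                    include=['email', 'last_login'])
#   users = response.json()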
|
pudo/nomenklatura
|
nomenklatura/dataset.py
|
Python
|
mit
| 662
| 0
|
from typing import Any
class Dataset(object):
"""A unit of entities. A dataset is a set of data, sez W3C."""
def __init__(self, name: str, title: str) -> None:
self.name = name
self.title = title
    def __eq__(self, other: Any) -> bool:
try:
return not not self.name == other.name
except AttributeError:
return False
def __lt__(self, other: "Dataset") -> bool:
return self.name.__lt__(other.name)
def __hash__(self) -> int:
return hash((self.__class__.__name__, self.name))
def __repr__(self) -> str:
return f"<{self.__class__.__name__}({self.name!r})>"
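# Hedged usage sketch (the names below are hypothetical): datasets compare,
# sort and hash by name, so duplicates collapse inside sets.
def _dataset_example() -> None:
    a = Dataset("us_ofac", "US sanctions list")
    b = Dataset("us_ofac", "same dataset, different title")
    assert a == b
    assert len({a, b}) == 1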
|
antoinecarme/pyaf
|
tests/neuralnet/test_ozone_rnn_only_LSTM.py
|
Python
|
bsd-3-clause
| 1,418
| 0.019746
|
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import logging
import logging.config
#logging.config.fileConfig('logging.conf')
logging.basicConfig(level=logging.INFO)
#get_ipython().magic('matplotlib inline')
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mParallelMode = True;
lEngine.mOptions.set_active_autoregressions(['LSTM']);
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_rnn_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/rnn_ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
|
osrf/rosbook
|
code/basics/src/message_publisher.py
|
Python
|
apache-2.0
| 344
| 0.002907
|
#!/usr/bin/env python
import rospy
from basics.msg import Complex
from random import random
rospy.init_node('message_publisher')
pub = rospy.Publisher('complex', Complex)
rate = rospy.Rate(2)
while not rospy.is_shutdown():
msg = Complex()
msg.real = random()
msg.imaginary = random()
pub.publish(msg)
|
rate.sleep()
|
ijat/Hotspot-PUTRA-Auto-login
|
PyInstaller-3.2/PyInstaller/hooks/hook-wx.lib.activex.py
|
Python
|
gpl-3.0
| 571
| 0.005254
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import exec_statement
# This needed because comtypes wx.lib.activex generates some stuff.
exec_statement("import wx.lib.activex")
|
mccorkle/seds-utils
|
KnownUsers.py
|
Python
|
gpl-3.0
| 4,260
| 0.004225
|
class KnownUsers:
"""
This deals with both the dedicated server log AND the xml "user" file, as related to the known users.
TODO: refactor this to have a distinct class for the dedicatedLog and the users.xml
"""
def __init__(self, existing_users_filename):
"""Setup init variables parsing the existing user xml."""
import xml.etree.ElementTree as ET
try:
known_users = ET.parse(existing_users_filename)
self.knownUsersRoot = known_users.getroot()
self.existingUsers = self.knownUsersRoot.findall("./user")
except IOError:
print('warning: no known users file: %s' % existing_users_filename)
return
def getTodayLogins(self, serverLogLines):
import re
userLogins = {}
user = {}
# run through the server log lines, find this guy based on his displayname
for line in serverLogLines:
# 2015-11-10 19:51:36.696 - Thread: 8 -> OnConnectedClient mccorkle attempt
if ('OnConnectedClient' in line): # should catch only the FIRST login
matchObject = re.match(r'(\d+-\d+-\d+)\s(\d+:\d+:\d+\.\d+).*OnConnectedClient\s(.*?)\sattempt', line)
if (matchObject):
user['loginTime'] = matchObject.group(1) + " " + matchObject.group(2)
user['loginName'] = matchObject.group(3)
# print ("LOGIN: %s %s" % (user['loginName'], user['loginTime']))
if ('User left' in line and user['loginName'] in line): # and will overwrite until the last logout time
matchObject = re.match(r'(\d+-\d+-\d+)\s(\d+:\d+:\d+\.\d+)\s', line)
user['logoutTime'] = matchObject.group(1) + " " + matchObject.group(2)
# if user['logoutTime']:
                # print ("LOGOUT: %s %s" % (user['loginName'], user['logoutTime']))
userLogins[user['loginName']] = user
del user
return userLogins
def getExistingUsers(self):
return self.existingUsers
# return self.knownUsersRoot.findall("./user")
def getNewUsers(self, playersDict):
newUsers = {}
for player in playersDict:
# print ("checking player %s to see if he is in users" % player)
foundKnownUser = 0
            for knownUser in self.knownUsersRoot.findall("./user"):
if knownUser.get('inGameID') == playersDict[player]["inGameID"]:
foundKnownUser = 1
# childBranch = knownUser
if foundKnownUser == 0:
# print ("** NEW USER")
newUsers[playersDict[player]["inGameID"]] = playersDict[player]
return newUsers
def updateUsersFile(self, playersDict, existingUsersFilename):
import xml.etree.ElementTree as ET
for player in playersDict:
# print ("checking player %s to see if he is in users" % player)
foundKnownUser = 0
for knownUser in self.knownUsersRoot.findall("./user"):
if knownUser.get('inGameID') == playersDict[player]["inGameID"]:
foundKnownUser = 1
if foundKnownUser == 0:
# print ("** NEW USER")
# add all the new users to the knownUsers file, so next time we read, they aren't new
child = ET.SubElement(self.knownUsersRoot, "user")
child.set("username", playersDict[player]["username"])
child.set("inGameID", playersDict[player]["inGameID"])
child.set("steamID", playersDict[player]["steamID"])
child.set("playerToolbarSlotCount", playersDict[player]["playerToolbarSlotCount"])
# firstSeen is a combination of loginTime || users.xml where we stored the first time we saw this user
# we don't have those inside of parsing users, so need to parse and class log file first
# child.set("firstSeen", today)
# child.set("loginTime", loginTime)
# child.set("logoutTime", logoutTime)
testOut = ET.ElementTree(self.knownUsersRoot)
testOut.write(existingUsersFilename, encoding='utf-8', xml_declaration=True)
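# Hedged usage sketch (file names and the players dict below are hypothetical):
#
#   users = KnownUsers('users.xml')
#   with open('SpaceEngineersDedicated.log') as log:
#       logins = users.getTodayLogins(log.readlines())
#   new_players = users.getNewUsers(players)        # players parsed elsewhere
#   users.updateUsersFile(players, 'users.xml')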
|
RossMeikleham/Numerical-Methods
|
tests/vandermonde_tests.py
|
Python
|
mit
| 742
| 0.040431
|
import unittest
from numericalmethods.vandermonde import *
class TestsVandermonde(unittest.TestCase):
#Test generating Vandermonde matrix
def test_matrix(self):
v = vandermonde_matrix([1,2,3,4])
        self.assertEqual(v.tolist(), [[1,1,1,1],[1,2,4,8],[1,3,9,27],[1,4,16,64]])
#Test determinant of Vandermonde matrix with more than 1 element
def test_det_multi(self):
d = vandermonde_det(vandermonde_matrix([1,2,3,4]))
self.assertEqual(d, 12)
    #Test determinant of Vandermonde matrix with exactly 1 element
def test_det_single(self):
d = vandermonde_det(vandermonde_matrix([1]))
self.assertEqual(d, 1)
#vandermonde_det
if __name__ == '__main__':
unittest.main()
|
riquito/Baobab
|
doc/source/conf.py
|
Python
|
apache-2.0
| 7,266
| 0.006744
|
# -*- coding: utf-8 -*-
#
# Baobab documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 7 00:44:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo','jsonext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates/sphinxdoc']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Baobab'
copyright = u'2010, Riccardo Attilio Galli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.1'
# The full version, including alpha/beta/rc tags.
release = '1.3.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logo_baobab_200.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'*': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
'index': ['download.html','globaltoc.html', 'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'example_animals':'animals.html','example_forum':'forum.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Baobabdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Baobab.tex', u'Baobab Documentation',
u'Riccardo Attilio Galli', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'baobab', u'Baobab Documentation',
[u'Riccardo Attilio Galli'], 1)
]
|
chainer/chainercv
|
chainercv/extensions/evaluator/instance_segmentation_coco_evaluator.py
|
Python
|
mit
| 7,737
| 0.000258
|
import copy
import numpy as np
from chainer import reporter
import chainer.training.extensions
from chainercv.evaluations import eval_instance_segmentation_coco
from chainercv.utils import apply_to_iterator
try:
import pycocotools.coco # NOQA
_available = True
except ImportError:
_available = False
class InstanceSegmentationCOCOEvaluator(chainer.training.extensions.Evaluator):
"""An extension that evaluates a instance segmentation model by MS COCO metric.
This extension iterates over an iterator and evaluates the prediction
results.
The results consist of average precisions (APs) and average
recalls (ARs) as well as the mean of each (mean average precision and mean
average recall).
This extension reports the following values with keys.
Please note that if
:obj:`label_names` is not specified, only the mAPs and mARs are reported.
The underlying dataset of the iterator is assumed to return
:obj:`img, mask, label` or :obj:`img, mask, label, area, crowded`.
.. csv-table::
:header: key, description
ap/iou=0.50:0.95/area=all/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_1]_
ap/iou=0.50/area=all/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_1]_
ap/iou=0.75/area=all/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_1]_
ap/iou=0.50:0.95/area=small/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_1]_ [#coco_ins_ext_5]_
ap/iou=0.50:0.95/area=medium/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_1]_ [#coco_ins_ext_5]_
ap/iou=0.50:0.95/area=large/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_1]_ [#coco_ins_ext_5]_
ar/iou=0.50:0.95/area=all/max_dets=1/<label_names[l]>, \
[#coco_ins_ext_2]_
ar/iou=0.50/area=all/max_dets=10/<label_names[l]>, \
[#coco_ins_ext_2]_
ar/iou=0.75/area=all/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_2]_
ar/iou=0.50:0.95/area=small/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_2]_ [#coco_ins_ext_5]_
ar/iou=0.50:0.95/area=medium/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_2]_ [#coco_ins_ext_5]_
ar/iou=0.50:0.95/area=large/max_dets=100/<label_names[l]>, \
[#coco_ins_ext_2]_ [#coco_ins_ext_5]_
map/iou=0.50:0.95/area=all/max_dets=100, \
[#coco_ins_ext_3]_
map/iou=0.50/area=all/max_dets=100, \
[#coco_ins_ext_3]_
map/iou=0.75/area=all/max_dets=100, \
[#coco_ins_ext_3]_
map/iou=0.50:0.95/area=small/max_dets=100, \
[#coco_ins_ext_3]_ [#coco_ins_ext_5]_
map/iou=0.50:0.95/area=medium/max_dets=100, \
[#coco_ins_ext_3]_ [#coco_ins_ext_5]_
map/iou=0.50:0.95/area=large/max_dets=100, \
[#coco_ins_ext_3]_ [#coco_ins_ext_5]_
ar/iou=0.50:0.95/area=all/max_dets=1, \
[#coco_ins_ext_4]_
ar/iou=0.50/area=all/max_dets=10, \
[#coco_ins_ext_4]_
ar/iou=0.75/area=all/max_dets=100, \
[#coco_ins_ext_4]_
ar/iou=0.50:0.95/area=small/max_dets=100, \
            [#coco_ins_ext_4]_ [#coco_ins_ext_5]_
ar/iou=0.50:0.95/area=medium/max_dets=100, \
[#coco_ins_ext_4]_ [#coco_ins_ext_5]_
ar/iou=0.50:0.95/area=large/max_dets=100, \
[#coco_ins_ext_4]_ [#coco_ins_ext_5]_
.. [#coco_ins_ext_1] Average precision for class \
:obj:`label_names[l]`, where :math:`l` is the index of the class. \
If class :math:`l` does not exist in either :obj:`pred_labels` or \
:obj:`gt_labels`, the corresponding value is set to :obj:`numpy.nan`.
.. [#coco_ins_ext_2] Average recall for class \
:obj:`label_names[l]`, where :math:`l` is the index of the class. \
If class :math:`l` does not exist in either :obj:`pred_labels` or \
:obj:`gt_labels`, the corresponding value is set to :obj:`numpy.nan`.
.. [#coco_ins_ext_3] The average of average precisions over classes.
.. [#coco_ins_ext_4] The average of average recalls over classes.
.. [#coco_ins_ext_5] Skip if :obj:`gt_areas` is :obj:`None`.
Args:
iterator (chainer.Iterator): An iterator. Each sample should be
            the following tuple :obj:`img, mask, label, area, crowded`.
target (chainer.Link): A detection link. This link must have
:meth:`predict` method that takes a list of images and returns
:obj:`masks`, :obj:`labels` and :obj:`scores`.
label_names (iterable of strings): An iterable of names of classes.
If this value is specified, average precision and average
recalls for each class are reported.
comm (~chainermn.communicators.CommunicatorBase):
A ChainerMN communicator.
If it is specified, this extension scatters the iterator of
root worker and gathers the results to the root worker.
"""
trigger = 1, 'epoch'
default_name = 'validation'
priority = chainer.training.PRIORITY_WRITER
def __init__(
self, iterator, target,
label_names=None, comm=None):
if not _available:
raise ValueError(
'Please install pycocotools \n'
'pip install -e \'git+https://github.com/cocodataset/coco.git'
'#egg=pycocotools&subdirectory=PythonAPI\'')
if iterator is None:
iterator = {}
super(InstanceSegmentationCOCOEvaluator, self).__init__(
iterator, target)
self.label_names = label_names
self.comm = comm
def evaluate(self):
target = self._targets['main']
if self.comm is not None and self.comm.rank != 0:
apply_to_iterator(target.predict, None, comm=self.comm)
return {}
iterator = self._iterators['main']
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
in_values, out_values, rest_values = apply_to_iterator(
target.predict, it, comm=self.comm)
# delete unused iterators explicitly
del in_values
pred_masks, pred_labels, pred_scores = out_values
if len(rest_values) == 2:
gt_masks, gt_labels = rest_values
gt_areas = None
gt_crowdeds = None
elif len(rest_values) == 4:
gt_masks, gt_labels, gt_areas, gt_crowdeds =\
rest_values
else:
raise ValueError('the dataset should return '
'sets of (img, mask, label) or sets of '
'(img, mask, label, area, crowded).')
result = eval_instance_segmentation_coco(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels, gt_areas, gt_crowdeds)
report = {}
for key in result.keys():
if key.startswith('map') or key.startswith('mar'):
report[key] = result[key]
if self.label_names is not None:
for key in result.keys():
if key.startswith('ap') or key.startswith('ar'):
for l, label_name in enumerate(self.label_names):
report_key = '{}/{:s}'.format(key, label_name)
try:
report[report_key] = result[key][l]
except IndexError:
report[report_key] = np.nan
observation = {}
with reporter.report_scope(observation):
reporter.report(report, target)
return observation
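# Hedged usage sketch, not part of the original file: `model`, `val_dataset`,
# `label_names` and `trainer` are placeholders. Any link whose predict() returns
# (masks, labels, scores) can be evaluated, e.g.:
#
#     it = chainer.iterators.SerialIterator(
#         val_dataset, batch_size=1, repeat=False, shuffle=False)
#     evaluator = InstanceSegmentationCOCOEvaluator(
#         it, model, label_names=label_names)
#     trainer.extend(evaluator, trigger=(1, 'epoch'))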
|
StructuralNeurobiologyLab/SyConnFS
|
syconnfs/representations/segmentation_helper.py
|
Python
|
gpl-2.0
| 9,628
| 0.001246
|
import cPickle as pkl
import networkx as nx
import numpy as np
import os
import scipy.spatial
from ..handler.basics import chunkify
from ..processing.general import single_conn_comp_img
from syconnmp.shared_mem import start_multiprocess_obj, start_multiprocess
from ..handler.compression import VoxelDict, AttributeDict
from syconnmp.shared_mem import start_multiprocess_obj
from .utils import subfold_from_ix
import segmentation
from knossos_utils import knossosdataset
def dataset_analysis_thread(args):
paths = args[0]
obj_type = args[1]
version = args[2]
working_dir = args[3]
recompute = args[4]
attr_dict = dict(id=[], size=[], bounding_box=[], rep_coord=[])
for p in paths:
if not len(os.listdir(p)) > 0:
os.rmdir(p)
else:
this_attr_dc = AttributeDict(p + "/attr_dict.pkl",
read_only=not recompute, timeout=3600)
if recompute:
this_vx_dc = VoxelDict(p + "/voxel.pkl", read_only=True,
timeout=3600)
for so_id in this_attr_dc.keys():
attr_dict["id"].append(so_id)
so = segmentation.SegmentationObject(so_id,
obj_type,
version,
working_dir)
so.attr_dict = this_attr_dc[so_id]
if recompute:
so.load_voxels(voxel_dc=this_vx_dc)
so.calculate_rep_coord(voxel_dc=this_vx_dc)
if recompute: # or "rep_coord" not in so.attr_dict:
so.attr_dict["rep_coord"] = so.rep_coord
if recompute: # or "bounding_box" not in so.attr_dict:
so.attr_dict["bounding_box"] = so.bounding_box
if recompute: #or "size" not in so.attr_dict:
so.attr_dict["size"] = so.size
for attribute in ["rep_coord", "size", "bounding_box"]:#so.attr_dict.keys():
if attribute not in attr_dict:
attr_dict[attribute] = []
attr_dict[attribute].append(so.attr_dict[attribute])
# so.save_attr_dict()
this_attr_dc[so_id] = so.attr_dict
if recompute:
this_attr_dc.save2pkl()
return attr_dict
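# Hedged illustration, not part of the original file: the *_thread helpers take a
# single packed argument tuple, so they are typically dispatched over chunks of
# storage paths with the multiprocessing helpers imported above, e.g.:
#
#     multi_params = [(path_chunk, "sv", "0", working_dir, True)
#                     for path_chunk in chunkify(storage_paths, 100)]
#     results = start_multiprocess(dataset_analysis_thread, multi_params)
#
# Here `storage_paths` and `working_dir` are placeholders.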
def map_objects_thread(args):
paths = args[0]
obj_type = args[1]
obj_version = args[2]
working_dir = args[3]
kd_path = args[4]
readonly = args[5]
if len(args) > 6:
datatype = args[6]
else:
datatype = np.uint64
kd = knossosdataset.KnossosDataset()
kd.initialize_from_knossos_path(kd_path)
seg_dataset = segmentation.SegmentationDataset(obj_type,
version=obj_version,
working_dir=working_dir)
sv_id_dict = {}
for p in paths:
this_attr_dc = AttributeDict(p + "/attr_dict.pkl",
read_only=readonly, timeout=3600)
this_vx_dc = VoxelDict(p + "/voxel.pkl", read_only=True,
timeout=3600)
for so_id in this_vx_dc.keys():
so = seg_dataset.get_segmentation_object(so_id)
so.attr_dict = this_attr_dc[so_id]
so.load_voxels(voxel_dc=this_vx_dc)
if readonly:
                if "mapping_ids" in so.attr_dict:
ids = so.attr_dict["mapping_ids"]
id_ratios = so.attr_dict["mapping_ratios"]
for i_id in range(len(ids)):
if ids[i_id] in sv_id_dict:
sv_id_dict[ids[i_id]][so_id] = id_ratios[i_id]
else:
sv_id_dict[ids[i_id]] = {so_id: id_ratios[i_id]}
else:
if np.product(so.shape) > 1e8:
continue
vx_list = np.argwhere(so.voxels) + so.bounding_box[0]
try:
id_list = kd.from_overlaycubes_to_list(vx_list,
datatype=datatype)
except:
continue
ids, id_counts = np.unique(id_list, return_counts=True)
id_ratios = id_counts / float(np.sum(id_counts))
for i_id in range(len(ids)):
if ids[i_id] in sv_id_dict:
sv_id_dict[ids[i_id]][so_id] = id_ratios[i_id]
else:
sv_id_dict[ids[i_id]] = {so_id: id_ratios[i_id]}
so.attr_dict["mapping_ids"] = ids
so.attr_dict["mapping_ratios"] = id_ratios
this_attr_dc[so_id] = so.attr_dict
if not readonly:
this_attr_dc.save2pkl()
return sv_id_dict
def write_mapping_to_sv_thread(args):
paths = args[0]
obj_type = args[1]
mapping_dict_path = args[2]
with open(mapping_dict_path, "r") as f:
mapping_dict = pkl.load(f)
for p in paths:
this_attr_dc = AttributeDict(p + "/attr_dict.pkl",
read_only=False, timeout=3600)
for sv_id in this_attr_dc.keys():
this_attr_dc[sv_id]["mapping_%s_ids" % obj_type] = \
mapping_dict[sv_id].keys()
this_attr_dc[sv_id]["mapping_%s_ratios" % obj_type] = \
mapping_dict[sv_id].values()
this_attr_dc.save2pkl()
def split_components_thread(args):
i_id = args[0]
stride = args[1]
obj_type = args[2]
new_obj_type = args[3]
version = args[4]
new_version = args[5]
version_dict = args[6]
working_dir = args[7]
dist = args[8]
new_id = args[9]
sd = segmentation.SegmentationDataset(obj_type=obj_type, version=version,
working_dir=working_dir,
version_dict=version_dict)
ids = sd.ids[i_id * stride: (i_id + 1) * stride]
new_sd = segmentation.SegmentationDataset(obj_type=new_obj_type,
version=new_version,
working_dir=working_dir,
version_dict=version_dict)
for i_id in range(len(ids)):
so_obj = sd.get_segmentationdataset(ids[i_id])
kdtree = scipy.spatial.cKDTree(so_obj.voxel_list)
graph = nx.from_edgelist(kdtree.query_pairs(dist))
ccs = list(nx.connected_components(graph))
if len(ccs) == 1:
new_so_obj = new_sd.get_segmentationdataset(new_id)
new_id += 1
new_so_obj.attr_dict["paths_to_voxels"] = so_obj.paths_to_voxels
new_so_obj.save_attr_dict()
else:
for cc in ccs:
new_so_obj = new_sd.get_segmentationdataset(new_id)
new_id += 1
voxel_ids = np.array(list(cc), dtype=np.int32)
this_voxel_list = so_obj.voxel_list[voxel_ids]
bb = [np.min(this_voxel_list, axis=1),
np.max(this_voxel_list, axis=1)]
this_voxel_list -= bb[0]
this_voxels = np.zeros(bb[1]-bb[0], dtype=np.bool)
this_voxels[this_voxel_list[:, 0],
this_voxel_list[:, 1],
this_voxel_list[:, 2]] = True
new_so_obj.save_voxels(this_voxels)
def init_sos(sos_dict):
loc_dict = sos_dict.copy()
svixs = loc_dict["svixs"]
del loc_dict["svixs"]
sos = [segmentation.SegmentationObject(ix, **loc_dict) for ix in svixs]
return sos
def sos_dict_fact(svixs, version="0", scaling=(10, 10, 20), obj_type="sv",
working_dir="/wholebrain/scratch/areaxfs/", create=False):
sos_dict = {"svixs": svixs, "version": version,
"working_dir": working_dir, "scaling": scaling,
"create": create, "obj_type": obj_type}
|
jmd-dk/concept
|
test/fluid_pressure/gen_ic.py
|
Python
|
gpl-3.0
| 976
| 0.004171
|
# This file has to be run in pure Python mode!
# Imports from the CO𝘕CEPT code
from commons import *
from species import Component
from snapshot import save
# Create stationary, homogeneous matter distribution,
# perturbed with global, stationary sine wave along
# the x-direction.
w = user_params['_w']
ρ0 = user_params['_ρ0']
A = user_params['_A']
σ = user_params['_σ']
gridsize = 4*16 # Should be a multiple of 4
component = Component('test fluid', 'matter', gridsize=gridsize, boltzmann_order=2)
ρ = empty([gridsize]*3, dtype=float)
for i in range(gridsize):
x = boxsize*i/gridsize
ρ[i, :, :] = ρ0 + A*sin(x/boxsize*2*π)
component.populate(ρ, 'ϱ')
for multi_index in component.J.multi_indices:
    component.populate(zeros([gridsize]*3, dtype=float), 'J', multi_index)
for multi_index in component.ς.multi_indices:
component.populate(ones([gridsize]*3)*ρ*(1 + w)*σ, 'ς', multi_index)
# Save snapshot
save(component, initial_conditions)
|
thumbor-community/shortener
|
vows/generators/short_generator_vows.py
|
Python
|
mit
| 1,326
| 0.002262
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, thumbor-community
# Use of this source code is governed by the MIT license that can be
# found in the LICENSE file.
from pyvows import Vows, expect
from tc_shortener.generators.short_generator import Generator
from tc_core.context import Context
from thumbor.config import Config
from thumbor.importer import Importer
@Vows.batch
class ShortGeneratorVows(Vows.Context):
class AShortGenerator(Vows.Context):
def topic(self):
config = Config()
importer = Importer(config)
context = Context(None, config, importer)
return Generator(context)
class WithIncorrectUrl(Vows.Context):
@Vows.capture_error
def topic(self, short_generator):
return short_generator.get('')
def should_raise_error(self, topic):
expect(topic).to_be_an_error_like(ValueError)
class WhenShortening(Vows.Context):
def topic(self, short_generator):
return short_generator.get('/unsafe/200x300/image.jpg')
def should_preserve_image(self, topic):
expect(topic).to_match(r'^.*/image.jpg$')
def should_be_fixed_length(self, topic):
expect(topic).to_length(22+len('/image.jpg'))
|
haohaibo/tutorial
|
python/web-dev/server.py
|
Python
|
mit
| 303
| 0
|
# import module from wsgiref
from wsgiref.simple_server import make_server
# import our application
from hello import application
# create a server on host '' (any interface), port 8000
httpd = make_server('', 8000, application)
print "Serving HTTP on port 8000..."
# start listening for HTTP requests
httpd.serve_forever()
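# Hedged sketch of the imported `application` callable (the real hello.py is not
# shown in this snippet; this is only the conventional shape of a WSGI app):
#
#     def application(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return ['Hello, web!']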
|
joberreiter/pyload
|
module/plugins/hooks/AntiStandby.py
|
Python
|
gpl-3.0
| 4,700
| 0.009574
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import time
import subprocess
import sys
try:
import caffeine
except ImportError:
pass
from module.plugins.internal.Addon import Addon, Expose
from module.utils import fs_encode, save_join as fs_join
class Kernel32(object):
ES_AWAYMODE_REQUIRED = 0x00000040
ES_CONTINUOUS = 0x80000000
ES_DISPLAY_REQUIRED = 0x00000002
ES_SYSTEM_REQUIRED = 0x00000001
ES_USER_PRESENT = 0x00000004
class AntiStandby(Addon):
__name__ = "AntiStandby"
__type__ = "hook"
__version__ = "0.12"
__status__ = "testing"
__config__ = [("activated", "bool", "Activated" , True ),
("hdd" , "bool", "Prevent HDD standby" , True ),
("system" , "bool", "Prevent OS standby" , True ),
("display" , "bool", "Prevent display standby" , False),
("interval" , "int" , "HDD touching interval in second
|
s", 25 )]
__description__ = """Prevent OS, HDD and display standby"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
TMP_FILE = ".antistandby"
PERIODICAL_INTERVAL = 5
def init(self):
self.pid = None
        self.mtime = 0
def activate(self):
hdd = self.get_config('hdd')
system = not self.get_config('system')
display = not self.get_config('display')
if hdd:
self.start_periodical(self.get_config('interval'), threaded=True)
        if os.name == "nt":
self.win_standby(system, display)
elif sys.platform == "darwin":
self.osx_standby(system, display)
else:
self.linux_standby(system, display)
def deactivate(self):
try:
os.remove(self.TMP_FILE)
except OSError:
pass
        if os.name == "nt":
self.win_standby(True)
elif sys.platform == "darwin":
self.osx_standby(True)
else:
self.linux_standby(True)
@Expose
def win_standby(self, system=True, display=True):
import ctypes
set = ctypes.windll.kernel32.SetThreadExecutionState
if system:
if display:
set(Kernel32.ES_CONTINUOUS)
else:
set(Kernel32.ES_CONTINUOUS | Kernel32.ES_DISPLAY_REQUIRED)
else:
if display:
set(Kernel32.ES_CONTINUOUS | Kernel32.ES_SYSTEM_REQUIRED)
else:
set(Kernel32.ES_CONTINUOUS | Kernel32.ES_SYSTEM_REQUIRED | Kernel32.ES_DISPLAY_REQUIRED)
@Expose
def osx_standby(self, system=True, display=True):
try:
if system:
caffeine.off()
else:
caffeine.on(display)
except NameError:
self.log_warning(_("Unable to change power state"),
_("caffeine lib not found"))
except Exception, e:
self.log_warning(_("Unable to change power state"), e)
@Expose
def linux_standby(self, system=True, display=True):
try:
if system:
if self.pid:
self.pid.kill()
elif not self.pid:
self.pid = subprocess.Popen(["caffeine"])
except Exception, e:
self.log_warning(_("Unable to change system power state"), e)
try:
if display:
subprocess.call(["xset", "+dpms", "s", "default"])
else:
subprocess.call(["xset", "-dpms", "s", "off"])
except Exception, e:
self.log_warning(_("Unable to change display power state"), e)
@Expose
def touch(self, path):
with open(path, 'w'):
os.utime(path, None)
self.mtime = time.time()
@Expose
def max_mtime(self, path):
return max(0, 0,
*(os.path.getmtime(fs_join(root, file))
for root, dirs, files in os.walk(fs_encode(path), topdown=False)
for file in files))
def periodical(self):
if self.get_config('hdd') is False:
return
if (self.pyload.threadManager.pause or
not self.pyload.api.isTimeDownload() or
not self.pyload.threadManager.getActiveFiles()):
return
download_folder = self.pyload.config.get("general", "download_folder")
if (self.max_mtime(download_folder) - self.mtime) < self.interval:
return
self.touch(self.TMP_FILE)
|
paulsheridan/data-structures
|
src/deque.py
|
Python
|
mit
| 964
| 0
|
# -*- coding: utf-8 -*-
from double_linked import DoubleLinkedList
class Deque(object):
'''Deque is a composition of Double Linked List'''
    def __init__(self, input=None):
'''create doubly linked list'''
self.deque = DoubleLinkedList(input)
def append(self, val):
self.deque.append(val)
def append_left(self, val):
self.deque.insert(val)
def pop(self):
return self.deque.pop()
def pop_left(self):
return self.deque.shift()
def peek(self):
try:
return self.deque.head.data
except AttributeError:
return None
def peek_left(self):
try:
return self.deque.tail.data
except AttributeError:
return None
def size(self):
size = 0
current_spot = self.deque.head
while current_spot:
size += 1
current_spot = current_spot.toward_tail
return size
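# Small usage sketch, not part of the original module; which end pop() and
# pop_left() remove from depends on the underlying DoubleLinkedList semantics.
if __name__ == '__main__':
    dq = Deque()
    dq.append(1)
    dq.append_left(2)
    print(dq.size())  # 2
    dq.pop()
    dq.pop_left()
    print(dq.size())  # 0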
|
lilchurro/vent
|
vent/menus/main.py
|
Python
|
apache-2.0
| 30,458
| 0.000328
|
import npyscreen
import os
import re
import sys
import time
from docker.errors import DockerException
from npyscreen import notify_confirm
from threading import Thread
from vent.api.actions import Action
from vent.api.menu_helpers import MenuHelper
from vent.helpers.meta import Containers
from vent.helpers.meta import Cpu
from vent.helpers.meta import DropLocation
from vent.helpers.meta import Gpu
from vent.helpers.meta import Images
from vent.helpers.meta import Jobs
from vent.helpers.meta import Timestamp
from vent.helpers.meta import Uptime
from vent.helpers.logs import Logger
from vent.helpers.paths import PathDirs
from vent.menus.add import AddForm
from vent.menus.ntap import CreateNTap
from vent.menus.ntap import DeleteNTap
from vent.menus.ntap import ListNTap
from vent.menus.ntap import NICsNTap
from vent.menus.ntap import StartNTap
from vent.menus.ntap import StopNTap
from vent.menus.backup import BackupForm
from vent.menus.editor import EditorForm
from vent.menus.inventory_forms import InventoryCoreToolsForm
from vent.menus.inventory_forms import InventoryToolsForm
from vent.menus.logs import LogsForm
from vent.menus.services import ServicesForm
from vent.menus.tools import ToolForm
class MainForm(npyscreen.FormBaseNewWithMenus):
""" Main information landing form for the Vent CLI """
@staticmethod
def exit(*args, **kwargs):
os.system('reset')
os.system('stty sane')
try:
sys.exit(0)
except SystemExit: # pragma: no cover
os._exit(0)
@staticmethod
def t_status(core):
""" Get status of tools for either plugins or core """
m_helper = MenuHelper()
repos, tools = m_helper.tools_status(core)
installed = 0
custom_installed = 0
built = 0
custom_built = 0
running = 0
custom_running = 0
normal = str(len(tools['normal']))
# determine how many extra instances should be shown for running
norm = set(tools['normal'])
inst = set(tools['installed'])
run_str = str(len(tools['normal']) + len(inst - norm))
for tool in tools['running']:
# check for multi instances too for running
if tool in tools['normal']:
running += 1
elif re.sub(r'\d+$', '', tool) in tools['normal']:
running += 1
else:
custom_running += 1
for tool in tools['built']:
if tool in tools['normal']:
built += 1
else:
custom_built += 1
for tool in tools['installed']:
if tool in tools['normal']:
installed += 1
elif re.sub(r'\d+$', '', tool) not in tools['normal']:
custom_installed += 1
tools_str = str(running + custom_running) + "/" + run_str + " running"
if custom_running > 0:
tools_str += " (" + str(custom_running) + " custom)"
tools_str += ", " + str(built + custom_built) + "/" + normal + " built"
if custom_built > 0:
tools_str += " (" + str(custom_built) + " custom)"
tools_str += ", " + str(installed + custom_installed) + "/" + normal
tools_str += " installed"
        if custom_installed > 0:
tools_str += " (" + str(custom_installed) + " custom)"
return tools_str, (running, custom_running, normal, repos)
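    # Hedged illustration, not part of the original file: t_status returns a
    # display string plus counters, which while_waiting below unpacks, e.g.:
    #     text, (running, custom_running, normal, repos) = MainForm.t_status(True)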
def while_waiting(self):
""" Update fields periodically if nothi
|
ng is happening """
# give a little extra time for file descriptors to close
time.sleep(0.1)
self.addfield.value = Timestamp()
self.addfield.display()
self.addfield2.value = Uptime()
self.addfield2.display()
self.addfield3.value = str(len(Containers()))+" running"
if len(Containers()) > 0:
self.addfield3.labelColor = "GOOD"
else:
self.addfield3.labelColor = "DEFAULT"
        self.addfield3.display()
# update core tool status
self.addfield5.value, values = MainForm.t_status(True)
if values[0] + values[1] == 0:
color = "DANGER"
self.addfield4.labelColor = "CAUTION"
self.addfield4.value = "Idle"
elif values[0] >= int(values[2]):
color = "GOOD"
self.addfield4.labelColor = color
self.addfield4.value = "Ready to start jobs"
else:
color = "CAUTION"
self.addfield4.labelColor = color
self.addfield4.value = "Ready to start jobs"
self.addfield5.labelColor = color
# update plugin tool status
plugin_str, values = MainForm.t_status(False)
plugin_str += ", " + str(values[3]) + " plugin(s) installed"
self.addfield6.value = plugin_str
# get jobs
jobs = Jobs()
# number of jobs, number of tool containers
self.addfield7.value = str(jobs[0]) + " jobs running (" + str(jobs[1])
self.addfield7.value += " tool containers), " + str(jobs[2])
self.addfield7.value += " completed jobs"
if jobs[0] > 0:
self.addfield4.labelColor = "GOOD"
self.addfield4.value = "Processing jobs"
self.addfield7.labelColor = "GOOD"
else:
self.addfield7.labelColor = "DEFAULT"
self.addfield4.display()
self.addfield5.display()
self.addfield6.display()
self.addfield7.display()
# if file drop location changes deal with it
logger = Logger(__name__)
status = (False, None)
if self.file_drop.value != DropLocation()[1]:
logger.info("Starting: file drop restart")
try:
self.file_drop.value = DropLocation()[1]
logger.info("Path given: " + str(self.file_drop.value))
# restart if the path is valid
if DropLocation()[0]:
status = self.api_action.clean(name='file_drop')
status = self.api_action.prep_start(name='file_drop')
else:
logger.error("file drop path name invalid" +
DropLocation()[1])
if status[0]:
tool_d = status[1]
status = self.api_action.start(tool_d)
logger.info("Status of file drop restart: " +
str(status[0]))
except Exception as e: # pragma no cover
logger.error("file drop restart failed with error: " + str(e))
logger.info("Finished: file drop restart")
self.file_drop.display()
return
@staticmethod
def core_tools(action):
""" Perform actions for core tools """
def diff(first, second):
"""
Get the elements that exist in the first list and not in the second
"""
second = set(second)
return [item for item in first if item not in second]
def popup(original, orig_type, thr, title):
"""
Start the thread and display a popup of info
until the thread is finished
"""
thr.start()
info_str = ""
while thr.is_alive():
if orig_type == 'containers':
info = diff(Containers(), original)
elif orig_type == 'images':
info = diff(Images(), original)
if info:
info_str = ""
for entry in info:
# TODO limit length of info_str to fit box
info_str += entry[0]+": "+entry[1]+"\n"
npyscreen.notify_wait(info_str, title=title)
time.sleep(1)
return
if action == 'install':
original_images = Images()
m_helper = MenuHelper()
thr = Thread(target=m_helper.cores, args=(),
kwargs={"action": "install"})
popup(original_images, "images", thr,
'Please wait, installing core containers...')
not
|
StackStorm/st2
|
st2actions/setup.py
|
Python
|
apache-2.0
| 1,772
| 0.000564
|
# -*- coding: utf-8 -*-
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os.path
from setuptools import setup, find_packages
from dist_utils import fetch_requirements
from dist_utils import apply_vagrant_workaround
from st2actions import __version__
ST2_COMPONENT = "st2actions"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt")
install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
apply_vagrant_workaround()
setup(
name=ST2_COMPONENT,
version=__version__,
description="{} StackStorm event-driven automation platform component".format(
ST2_COMPONENT
),
author="StackStorm",
author_email="info@stackstorm.com",
license="Apache License (2.0)",
url="https://stackstorm.com/",
install_requires=install_reqs,
dependency_links=dep_links,
test_suite=ST2_COMPONENT,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=["setuptools", "tests"]),
scripts=[
"bin/st2actionrunner",
"bin/st2notifier",
"bin/st2workflowengine",
"bin/st2scheduler",
],
)
|
rtx3/deyun.io
|
smwds/api/models.py
|
Python
|
mit
| 8,469
| 0.002634
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, desc, func
from sqlalchemy.orm import backref
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from extensions import db, cache
from utils import get_current_time
from constants import USER, USER_ROLE, ADMIN, INACTIVE, USER_STATUS, \
SEX_TYPES, STRING_LEN
from sqlalchemy_utils import UUIDType, JSONType
from flask_sqlalchemy import Pagination
import uuid
class Masterdb(db.Model):
__tablename__ = 'masterdb'
def __repr__(self):
return '<Master APi %r>' % self.master_name
#id = Column(db.Integer, primary_key=True)
id = Column(UUIDType(binary=False), primary_key=True)
master_name = Column(db.String(STRING_LEN), nullable=False,
unique=True, index=True, info={'verbose_name': u'主机名', })
master_ip = Column(db.String(STRING_LEN), nullable=False,
unique=False, info={'verbose_name': u'主机IP', })
master_port = Column(db.String(STRING_LEN), nullable=False,
default="", info={'verbose_name': u'主机端口', })
master_api_url = Column(db.String(STRING_LEN), nullable=False, default="", info={
'verbose_name': u'主机API地址', })
master_api_port = Column(db.Integer, nullable=False, default=0, info={
'verbose_name': u'主机API端口', })
username = Column(db.String(STRING_LEN), nullable=False, default='salt')
password = Column(db.String(STRING_LEN), nullable=False, default='sugar')
#location = Column(db.String(STRING_LEN), nullable=False, default="")
location_id = Column(UUIDType(binary=False), db.ForeignKey(
'location.id'), nullable=False, default="", info={'verbose_name': u'提供商', })
location = db.relationship('Location', backref='masters')
bio = Column(db.Text, default="", info={'verbose_name': u'备注', })
ssh_key = Column(db.String(STRING_LEN))
create_at = Column(db.DateTime, nullable=False, default=get_current_time, info={
'verbose_name': u'创建时间', })
update_at = Column(db.DateTime, info={'verbose_name': u'更新时间', })
operator = Column(UUIDType(binary=False), nullable=True,
info={'verbose_name': u'Master', })
avatar = Column(db.String(STRING_LEN), nullable=False, default='')
token = Column(db.String(STRING_LEN), nullable=False, default='')
token_expire = Column(db.Float, nullable=False, default=0.0)
minion_data = Column(JSONType(1000), nullable=False, default='')
def ret_api(self):
#return self.master_api_url + ":" + str(self.master_api_port)
return self.master_api_url
@classmethod
def get_count(cls):
count_q = cls.query.statement.with_only_columns(
[func.count()]).order_by(None)
count = db.session.execute(count_q).scalar()
return count
@classmethod
def get_list(cls, page=1):
q = cls.query.order_by(cls.update_at.desc())
return cls.paginate(query=q, page=page)
@staticmethod
def paginate(query, page, per_page=20, error_out=False):
if error_out and page < 1:
abort(404)
items = query.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
abort(404)
# No need to count if we're on the first page and there are fewer
# items than we expected.
if page == 1 and len(items) < per_page:
total = len(items)
else:
total = query.order_by(None).count()
return Pagination(query, page, per_page, total, items)
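# Hedged usage sketch, not part of the original module: the helpers above are
# meant for simple paginated listings, e.g.:
#
#     page_obj = Masterdb.get_list(page=2)   # a flask_sqlalchemy Pagination
#     masters = page_obj.items
#     total = Masterdb.get_count()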
'''
Tag = server role
'''
class Tag(db.Model):
__tablename__ = 'tag'
def __repr__(self):
return '<Tag %r>' % self.name
id = Column(db.Integer, primary_key=True)
node_id = Column(UUIDType(binary=False), db.ForeignKey('nodedb.id'))
node = db.relationship('Nodedb', backref='tags',foreign_keys="Tag.node_id")
name = Column(db.String(STRING_LEN), nullable=False,
default='', info={'verbose_name': u'名称', })
type = Column(db.String(STRING_LEN), nullable=False,
default='', info={'verbose_name': u'类型', })
url = Column(db.String(STRING_LEN), nullable=False, default='')
class Location(db.Model):
id = Column(UUIDType(binary=False), primary_key=True)
name = Column(db.String(STRING_LEN), nullable=False,
default='', info={'verbose_name': u'名称', })
type = Column(db.String(STRING_LEN), nullable=False,
default='', info={'verbose_name': u'类型', })
bandwidth = Column(db.String(STRING_LEN), nullable=False,
default='', info={'verbose_name': u'带宽', })
avatar = Column(db.String(STRING_LEN), nullable=False, default='')
address = Column(db.String(STRING_LEN), nullable=False,
default='', info={'verbose_name': u'网址', })
class Nodedb(db.Model):
__tablename__ = 'nodedb'
def __repr__(self):
return '<node %r>' % self.node_name
#id = Column(db.Integer, primary_key=True)
id = Column(UUIDType(binary=False), default=uuid.uuid4, primary_key=True)
node_name = Column(db.String(STRING_LEN), nullable=False,
unique=True, index=True, info={'verbose_name': u'Node名', })
#node_ip = Column(db.String(STRING_LEN), nullable=False,
# unique=False, info={'verbose_name': u'Node IP', })
node_ip = Column(JSONType(10000), nullable=False, default='')
node_port = Column(db.String(STRING_LEN), nullable=False,
default="", info={'verbose_name': u'Node 端口', })
username = Column(db.String(STRING_LEN), nullable=False, default='salt')
password = Column(db.String(STRING_LEN), nullable=False, default='sugar')
    #location = Column(db.String(STRING_LEN), nullable=False, default="")
#location_id = Column(UUIDType(binary=False), db.ForeignKey(
# 'location.id'), nullable=False, default="", info={'verbose_name': u'提供商', })
#location = db.relationship('Location', backref='nodes')
bio = Column(db.Text, default="", info={'verbose_name': u'备注', })
ssh_key = Column(db.String(STRING_LEN))
    create_at = Column(db.DateTime, nullable=False, default=get_current_time, info={
'verbose_name': u'创建时间', })
update_at = Column(db.DateTime, info={'verbose_name': u'更新时间', })
master_id = Column(UUIDType(binary=False), db.ForeignKey(
'masterdb.id'), nullable=False, default="", info={'verbose_name': u'Master', })
master = db.relationship('Masterdb', backref='nodes')
avatar = Column(db.String(STRING_LEN), nullable=False, default='')
minion_data = Column(JSONType(10000), nullable=False, default='')
os = Column(db.String(STRING_LEN), nullable=False, default='')
kenel = Column(db.String(STRING_LEN), nullable=False, default='')
core = Column(db.Integer, nullable=False, default=0)
cpu = Column(db.String(STRING_LEN), nullable=False, default='')
mem = Column(db.String(STRING_LEN), nullable=False, default='')
host = Column(db.String(STRING_LEN), nullable=False, default='')
status = Column(db.String(STRING_LEN), nullable=False, default='')
@classmethod
def get_nodes(cls):
q = cls.query.with_entities(cls.node_name).all()
return q
@classmethod
def get_count(cls):
count_q = cls.query.statement.with_only_columns(
[func.count()]).order_by(None)
count = db.session.execute(count_q).scalar()
return count
@classmethod
def get_list(cls, page=1):
q = cls.query.order_by(cls.update_at.desc())
return cls.paginate(query=q, page=page)
@staticmethod
def paginate(query, page, per_page=20, error_out=False):
if error_out and page < 1:
abort(404)
items = query.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
abort(404)
# No need to count if we're on the first page and there are fewer
# items than we expected.
if page == 1 and len(item
|
dewtx29/python_ann
|
project/num/c++/testGraph.py
|
Python
|
gpl-3.0
| 277
| 0.021661
|
import matplotlib.pyplot as plt
import random
dotList = []
xMax = 100
yMax = 100
for i in range (0,xMax):
y = random.random()*yMax
y = i * 2
bb = [y]
dotList.append(bb)
print dotList
plt.plot(dotList, 'ro')
#plt.axis([0, xMax, 0, yMax])
plt.show()
|
5monkeys/reqlice
|
reqlice/requirement.py
|
Python
|
mit
| 926
| 0
|
from pip.req import parse_requirements as pip_parse_requirements
from pip.req import InstallRequirement
def is_pypi_requirement(requirement):
    return requirement.req and not requirement.link
def parse_requirements(path_to_requirements):
""" Parse requirements
:param path_to_requirements: path/to/requirements.txt
:return: ['package name', ..]
"""
parsed_reqs = []
for requirement in pip_parse_requirements(path_to_requirements,
session=False):
if not is_pypi_requirement(requirement):
continue
parsed_reqs.append(requirement.req.project_name)
return parsed_reqs
def get_valid_pypi_requirement(line):
try:
requirement = InstallRequirement.from_line(line)
if not is_pypi_requirement(requirement):
raise ValueError
except ValueError:
return None
return requirement
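# Hedged usage sketch (file name and requirement string are placeholders):
#
#     names = parse_requirements('requirements.txt')     # e.g. ['Django', 'requests']
#     req = get_valid_pypi_requirement('requests>=2.0')  # InstallRequirement or None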
|
OptimalPayments/Python_SDK
|
src/PythonNetBanxSDK/CardPayments/Pagination.py
|
Python
|
mit
| 1,081
| 0.007401
|
'''
Created on 17-Feb-2015
@author: Asawari.Vaidya
'''
from PythonNetBanxSDK.common.DomainObject import DomainObject
class Pagination(DomainObject):
'''
classdocs
'''
def __init__(self,obj):
'''
Constructor
'''
# Handler dictionary
handler = dict()
handler['limit'] = self.limit
handler['offset'] = self.offset
handler['startDate'] = self.startDate
handler['endDate'] = self.endDate
if obj is not None:
self.setProperties(obj, handler=handler)
else:
pass
'''
    Property Limit
'''
def limit(self, limit):
self.__dict__['limit'] = limit
'''
Property Offset
'''
def offset(self, offset):
self.__dict__['offset'] = offset
'''
Property Start Date
'''
def startDate(self, startDate):
self.__dict__['startDate'] = startDate
'''
Property End Date
'''
def endDate(self, endDate):
        self.__dict__['endDate'] = endDate
|
alfa-addon/addon
|
plugin.video.alfa/lib/cloudscraper/interpreters/native.py
|
Python
|
gpl-3.0
| 8,626
| 0.004521
|
from __future__ import absolute_import
import ast
import re
import operator as op
import pyparsing
from ..exceptions import CloudflareSolveError
from . import JavaScriptInterpreter
# ------------------------------------------------------------------------------- #
_OP_MAP = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Invert: op.neg,
}
# ------------------------------------------------------------------------------- #
class Calc(ast.NodeVisitor):
def visit_BinOp(self, node):
return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))
# ------------------------------------------------------------------------------- #
def visit_Num(self, node):
return node.n
# ------------------------------------------------------------------------------- #
def visit_Expr(self, node):
return self.visit(node.value)
# ------------------------------------------------------------------------------- #
@classmethod
def doMath(cls, expression):
tree = ast.parse(expression)
calc = cls()
return calc.visit(tree.body[0])
# ------------------------------------------------------------------------------- #
class Parentheses(object):
def fix(self, s):
res = []
self.visited = set([s])
self.dfs(s, self.invalid(s), res)
return res
# ------------------------------------------------------------------------------- #
def dfs(self, s, n, res):
if n == 0:
res.append(s)
return
for i in range(len(s)):
if s[i] in ['(', ')']:
s_new = s[:i] + s[i + 1:]
if s_new not in self.visited and self.invalid(s_new) < n:
self.visited.add(s_new)
self.dfs(s_new, self.invalid(s_new), res)
# ------------------------------------------------------------------------------- #
def invalid(self, s):
plus = minus = 0
memo = {"(": 1, ")": -1}
for c in s:
plus += memo.get(c, 0)
minus += 1 if plus < 0 else 0
plus = max(0, plus)
return plus + minus
# ------------------------------------------------------------------------------- #
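# Hedged illustration, not part of the original file, of the two helpers above:
# Calc.doMath evaluates a plain arithmetic string through the AST visitor and
# Parentheses.fix drops the minimum number of unbalanced parentheses, e.g.:
#
#     Calc.doMath('1+2*3')         # -> 7
#     Parentheses().fix('((1+2)')  # -> ['(1+2)']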
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('native')
# ------------------------------------------------------------------------------- #
def eval(self, body, domain):
operators = {
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv
}
# ------------------------------------------------------------------------------- #
def flatten(lists):
return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]
# ------------------------------------------------------------------------------- #
def jsfuckToNumber(jsFuck):
# "Clean Up" JSFuck
jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
jsFuck = Parentheses().fix(jsFuck)[0]
            # Hackery Parser for Math
stack = []
bstack = []
for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
if i == '+':
stack.append(bstack)
bstack = []
continue
bstack.append(i)
stack.append(bstack)
return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))
# ------------------------------------------------------------------------------- #
def divisorMath(payload, needle, domain):
jsfuckMath = payload.split('/')
if needle in jsfuckMath[1]:
expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
expression_value = operators[expression[1]](
float(jsfuckToNumber(expression[0])),
float(ord(domain[jsfuckToNumber(jsfuckMath[1][
jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
])]))
)
else:
expression_value = jsfuckToNumber(jsfuckMath[1])
expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
return expression_value
# ------------------------------------------------------------------------------- #
def challengeSolve(body, domain):
jschl_answer = 0
try:
jsfuckChallenge = re.search(
r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
body,
re.DOTALL | re.MULTILINE
).groupdict()
except AttributeError:
raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
if kJSFUCK:
try:
kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
try:
kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
try:
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
kValues = {}
for m in r.finditer(body):
kValues[int(m.group('id'))] = m.group('jsfuck')
jsfuckChallenge['k'] = kValues[kJSFUCK]
except (AttributeError, IndexError):
raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
jsfuckChallenge['challenge'] = re.finditer(
r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
jsfuckChallenge['variable'],
jsfuckChallenge['variable']
),
jsfuckChallenge['challenge']
)
# ------------------------------------------------------------------------------- #
if '/' in jsfuckChallenge['init']:
val = jsfuckChallenge['init'].split('/')
jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
else:
jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
# ------------------------------------------------------------------------------- #
for expressionMatch in jsfuckChallenge['challenge']:
oper, expression = expressionMatch.groups()
if '/' in expression:
expression_value = divisorMath(expression, 'function(p)', domain)
else:
if 'Element' in expression:
expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
else:
expression_value = jsfuckToNumber(expression)
jschl_answer = operators[oper](jschl_answer, expression_value)
# ------------------------------------------------------------------------------- #
# if not jsfuckChallenge['k'] and '+ t.length' in body:
|
rouxcode/django-cms-plugins
|
cmsplugins/headers/migrations/0001_initial.py
|
Python
|
mit
| 2,379
| 0.005885
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-26 08:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.image
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
('filer', '0006_auto_20160623_1627'),
]
operations = [
migrations.CreateModel(
name='Header',
fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='headers_header', serialize=False, to='cms.CMSPlugin')),
('css_class', models.CharField(blank=True, default='', max_length=200, verbose_name='css class')),
                ('in_navigation', models.BooleanField(default=False, verbose_name='in navigation')),
('is_visible', models.BooleanField(default=True, verbose_name='visible')),
('height', models.CharField(blank=True, default='', max_length=100, verbose_name='height')),
('width', models.CharField(blank=True, default='', max_length=50, verbose_name='width')),
('name', models.CharField(blank=True, default='', max_length=150, verbose_name='title')),
('show_name', models.BooleanField(default=True, verbose_name='display title')),
('slug', models.SlugField(blank=True, default='', editable=False, max_length=150, verbose_name='slug')),
('abstract', models.TextField(blank=True, default='', max_length=250, verbose_name='abstract')),
                ('description', models.TextField(blank=True, default='', max_length=250, verbose_name='description')),
('text_position', models.CharField(blank=True, default='', max_length=100, verbose_name='text position')),
('text_color', models.CharField(blank=True, default='', max_length=100, verbose_name='text color')),
('image', filer.fields.image.FilerImageField(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='filer.Image', verbose_name='image')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
njr0/abouttag
|
abouttag/location.py
|
Python
|
mit
| 840
| 0
|
# -*- coding: utf-8 -*-
"""
abouttag.location
Standard about tags for URIs and URLs
Copyright 2010 AUTHORS (see AUTHORS file)
License: MIT, see LICENSE for more information
"""
from abouttag import about
def GEOnet(fli, fni, normalize=False, convention=u'geonet-1'):
"""Usage:
        from abouttag.location import GEOnet
        tag = GEOnet(-2601490, -3577649)
"""
assert convention.lower() == u'geonet-1'
return u'GEOnet%d_%d' % (fli, fni)
class TestGEOnet(about.AboutTestCase):
def testFluidDBBadConvention(self):
self.assertRaises(AssertionError, GEOnet, 1, 2, convention='unknown')
def testFluidDBNormalize(self):
expected = (
((1, 2), u'GEOnet1_2'),
((-99999, -77777), u'GEOnet-99999_-77777')
)
self.vectorTest(expected, GEOnet)
|
mostateresnet/keyformproject
|
keyformproject/wsgi.py
|
Python
|
mit
| 405
| 0
|
"""
WSGI config for keyformproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "keyformproject.settings")
application = get_wsgi_application()
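# Hedged usage note (the server choice is an assumption, not part of this file):
# any WSGI server can be pointed at this module-level callable, e.g.
#     gunicorn keyformproject.wsgi:application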
|
opendatateam/udata
|
udata/tests/test_uris.py
|
Python
|
agpl-3.0
| 8,266
| 0.000122
|
import pytest
from udata import uris
from udata.settings import Defaults
PUBLIC_HOSTS = [
'http://foo.com/blah_blah',
'http://foo.com/blah_blah/',
'http://foo.com/blah_blah_(wikipedia)',
'http://foo.com/blah_blah_(wikipedia)_(again)',
'http://www.example.com/wpstyle/?p=364',
'https://www.example.com/foo/?bar=baz&inga=42&quux',
'http://✪df.ws/123',
'http://➡.ws/䨹',
'http://⌘.ws',
'http://⌘.ws/',
'http://foo.com/blah_(wikipedia)#cite-1',
'http://foo.com/blah_(wikipedia)_blah#cite-1',
'http://foo.com/unicode_(✪)_in_parens',
'http://foo.com/(something)?after=parens',
'http://☺.damowmow.com/',
'http://code.google.com/events/#&product=browser',
'http://j.mp',
'ftp://foo.bar/baz',
'http://foo.bar/?q=Test%20URL-encoded%20stuff',
'http://مثال.إختبار.com',
'http://例子.测试.com',
'http://उदाहरण.परीक्षा.com',
'http://-.~_!$&\'()*+,;=:%40:80%2f::::::@example.com',
'http://1337.net',
'http://a.b-c.de',
'https://foo_bar.example.com/',
'ftps://foo.bar/',
'//foo.bar/',
]
PUBLIC_HOSTS_IDN = [
'http://例子.中国',
'http://somewhere.укр',
]
WITH_CREDENTIALS = [
'http://userid:password@example.com:8080',
'http://userid:password@example.com:8080/',
'http://userid@example.com',
'http://userid@example.com/',
'http://userid@example.com:8080',
'http://userid@example.com:8080/',
'http://userid:password@example.com',
'http://userid:password@example.com/',
]
PUBLIC_IPS = [
'http://142.42.1.1/',
'http://142.42.1.1:8080',
'http://142.42.1.1:8080/',
'http://223.255.255.254',
'http://[2a00:1450:4007:80e::2004]',
'http://[2a00:1450:4007:80e::2004]:8080',
'http://[2a00:1450:4007:80e::2004]:8080/',
]
PUBLIC = PUBLIC_HOSTS + PUBLIC_HOSTS_IDN + PUBLIC_IPS + WITH_CREDENTIALS
PRIVATE_IPS = [
'http://10.1.1.1',
'http://10.1.1.1:8080',
'http://10.1.1.1:8080/index.html',
'http://10.1.1.254',
'http://10.1.1.254:8080',
'http://10.1.1.254:8080/index.html',
'http://[fc00::1]',
'http://[fc00::1]:8080',
'http://[fc00::1]:8080/index.html',
]
PRIVATE = PRIVATE_IPS
LOCAL_HOSTS = [
'http://localhost',
'http://localhost:8080',
'http://localhost:8080/index.html',
'http://localhost.localdomain',
'http://localhost.localdomain:8080',
'http://localhost.localdomain:8080/index.html',
]
LOCAL_IPS = [
'http://127.0.0.1',
'http://127.0.0.1:8080',
'http://127.0.0.1:8080/index.html',
'http://127.0.1.1',
'http://127.0.1.1:8080',
'http://127.0.1.1:8080/index.html',
'http://[::1]',
'http://[::1]:8080',
'http://[::1]:8080/index.html',
]
LOCAL = LOCAL_HOSTS + LOCAL_IPS
MULTICAST = [
'http://224.1.1.1',
'http://224.1.1.1:8080',
'http://224.1.1.1:8080/index.html',
'http://[ff00::1]',
'http://[ff00::1]:8080',
'http://[ff00::1]:8080/index.html',
]
INVALID = [
'http://',
'h/://.',
'http://..',
'http://../',
'http://?',
'http://??',
'http://??/',
'http://#',
'http://##',
'http://##/',
'http://foo.bar?q=Spaces should be encoded',
'http://foo.bar?q=Spaces should be encoded with unicode é',
'//',
'//a',
'///a',
'///',
'http:///a',
'foo.com',
'rdar://1234',
'h://test',
'http:// shouldfail.com',
':// should fail',
'http://foo.bar/foo(bar)baz quux',
'http://-error-.invalid/',
'http://_error_.invalid/',
'http://a.b--c.de/',
'http://-a.b.co',
'http://a.b-.co',
'http://0.0.0.0',
'http://10.1.1.0',
'http://10.1.1.255',
'http://1.1.1.1.1',
'http://123.123.123',
'http://3628126748',
'http://.www.foo.bar/',
'http://www.foo.bar./',
'http://.www.foo.bar./',
'http://[fffff:1450:4007:80e::2004]',
'http://[fffff:1450:4007:80e::2004]:8080',
'http://[fffff:1450:4007:80e::2004]:8080/index.html',
'http://[::]',
'http://[::]:8080',
'http://[::]:8080/index.html',
]
DEFAULT_SCHEMES = Defaults.URLS_ALLOWED_SCHEMES
# Custom schemes not in uris.SCHEMES
CUSTOM_SCHEMES = ['irc', 'unknown']
# Extract some default TLDs
DEFAULT_TLDS = list(Defaults.URLS_ALLOWED_TLDS)[:2]
# Custom TLDs not in IANA official list
CUSTOM_TLDS = ['i2', 'unknown']
def test_validate_strip_url():
assert uris.validate(' http://somewhere.com ') == 'http://somewhere.com'
@pytest.mark.parametrize('url', PUBLIC_HOSTS)
def test_default_should_validate_public_urls(url):
assert uris.validate(url) == url
@pytest.mark.parametrize('url', PUBLIC_HOSTS_IDN)
def test_default_should_validate_public_urls_with_utf8_tld(url):
assert uris.validate(url) == url
@pytest.mark.parametrize('url', PUBLIC_IPS)
def test_default_should_validate_public_ips(url):
assert uris.validate(url) == url
@pytest.mark.parametrize('scheme', DEFAULT_SCHEMES)
def test_default_should_validate_default_schemes(scheme):
url = '{0}://somewhere.com'.format(scheme)
assert uris.validate(url) == url
@pytest.mark.parametrize('scheme', CUSTOM_SCHEMES)
def test_default_should_not_validate_non_default_schemes(scheme):
url = '{0}://somewhere.com'.format(scheme)
with pytest.raises(uris.ValidationError):
uris.validate(url)
@pytest.mark.parametrize('tld', CUSTOM_TLDS)
def test_default_should_not_validate_unknown_tlds(tld):
url = 'http://somewhere.{0}'.format(tld)
with pytest.raises(uris.ValidationError):
uris.validate(url)
@pytest.mark.parametrize('url', PRIVATE)
def test_default_should_not_validate_private_urls(url):
with pytest.raises(uris.ValidationError):
uris.validate(url)
@pytest.mark.parametrize('url', LOCAL_HOSTS)
def test_default_should_not_validate_local_hosts(url):
with pytest.raises(uris.ValidationError):
uris.validate(url)
@pytest.mark.parametrize('url', INVALID)
def test_should_not_validate_bad_urls(url):
with pytest.raises(uris.ValidationError):
uris.validate(url)
@pytest.mark.parametrize('url', MULTICAST)
def test_should_not_validate_multicast_urls(url):
with pytest.raises(uris.ValidationError):
uris.validate(url)
@pytest.mark.parametrize('url', PUBLIC + PRIVATE)
def test_private_should_validate_public_and_private_urls(url):
assert uris.validate(url, private=True) == url
@pytest.mark.parametrize('url', LOCAL)
def test_private_should_not_validate_local_urls(url):
with pytest.raises(uris.ValidationError):
uris.validate(url, private=True)
@pytest.mark.parametrize('url', PUBLIC + LOCAL)
def test_local_should_validate_public_and_local_urls(url):
assert uris.validate(url, local=True) == url
@pytest.mark.parametrize('url', PRIVATE)
def test_local_should_not_validate_private_urls(url):
with pytest.raises(uris.ValidationError):
uris.validate(url, local=True)
@pytest.mark.parametrize('url', PUBLIC + LOCAL + PRIVATE)
def test_private_local_should_validate_any_valid_urls(url):
assert uris.validate(url, local=True, private=True) == url
@pytest.mark.parametrize('scheme', CUSTOM_SCHEMES)
def test_custom_schemes(scheme):
url = '{0}://somewhere.com'.format(scheme)
assert uris.validate(url, schemes=CUSTOM_SCHEMES) == url
@pytest.mark.parametrize('scheme', DEFAULT_SCHEMES)
def test_custom_schemes_should_not_validate_defaults(scheme):
url = '{0}://somewhere.com'.format(scheme)
with pytest.raises(uris.ValidationError):
uris.validate(url, schemes=CUSTOM_SCHEMES)
@pytest.mark.parametrize('tld', CUSTOM_TLDS)
def test_custom_tlds(tld):
url = 'http://somewhere.{0}'.format(tld)
assert uris.validate(url, tlds=CUSTOM_TLDS) == url
@pytest.mark.parametrize('tld', DEFAULT_TLDS)
def test_custom_tlds_should_not_validate_defaults(tld):
url = 'http://somewhere.{0}'.format(tld)
with pytest.raises(uris.ValidationError):
uris.validate(url, tlds=CUSTOM_TLDS)
@pytest.mark.parametrize('url', WITH_CREDENTIALS)
def test_with_credentials(url):
assert uris.validate(url) == url
@pytest.mark.parametrize('url', WITH_CREDENTIALS)
def test_with_credentials_disabled(url):
with pytest.raises(uris.ValidationError):
uris.validate(url, cred
|
Xobb/fabric-bolt
|
src/fabric_bolt/accounts/tables.py
|
Python
|
mit
| 1,653
| 0.005445
|
"""
Tables for the account app
"""
from django.contrib.auth import get_user_model
import django_tables2 as tables
from fabric_bolt.core.mixins.tables import ActionsColumn, PaginateTable
class UserListTable(PaginateTable):
"""
Table for displaying users.
"""
actions = ActionsColumn([
{'title': '<i class="glyphicon glyphicon-file"></i>', 'url': 'accounts_user_view', 'args': [tables.A('pk')],
'attrs':{'data-toggle': 'tooltip', 'title': 'View User', 'data-delay': '{ "show": 300, "hide": 0 }'}},
{'title': '<i class="glyphicon glyphicon-pencil"></i>', 'url': 'accounts_user_change', 'args': [tables.A('pk')],
         'attrs':{'data-toggle': 'tooltip', 'title': 'Edit User', 'data-delay': '{ "show": 300, "hide": 0 }'}},
{'title': '<i class="glyphicon glyphicon-trash"></i>', 'url': 'accounts_user_delete', 'args': [tables.A('pk')],
'attrs':{'data-toggle': 'tooltip', 'title': 'Delete User', 'data-delay': '{ "show": 300, "hide": 0 }', 'class': 'js-delete'}},
], delimiter='   ')
email = tables.Column(verbose_name='Email')
first_name = tables.Column(verbose_name='First Name')
last_name = tables.Column(verbose_name='Last Name')
user_level = tables.Column(verbose_name='User Level', accessor='group_strigify', order_by='groups')
class Meta:
model = get_user_model()
sequence = fields = ('first_name', 'last_name', 'is_active', 'email', 'user_level', )
attrs = {'class': 'table table-striped table-bordered table-hover'}
def __init__(self, *args, **kwargs):
super(UserListTable, self).__init__(*args, **kwargs)
|
nop33/indico
|
indico/core/db/sqlalchemy/custom/greatest.py
|
Python
|
gpl-3.0
| 1,227
| 0.000815
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.functions import FunctionElement
class greatest(FunctionElement):
name = 'greatest'
@compiles(greatest)
def _greatest_default(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'postgresql')
def _greatest_case(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return 'CASE WHEN {0} > {1} THEN {0} ELSE {1} END'.format(compiler.process(arg1), compiler.process(arg2))
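# Illustrative usage sketch (not part of the original file): the table and
# column names below are hypothetical and only show how the compiled element
# is used inside a query.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()
    events = Table('events', metadata,
                   Column('start_dt', Integer),
                   Column('end_dt', Integer))

    query = select([greatest(events.c.start_dt, events.c.end_dt)])
    # Renders the CASE expression defined above on PostgreSQL and a plain
    # greatest(...) function call on every other dialect.
    print(query.compile(dialect=postgresql.dialect()))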
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
|
PyFoam/RunDictionary/MeshInformation.py
|
Python
|
gpl-2.0
| 2,537
| 0.016161
|
"""Gets information about the mesh of a case. Makes no attempt to manipulate
the mesh, because this is better left to the OpenFOAM-utilities"""
from PyFoam.RunDictionary.SolutionDirectory import SolutionDirectory
from PyFoam.RunDictionary.ListFile import ListFile
from PyFoam.Error import PyFoamException
from PyFoam.RunDictionary.ParsedParameterFile import ParsedFileHeader
from os import path
import re
class MeshInformation:
"""Reads Information about the mesh on demand"""
def __init__(self,
case,
time="constant",
processor=None,
region=None):
""":param case: Path to t
|
he case-directory
:param time: Time for which the mesh should be looked at
:param processor: Name of the processor directory for decomposed cases"""
self.sol=SolutionDirectory(case,paraviewLink=False,archive=None,region=region)
self.time=time
self.processor=processor
def nrOfFaces(self):
try:
return self.faces
except AttributeError:
try:
faces=ListFile(self.sol.polyMeshDir(time=self.time,processor=self.processor),"faces")
self.faces=faces.getSize()
except IOError:
faces=ListFile(self.sol.polyMeshDir(processor=self.processor),"faces")
self.faces=faces.getSize()
return self.faces
def nrOfPoints(self):
try:
return self.points
except AttributeError:
try:
points=ListFile(self.sol.polyMeshDir(time=self.time,processor=self.processor),"points")
self.points=points.getSize()
except IOError:
points=ListFile(self.sol.polyMeshDir(processor=self.processor),"points")
self.points=points.getSize()
return self.points
def nrOfCells(self):
try:
return self.cells
except:
try:
try:
owner=ParsedFileHeader(path.join(self.sol.polyMeshDir(time=self.time,processor=self.processor),"owner"))
except IOError:
owner=ParsedFileHeader(path.join(self.sol.polyMeshDir(processor=self.processor),"owner"))
mat=re.compile('.+nCells: *([0-9]+) .+').match(owner["note"])
self.cells=int(mat.group(1))
return self.cells
except:
raise PyFoamException("Not Implemented")
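# Illustrative usage sketch (not part of the original file); "damBreak" is a
# hypothetical case directory used only for demonstration.
if __name__ == "__main__":
    info = MeshInformation("damBreak")
    print("cells :", info.nrOfCells())
    print("faces :", info.nrOfFaces())
    print("points:", info.nrOfPoints())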
|
gebn/nibble
|
nibble/information.py
|
Python
|
mit
| 13,259
| 0.000754
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
from collections import OrderedDict
from decimal import Decimal
import re
import math
import six
from nibble import util, decorators
@decorators.python_2_div_compatible
@decorators.python_2_nonzero_compatible
@six.python_2_unicode_compatible
class Information(object):
"""
Represents a quantity of digital information as a number of bits.
"""
# this is deliberately lax with the number to provide a more helpful error
# message
_PARSE_REGEX = re.compile(r'([\d\\.]+)(?: +)?(\w+)')
BITS = 1
NIBBLES = 4
BYTES = 8
# binary, bytes
KIBIBYTES = BYTES * 1024
MEBIBYTES = BYTES * 1024 ** 2
GIBIBYTES = BYTES * 1024 ** 3
TEBIBYTES = BYTES * 1024 ** 4
PEBIBYTES = BYTES * 1024 ** 5
EXBIBYTES = BYTES * 1024 ** 6
ZEBIBYTES = BYTES * 1024 ** 7
YOBIBYTES = BYTES * 1024 ** 8
# decimal, bytes
KILOBYTES = BYTES * 1000
MEGABYTES = BYTES * 1000 ** 2
GIGABYTES = BYTES * 1000 ** 3
TERABYTES = BYTES * 1000 ** 4
PETABYTES = BYTES * 1000 ** 5
EXABYTES = BYTES * 1000 ** 6
ZETTABYTES = BYTES * 1000 ** 7
YOTTABYTES = BYTES * 1000 ** 8
# binary, bits
KIBIBITS = 1024
MEBIBITS = 1024 ** 2
GIBIBITS = 1024 ** 3
TEBIBITS = 1024 ** 4
PEBIBITS = 1024 ** 5
EXBIBITS = 1024 ** 6
ZEBIBITS = 1024 ** 7
YOBIBITS = 1024 ** 8
# decimal, bits
KILOBITS = 1000
MEGABITS = 1000 ** 2
GIGABITS = 1000 ** 3
TERABITS = 1000 ** 4
PETABITS = 1000 ** 5
EXABITS = 1000 ** 6
ZETTABITS = 1000 ** 7
YOTTABITS = 1000 ** 8
_SYMBOLS = {
'b': 1,
'bits': 1,
'B': BYTES,
'bytes': BYTES,
'N': NIBBLES,
'n': NIBBLES,
'nibbles': NIBBLES,
'K': KIBIBYTES,
'KiB': KIBIBYTES,
'kiB': KIBIBYTES,
'kibibytes': KIBIBYTES,
'M': MEBIBYTES,
'MiB': MEBIBYTES,
'miB': MEBIBYTES,
'mebibytes': MEBIBYTES,
'G': GIBIBYTES,
'GiB': GIBIBYTES,
'giB': GIBIBYTES,
'gibibytes': GIBIBYTES,
'T': TEBIBYTES,
'TiB': TEBIBYTES,
'tiB': TEBIBYTES,
'tebibytes': TEBIBYTES,
'P': PEBIBYTES,
'PiB': PEBIBYTES,
'piB': PEBIBYTES,
'pebibytes': PEBIBYTES,
'E': EXBIBYTES,
'EiB': EXBIBYTES,
'eiB': EXBIBYTES,
'exbibytes': EXBIBYTES,
'Z': ZEBIBYTES,
'ZiB': ZEBIBYTES,
'ziB': ZEBIBYTES,
'zebibytes': ZEBIBYTES,
'Y': YOBIBYTES,
'YiB': YOBIBYTES,
'yiB': YOBIBYTES,
'yobibytes': YOBIBYTES,
'KB': KILOBYTES,
'kB': KILOBYTES,
'kilobytes': KILOBYTES,
'MB': MEGABYTES,
'mB': MEGABYTES,
'megabytes': MEGABYTES,
'GB': GIGABYTES,
'gB': GIGABYTES,
'gigabytes': GIGABYTES,
'TB': TERABYTES,
'tB': TERABYTES,
'terabytes': TERABYTES,
'PB': PETABYTES,
'pB': PETABYTES,
'petabytes': PETABYTES,
'EB': EXABYTES,
'eB': EXABYTES,
'exabytes': EXABYTES,
'ZB': ZETTABYTES,
'zB': ZETTABYTES,
'zettabytes': ZETTABYTES,
'YB': YOTTABYTES,
'yB': YOTTABYTES,
'yottabytes': YOTTABYTES,
'Kib': KIBIBITS,
'kib': KIBIBITS,
'kibibits': KIBIBITS,
'Mib': MEBIBITS,
'mib': MEBIBITS,
'mebibits': MEBIBITS,
'Gib': GIBIBITS,
'gib': GIBIBITS,
'gibibits': GIBIBITS,
'Tib': TEBIBITS,
'tib': TEBIBITS,
'tebibits': TEBIBITS,
'Pib': PEBIBITS,
'pib': PEBIBITS,
'pebibits': PEBIBITS,
'Eib': EXBIBITS,
'eib': EXBIBITS,
'exbibits': EXBIBITS,
'Zib': ZEBIBITS,
'zib': ZEBIBITS,
'zebibits': ZEBIBITS,
'Yib': YOBIBITS,
'yib': YOBIBITS,
'yobibits': YOBIBITS,
'Kb': KILOBITS,
'kb': KILOBITS,
'kilobits': KILOBITS,
'Mb': MEGABITS,
'mb': MEGABITS,
'megabits': MEGABITS,
'Gb': GIGABITS,
'gb': GIGABITS,
'gigabits': GIGABITS,
'Tb': TERABITS,
'tb': TERABITS,
'terabits': TERABITS,
'Pb': PETABITS,
'pb': PETABITS,
'petabits': PETABITS,
'Eb': EXABITS,
'eb': EXABITS,
'exabits': EXABITS,
'Zb': ZETTABITS,
'zb': ZETTABITS,
'zettabits': ZETTABITS,
'Yb': YOTTABITS,
'yb': YOTTABITS,
'yottabits': YOTTABITS
}
BINARY_BITS = ['Yib', 'Zib', 'Eib', 'Pib', 'Tib', 'Gib', 'Mib', 'Kib', 'b']
BINARY_BYTES = ['YiB', 'ZiB', 'EiB', 'PiB', 'TiB', 'GiB', 'MiB', 'KiB', 'B']
DECIMAL_BITS = ['Yb', 'Zb', 'Eb', 'Pb', 'Tb', 'Gb', 'Mb', 'Kb', 'b']
DECIMAL_BYTES = ['YB', 'ZB', 'EB', 'PB', 'TB', 'GB', 'MB', 'KB', 'B']
_CATEGORY_MAPS = {
'bB': BINARY_BYTES,
'dB': DECIMAL_BYTES,
'bb': BINARY_BITS,
'db': DECIMAL_BITS
}
def __init__(self, quantity, unit=BITS):
"""
Initialise a new information object.
:param quantity: The number of the unit.
:param unit: The size of the unit in bits, e.g. MiB = 8388608 bits.
Defaults to bits.
"""
bits = quantity * unit
if isinstance(bits, float):
bits = int(math.ceil(bits))
self.bits = bits
@classmethod
def from_quantity_unit(cls, quantity, unit):
"""
Initialise a new information object from a quantity and unit string.
:param quantity: The number of the unit.
:param unit: The unit as a string, e.g. 'MiB' or 'mebibytes'.
:return: An `Information` object representing the quantity and unit.
"""
return Information(quantity, cls._SYMBOLS[unit])
@classmethod
    def is_valid_symbol(cls, symbol):
"""
Find whether a symbol is a valid unit of information.
:param symbol: The symbol to check.
:return: True if the symbol is a valid unit, false otherwise.
"""
return symbol in cls._SYMBOLS
@classmethod
def is_valid_category(cls, category):
"""
Find whether a category is valid.
:param category: The category to check.
        :return: True if the category is valid, false otherwise.
"""
return category in cls._CATEGORY_MAPS
def at_speed(self, speed):
"""
Find how long it would take to process this amount of data at a certain
speed.
:param speed: The speed of processing.
:return: The time taken as a `datetime.timedelta`.
"""
from nibble import Duration
scale = self.bits / speed.information.bits
return Duration(seconds=speed.duration.total_seconds() * scale)
def in_duration(self, duration):
"""
Find the speed of processing if this quantity of information is
processed in a given time.
:param duration: The time taken to process this amount of data.
:return: The speed of the processing.
"""
from nibble import Speed
return Speed(self, duration)
@classmethod
def parse(cls, string):
"""
Get an object representing an information string, e.g. "12TiB" or "9 n".
:param string: The information string.
:return: The parsed quantity of information.
:raises ValueError: If the string could not be parsed. Check the message
for the reason why.
"""
result = cls._PARSE_REGEX.match(string.strip())
if not result:
raise ValueError(
'Unable to parse information string: {0}'.format(string))
quantity_str = result.group(1)
try:
quantity = float(quantity_str)
if quantity.is_integer():
quantity = int(quantity)
except ValueError:
raise ValueError(
'Unable to parse quantity number: {0}'.format(quantity_str))
unit_str = resul
|
arskom/JsQt
|
src/jsqt/il/qt/gui.py
|
Python
|
gpl-2.0
| 7,372
| 0.003798
|
# encoding: utf8
#
# This file is part of JsQt.
#
# Copyright (C) Arskom Ltd. www.arskom.com.tr
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import jsqt
import obj
from jsqt import il
from base import SimpleProp
from jsqt.xml import etree
class MGeometryProperties(object):
hor_stretch_pol = "Expanding"
ver_stretch_pol = "Expanding"
hor_stretch_coef = 1
ver_stretch_coef = 1
def __init__(self):
self.__margin_t = etree.fromstring("<number>1</number>")
self.__margin_b = etree.fromstring("<number>1</number>")
self.__margin_l = etree.fromstring("<number>1</number>")
self.__margin_r = etree.fromstring("<number>1</number>")
self.__margin = etree.fromstring("<number>1</number>")
self.__min_width = etree.fromstring("<number>0</number>")
self.__min_height = etree.fromstring("<number>0</number>")
def get_geometry_top(self):
if "geometry.y" in self.simple_prop_data:
return int(self.simple_prop_data["geometry.y"].text)
else:
return 0
geometry_top = property(get_geometry_top)
def get_geometry_left(self):
if "geometry.x" in self.simple_prop_data:
return int(self.simple_prop_data["geometry.x"].text)
else:
return 0
geometry_left = property(get_geometry_left)
def __handle_size_policy(self, elt):
if elt[0].tag == 'sizepolicy':
tmp = self._decode_nested_prop(elt[0])
self.hor_stretch_pol = elt[0].attrib['hsizetype']
self.hor_stretch_coef = int(tmp['horstretch'].text)
self.ver_stretch_pol = elt[0].attrib['vsizetype']
self.ver_stretch_coef = int(tmp['verstretch'].text)
if not self.hor_stretch_pol in ("Fixed", "Minimum"):
if self.hor_stretch_coef == 0:
self.hor_stretch_coef = 1
if not self.ver_stretch_pol in ("Fixed", "Minimum"):
if self.ver_stretch_coef == 0:
self.ver_stretch_coef = 1
else:
jsqt.debug_print("\t\t", "WARNING: property 'sizePolicy' doesn't "
"have a 'sizepolicy' tag")
#
# hacking around qooxdoo bug 3075
# http://bugzilla.qooxdoo.org/show_bug.cgi?id=3075
#
def __handle_minimum_size(self, elt):
if elt[0].tag == 'size':
tmp = {}
for e in elt[0]:
tmp[e.tag]=e
self.__min_width = tmp['width']
self.__min_height = tmp['height']
self.simple_prop_data['geometry.width'] = self.__min_width
self.simple_prop_data['geometry.height'] = self.__min_height
else:
jsqt.debug_print("\t\t", "WARNING: property 'minimumSize' doesn't "
"have a 'size' tag")
def _compile_geometry(self, dialect, ret):
if not self._compile_simple_prop(SimpleProp("setMargin", il.primitive.DecimalInteger, 0), self.__margin):
self._compile_simple_prop(SimpleProp("setMarginTop", il.primitive.DecimalInteger, 0), self.__margin_t)
self._compile_simple_prop(SimpleProp("setMarginLeft", il.primitive.DecimalInteger, 0), self.__margin_l)
self._compile_simple_prop(SimpleProp("setMarginRight", il.primitive.DecimalInteger, 0), self.__margin_r)
self._compile_simple_prop(SimpleProp("setMarginBottom", il.primitive.DecimalInteger, 0), self.__margin_b)
self._compile_simple_prop(SimpleProp("setMinWidth", il.primitive.DecimalInteger, 0), self.__min_width)
self._compile_simple_prop(SimpleProp("setMinHeight", il.primitive.DecimalInteger, 0), self.__min_height)
xml_false = etree.fromstring("<bool>false</bool>")
if self.hor_stretch_pol == "Fixed":
self._compile_simple_prop(SimpleProp("setAllowGrowX",
il.primitive.Boolean), xml_false)
if self.ver_stretch_pol == "Fixed":
self._compile_simple_prop(SimpleProp("setAllowGrowY",
il.primitive.Boolean), xml_false)
known_simple_props = {
"geometry": {
"x": SimpleProp("", il.primitive.DecimalInteger, 0),
"y": SimpleProp("", il.primitive.DecimalInteger, 0),
"width": SimpleProp("setWidth", il.primitive.DecimalInteger, 0),
"height": SimpleProp("setHeight", il.primitive.DecimalInteger, 0),
},
"maximumSize": {
"width": SimpleProp("setMaxWidth", il.primitive.DecimalInteger,
16777215),
"height": SimpleProp("setMaxHeight", il.primitive.DecimalInteger,
16777215),
}
}
known_complex_props = {
"sizePolicy": __handle_size_policy,
"minimumSize": __handle_minimum_size,
}
class WidgetBase(obj.Base, MGeometryProperties):
def __init__(self, elt, name=None):
MGeometryProperties.__init__(self)
obj.Base.__init__(self, elt, name)
def compile(self, dialect, ret):
obj.Base.compile(self, dialect, ret)
self._compile_geometry(dialect, ret)
def _decode_nested_prop(self, elt):
retval = {}
for e in elt:
retval[e.tag] = e
return retval
@staticmethod
def get_class(class_name):
if class_name in widget_dict:
return widget_dict[class_name]
elif class_name in layout_dict:
return layout_dict[class_name]
elif class_name in custom_dict:
return custom_dict[class_name]
else:
return Stub
@staticmethod
def get_instance(elt):
if elt.tag == 'spacer':
class_name = 'Spacer'
else:
class_name = elt.attrib['class']
return WidgetBase.get_class(class_name)(elt)
class Stub(WidgetBase):
real = False
def __init__(self, elt, name=None):
WidgetBase.__init__(self, elt, name)
self.class_name = elt.attrib['class']
def compile(self, dialect, ret=None):
ret.ctor.add_statement(
il.primitive.Comment("The instance named '%s' is of type '%s' which"
" is not supported (yet?)" % (self.name, self.class_name)))
class QSpacer(WidgetBase):
type = "qx.ui.core.Spacer"
known_simple_props = {
"sizeHint": {
"width": SimpleProp("setWidth", il.primitive.DecimalInteger, 0),
"height": SimpleProp("setHeight", il.primitive.DecimalInteger, 0),
},
}
|
nmiranda/dedupe
|
dedupe/datamodel.py
|
Python
|
mit
| 4,180
| 0.010048
|
import pkgutil
from collections import OrderedDict
import dedupe.variables
import dedupe.variables.base as base
from dedupe.variables.base import MissingDataType
from dedupe.variables.interaction import InteractionType
for _, module, _ in pkgutil.iter_modules(dedupe.variables.__path__,
'dedupe.variables.') :
__import__(module)
FIELD_CLASSES = dict(base.allSubclasses(base.FieldType))
class DataModel(dict) :
def __init__(self, fields):
self['bias'] = 0
primary_fields, data_model = typifyFields(fields)
self.derived_start = len(data_model)
data_model += interactions(fields, primary_fields)
data_model += missing(data_model)
self['fields'] = data_model
self.n_fields = len(self['fields'])
self.primary_fields = primary_fields
# Changing this from a property to just a normal attribute causes
# pickling problems, because we are removing static methods from
# their class context. This could be fixed by defining comparators
# outside of classes in fieldclasses
@property
def field_comparators(self) :
start = 0
stop = 0
comparators = []
for field in self.primary_fields :
stop = start + len(field)
comparators.append((field.field, field.comparator, start, stop))
start = stop
return comparators
@property
def missing_field_indices(self) :
return [i for i, definition
in enumerate(self['fields'])
if definition.has_missing]
@property
def interactions(self) :
indices = []
fields = self['fields']
field_names = [field.name for field in fields]
for definition in fields :
if hasattr(definition, 'interaction_fields') :
interaction_indices = []
for interaction_field in definition.interaction_fields :
                    interaction_indices.append(field_names.index(interaction_field))
indices.append(interaction_indices)
return indices
def typifyFields(fields) :
primary_fields = []
data_model = []
for definition in fields :
try :
field_type = definition['type']
except TypeError :
raise TypeError("Incorrect field specification: field "
"specifications are dictionaries that must "
"include a type definition, ex. "
"{'field' : 'Phone', type: 'String'}")
except KeyError :
raise KeyError("Missing field type: fields "
"specifications are dictionaries that must "
"include a type definition, ex. "
"{'field' : 'Phone', type: 'String'}")
if field_type == 'Interaction' :
continue
try :
field_class = FIELD_CLASSES[field_type]
except KeyError :
raise KeyError("Field type %s not valid. Valid types include %s"
% (definition['type'], ', '.join(FIELD_CLASSES)))
field_object = field_class(definition)
primary_fields.append(field_object)
if hasattr(field_object, 'higher_vars') :
data_model.extend(field_object.higher_vars)
else :
data_model.append(field_object)
return primary_fields, data_model
def missing(data_model) :
missing_variables = []
for definition in data_model[:] :
if definition.has_missing :
missing_variables.append(MissingDataType(definition.name))
return missing_variables
def interactions(definitions, primary_fields) :
field_d = {field.name : field for field in primary_fields}
interaction_class = InteractionType
interactions = []
for definition in definitions :
if definition['type'] == 'Interaction' :
field = interaction_class(definition)
field.expandInteractions(field_d)
interactions.extend(field.higher_vars)
return interactions
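# Illustrative usage sketch (not part of the original file): field definitions
# are dictionaries with 'field' and 'type' keys, matching the error messages
# in typifyFields above. The field names are hypothetical.
if __name__ == '__main__':
    example_fields = [
        {'field': 'Name', 'type': 'String'},
        {'field': 'Phone', 'type': 'String'},
    ]
    model = DataModel(example_fields)
    print('%d model fields' % len(model['fields']))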
|
ephracis/hermes
|
utilities/lists.py
|
Python
|
mit
| 1,117
| 0.048344
|
""" This file contains code for working on lists and dictionaries. """
def moreThanOne(dict, key):
""" Checks if a key in a dictionary has a value more than one.
Arguments:
dict -- the dictionary
key -- the key
Returns:
    True if the key exists in the dictionary and the value is at least one, otherwise false
"""
return key in dict and dict[key] > 0
def anyMoreThanOne(dict, keys):
""" Checks if any of a list of keys in a dictionary has a value more than one.
Arguments:
dict -- the dictionary
keys -- the keys
Returns:
True if any key exists in the dictionary and the value is at least one, otherwise false
"""
for key in keys:
if key in dict and dict[key] > 0:
return True
return False
def makeUnique(list):
""" Removes duplicates from a list. """
u = []
for l in list:
if not l in u:
u.append(l)
return u
def alphabetical(lst):
""" Sorts a list of tuples in reverse alphabetical order by the first key
in the tuple.
Arguments:
lst -- the list to sort
Returns:
the sorted list
"""
return list(reversed(sorted(lst, key=lambda x: x[0])))
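# Illustrative usage sketch (not part of the original file):
if __name__ == "__main__":
    counts = {"a": 2, "b": 0}
    print(moreThanOne(counts, "a"))             # True
    print(anyMoreThanOne(counts, ["b", "a"]))   # True
    print(makeUnique([1, 2, 2, 3, 1]))          # [1, 2, 3]
    print(alphabetical([("a", 1), ("c", 3)]))   # [('c', 3), ('a', 1)]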
|
iglpdc/nipype
|
nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py
|
Python
|
bsd-3-clause
| 1,546
| 0.021992
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import WarpPointsToStd
def test_WarpPointsToStd_inputs():
input_map = dict(args=dict(argstr='%s',
),
coord_mm=dict(argstr='-mm',
xor=['coord_vox'],
),
coord_vox=dict(argstr='-vox',
xor=['coord_mm'],
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
img_file=dict(argstr='-img %s',
mandatory=True,
),
in_coords=dict(argstr='%s',
mandatory=True,
position=-1,
),
out_file=dict(name_source='in_coords',
name_template='%s_warped',
output_name='out_file',
),
premat_file=dict(argstr='-premat %s',
),
std_file=dict(argstr='-std %s',
mandatory=True,
),
terminal_output=dict(nohash=True,
),
warp_file=dict(argstr='-warp %s',
xor=['xfm_file'],
),
xfm_file=dict(argstr='-xfm %s',
xor=['warp_file'],
),
)
inputs = WarpPointsToStd.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_WarpPointsToStd_outputs():
output_map = dict(out_file=dict(),
)
outputs = WarpPointsToStd.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
zitryss/Design-Patterns-in-Python
|
behavioral/null_object.py
|
Python
|
mit
| 965
| 0
|
"""
Encapsulate the absence of an object by providing a substitutable
alternative that offers suitable default do nothing behavior.
"""
import abc
class AbstractObject(metaclass=abc.ABCMeta):
"""
Declare the interface for Client's collaborator.
Implement default behavior for the interface common to all classes,
as appropriate.
"""
@abc.abstractmethod
def request(self):
pass
class RealObject(AbstractObject):
"""
Define a concrete subclass of AbstractObject whose instances provide
useful behavior that Client expects.
"""
def request(self):
pass
class NullObject(AbstractObject):
"""
Provide an interface identical to AbstractObject's so that a null
object can be substituted for a real object.
Implement its interface to do nothing. What exactly it means to do
nothing depends on what sort of behavior Client is expecting.
"""
    def request(self):
pass
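# Illustrative client sketch (not part of the original file): a NullObject can
# be passed anywhere a RealObject is expected, so the client needs no None
# checks.
def _client_code(collaborator):
    collaborator.request()


if __name__ == "__main__":
    _client_code(RealObject())
    _client_code(NullObject())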
|
mit-ll/python-keylime
|
keylime/revocation_notifier.py
|
Python
|
bsd-2-clause
| 5,242
| 0.001145
|
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
from multiprocessing import Process
import threading
import functools
import time
import os
import sys
import signal
import simplejson as json
import zmq
from keylime import config
from keylime import crypto
from keylime import keylime_logging
from keylime import secure_mount
logger = keylime_logging.init_logging('revocation_notifier')
broker_proc = None
def start_broker():
def worker():
context = zmq.Context(1)
frontend = context.socket(zmq.SUB)
frontend.bind("ipc:///tmp/keylime.verifier.ipc")
frontend.setsockopt(zmq.SUBSCRIBE, b'')
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind(
f"tcp://{config.get('cloud_verifier', 'revocation_notifier_ip')}:"
f"{config.getint('cloud_verifier', 'revocation_notifier_port')}"
)
        zmq.device(zmq.FORWARDER, frontend, backend)
global broker_proc
broker_proc = Process(target=worker)
broker_proc.start()
def stop_broker():
global broker_proc
if broker_proc is not None:
# Remove the socket file before we kill the process
if os.path.exists("/tmp/keylime
|
.verifier.ipc"):
os.remove("/tmp/keylime.verifier.ipc")
os.kill(broker_proc.pid, signal.SIGKILL)
def notify(tosend):
def worker(tosend):
context = zmq.Context()
mysock = context.socket(zmq.PUB)
mysock.connect("ipc:///tmp/keylime.verifier.ipc")
# wait 100ms for connect to happen
time.sleep(0.2)
# now send it out via 0mq
logger.info("Sending revocation event to listening nodes...")
for i in range(config.getint('cloud_verifier', 'max_retries')):
try:
mysock.send_string(json.dumps(tosend))
break
except Exception as e:
logger.debug("Unable to publish revocation message %d times, trying again in %f seconds: %s" % (
i, config.getfloat('cloud_verifier', 'retry_interval'), e))
time.sleep(config.getfloat('cloud_verifier', 'retry_interval'))
mysock.close()
cb = functools.partial(worker, tosend)
t = threading.Thread(target=cb)
t.start()
cert_key = None
def await_notifications(callback, revocation_cert_path):
global cert_key
if revocation_cert_path is None:
raise Exception("must specify revocation_cert_path")
context = zmq.Context()
mysock = context.socket(zmq.SUB)
mysock.setsockopt(zmq.SUBSCRIBE, b'')
mysock.connect(
f"tcp://{config.get('general', 'receive_revocation_ip')}:"
f"{config.getint('general', 'receive_revocation_port')}"
)
logger.info('Waiting for revocation messages on 0mq %s:%s' %
(config.get('general', 'receive_revocation_ip'), config.getint('general', 'receive_revocation_port')))
while True:
rawbody = mysock.recv()
body = json.loads(rawbody)
if cert_key is None:
# load up the CV signing public key
if revocation_cert_path is not None and os.path.exists(revocation_cert_path):
logger.info(
"Lazy loading the revocation certificate from %s" % revocation_cert_path)
with open(revocation_cert_path) as f:
certpem = f.read()
cert_key = crypto.x509_import_pubkey(certpem)
if cert_key is None:
logger.warning(
"Unable to check signature of revocation message: %s not available" % revocation_cert_path)
elif 'signature' not in body or body['signature'] == 'none':
logger.warning("No signature on revocation message from server")
elif not crypto.rsa_verify(cert_key, body['msg'].encode('utf-8'), body['signature'].encode('utf-8')):
logger.error("Invalid revocation message siganture %s" % body)
else:
message = json.loads(body['msg'])
logger.debug(
"Revocation signature validated for revocation: %s" % message)
callback(message)
def main():
start_broker()
def worker():
def print_notification(revocation):
logger.warning("Received revocation: %s" % revocation)
keypath = '%s/unzipped/RevocationNotifier-cert.crt' % (
secure_mount.mount())
await_notifications(print_notification, revocation_cert_path=keypath)
t = threading.Thread(target=worker)
t.start()
# time.sleep(0.5)
json_body2 = {
'v': 'vbaby',
'agent_id': '2094aqrea3',
'cloudagent_ip': 'ipaddy',
'cloudagent_port': '39843',
'tpm_policy': '{"ab":"1"}',
'vtpm_policy': '{"ab":"1"}',
'metadata': '{"cert_serial":"1"}',
'allowlist': '{}',
'ima_sign_verification_keys': '{}',
'revocation_key': '',
'revocation': '{"cert_serial":"1"}',
}
print("sending notification")
notify(json_body2)
time.sleep(2)
print("shutting down")
stop_broker()
print("exiting...")
sys.exit(0)
print("done")
if __name__ == "__main__":
main()
|
andrewseidl/chrono
|
src/demos/python/demo_solidworks_irrlicht.py
|
Python
|
bsd-3-clause
| 2,784
| 0.015445
|
#-------------------------------------------------------------------------------
# Name: modulo1
# Purpose:
#
# Author: tasora
#
# Created: 14/02/2012
# Copyright: (c) tasora 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
def main():
pass
if __name__ == '__main__':
main()
import os
import math
import ChronoEngine_python_core as chrono
import ChronoEngine_python_irrlicht as chronoirr
# ---------------------------------------------------------------------
#
# Create the simulation system.
# (Do not create parts and constraints programmatically here, we will
# load a mechanism from file)
my_system = chrono.ChSystem()
# Set the collision margins. This is especially important for very large or
# very small objects (as in this example)! Do this before creating shapes.
chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001);
chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001);
# ---------------------------------------------------------------------
#
# load the file generated by the SolidWorks CAD plugin
# and add it to the ChSystem
#
print ("L
|
oading C::E scene...");
exported_items = chrono.ImportSolidWorksSystem('../../../data/solid_works/swiss_escapement')
print ("...done!");
# Print exported items
for my_item in exported_items:
print (my_item.GetName())
# Add items to the physical system
for my_item in exported_items:
my_system.Add(my_item)
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(my_system);
myapplication.AddTypicalSky('../../../data/skybox/')
myapplication.AddTypicalCamera(chronoirr.vector3df(0.6,0.6,0.8))
myapplication.AddTypicalLights()
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.
myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
myapplication.AssetUpdateAll();
# ---------------------------------------------------------------------
#
# Run the simulation
#
myapplication.GetSystem().SetMaxPenetrationRecoverySpeed(0.002);
myapplication.SetTimestep(0.002)
while(myapplication.GetDevice().run()):
myapplication.BeginScene()
myapplication.DrawAll()
myapplication.DoStep()
myapplication.EndScene()
|
taogeT/livetv_mining
|
crawler/gather/daily_spiders/douyu.py
|
Python
|
apache-2.0
| 2,933
| 0.002728
|
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
from ..models import LiveTVSite, LiveTVRoom, LiveTVRoomPresent, DAILY_DATE_FORMAT
from ..items import DailyItem
import numpy
import json
class DouyuDailySpider(Spider):
name = 'douyu_daily'
allowed_domains = ['douyucdn.cn', 'douyu.com']
custom_settings = {
'ITEM_PIPELINES': {
'gather.pipelines.StatisticPipeline': 300
}
}
def start_requests(self):
summary_utc = datetime.utcnow() - timedelta(days=1)
db_engine = create_engine(self.settings.get('SQLALCHEMY_DATABASE_URI'))
db_session = sessionmaker(bind=db_engine)()
db_query = db_session.query(LiveTVSite.id.label('site_id'), LiveTVRoom.id.label('room_id'),
LiveTVRoom.url.label('room_url'),
LiveTVRoomPresent.crawl_date_format.label('summary_date'),
func.array_agg(LiveTVRoomPresent.online).label('online_list')) \
.join(LiveTVSite, LiveTVRoom, LiveTVRoomPresent) \
.filter(LiveTVSite.code == 'douyu') \
.filter(LiveTVRoomPresent.crawl_date_format == summary_utc.strftime(DAILY_DATE_FORMAT)) \
.group_by(LiveTVSite.id, LiveTVRoom.id, LiveTVRoom.url, LiveTVRoomPresent.crawl_date_format)
for group_row in db_query:
meta_info = {
'site_id': group_row.site_id,
'room_id': group_row.room_id,
'summary_date': group_row.summary_date,
'online': numpy.median(group_row.online_list)
}
yield Request('http://open.douyucdn.cn/api/RoomApi/room/' + group_row.room_id, callback=self.parse,
meta=meta_info)
db_session.close()
def parse(self, response):
resp_info = json.loads(response.text)
if resp_info['error'] == 0:
room_info = resp_info['data']
meta_info = dict(response.meta, followers=room_info['fans_num'])
yield Request('https://m.douyu.com/html5/live?roomId=' + meta_info['room_id'],
callback=self.parse_html5, meta=meta_info)
def parse_html5(self, response):
resp_info = json.loads(response.text)
if resp_info['error'] == 0:
room_info = resp_info['data']
yield DailyItem({
'site_id': response.meta['site_id'],
'room_id': response.meta['room_id'],
'summary_date': response.meta['summary_date'],
'online': response.meta['online'],
                'followers': response.meta['followers'],
'description': '',
'announcement': room_info['show_details'],
'fallback': True
})
|
megapctr/Yak
|
tests/projects/querysets.py
|
Python
|
mit
| 382
| 0
|
from model_mommy.mommy import make
from yak.projects.models import Project
def test_accessible_to(me):
my_project = make(Project, owner=me)
    their_project = make(Project, team_members=[me])
foreign_project = make(Project)
qs = Project.objects.accessible_to(me)
assert set(qs) == {my_project, their_project}
assert foreign_project in Project.objects.all()
|
theatlantic/python-active-directory
|
tut/tutorial2.py
|
Python
|
mit
| 356
| 0.002809
|
from activedirectory import Client, Creds, activate
domain = 'freeadi.org'
creds = Creds(domain)
creds.load()
activate(creds)
client = Client(domain)
users = client.search('(objectClass=user)', scheme='gc')
for dn,attrs in users:
name = attrs['sAMAccountName'][0]
domain = client.domain_name_from_dn(dn)
print('-> %s (%s)' % (name, domain))
|
kyamagu/psd2svg
|
tests/test_convert.py
|
Python
|
mit
| 1,077
| 0.000929
|
from __future__ import absolute_import, unicode_literals
from builtins import str
import os
import pytest
import io
from glob import glob
from psd_tools import PSDImage
from psd2svg import psd2svg
FIXTURES = [
p for p in glob(
os.path.join(os.path.dirname(__file__), 'fixtures', '*.psd'))
]
@pytest.mark.parametrize('psd_file', FIXTURES)
def test_convert(tmpdir, psd_file):
psd2svg(psd_file, tmpdir.dirname)
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_input_io(tmpdir, psd_file):
with open(psd_file, "rb")
|
as f:
assert isinstance(psd2svg(f), str)
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_input_psd(tmpdir, psd_file):
psd = PSDImage.open(psd_file)
psd2svg(psd)
@pytest.mark.parametrize('psd_file', FIXTURES[2:3])
def test_input_layer(tmpdir, psd_file):
psd = PSDImage.open(psd_file)
    assert psd2svg(psd[0]).startswith("<")
@pytest.mark.parametrize('psd_file', FIXTURES[0:1])
def test_output_io(tmpdir, psd_file):
with io.StringIO() as f:
assert f == psd2svg(psd_file, f)
|
paylogic/atilla
|
atilla/pagination.py
|
Python
|
mit
| 3,235
| 0.002473
|
"""
|
Helperfunctions for paginat
|
ion."""
import collections
import math
from six.moves import http_client
from six.moves.urllib import parse as urlparse
from flask import url_for, current_app
from atilla import exceptions
PageResponse = collections.namedtuple('PageResponse', 'content,count')
def parse_current_page(page):
"""Get page number from page number string.
:param page: page number
:type page: str
:return: `int` parsed page number
:raises: atilla.exceptions.ApiException - when given page is not an integer or less than 1
"""
try:
page = int(page)
except TypeError:
message = description = 'Page is not an integer.'
raise exceptions.ApiException(
message=message,
description=description,
status_code=http_client.BAD_REQUEST,
)
if page <= 0:
message = description = 'The supplied page number is less than 1.'
raise exceptions.ApiException(
message=message,
status_code=http_client.BAD_REQUEST,
description=description,
)
return page
class Page(object):
"""Current page."""
def __init__(self, func, page):
"""Intialize current page.
:param func: Function for getting items.
:param page: Requested page.
"""
limit_per_page = current_app.config['OBJECTS_PER_PAGE']
def get_response():
return func(
limit=limit_per_page,
offset=0 if page == 1 else limit_per_page * (page - 1),
)
response = get_response()
self.number_pages = int(math.ceil(float(response.count) / limit_per_page))
if page > self.number_pages >= 1:
page = self.number_pages
response = get_response()
self.number = page
self.content = response.content
self.item_count = response.count
def has_next_page(self):
"""Helper function for checking that we have next page depends on number of page and number of pages.
:return: Boolean.
"""
return self.number < self.number_pages
def has_prev_page(self):
"""Helper function for checking that we have next page depends on number of page.
:return: Boolean.
"""
return self.number > 1
def calculate_first_next_prev_last_links(page, collection_uri):
"""Helper function which updates response with next and prev links.
:param page: Instance of Page class.
:param collection_uri: name of the flask route to use as a collection url for pagination links.
:return: Dictionary with links.
"""
links = {}
if page.has_next_page():
links['next'] = urlparse.urljoin(url_for(collection_uri, _external=True), '?page={0}'.format(page.number + 1))
if page.has_prev_page():
links['prev'] = urlparse.urljoin(url_for(collection_uri, _external=True), '?page={0}'.format(page.number - 1))
if page.number_pages > 1:
links['last'] = urlparse.urljoin(url_for(collection_uri, _external=True), '?page={0}'.format(page.number_pages))
if page.number > 1:
links['first'] = url_for(collection_uri, _external=True)
return links
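# Illustrative usage sketch (not part of the original file). `fetch_items` and
# the route name 'items_collection' are hypothetical; running it requires an
# active Flask application/request context with OBJECTS_PER_PAGE configured.
# The callable passed to Page must accept limit/offset and return a
# PageResponse, as the class above expects.
def example_view(raw_page='1'):
    def fetch_items(limit, offset):
        items = list(range(100))
        return PageResponse(content=items[offset:offset + limit], count=len(items))

    page = Page(fetch_items, parse_current_page(raw_page))
    links = calculate_first_next_prev_last_links(page, 'items_collection')
    return {'items': page.content, 'links': links}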
|
biosustain/optlang
|
slow_tests/test_miplib_gurobi_interface.py
|
Python
|
apache-2.0
| 3,070
| 0.002606
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import json
import os
import tempfile
import unittest
from functools import partial
import nose
try:
import gurobipy
except ImportError as e:
raise nose.SkipTest('Skipping MILP tests because gurobi is not available.')
else:
from optlang.gurobi_interface import Model
# problems from http://miplib.zib.de/miplib2003/miplib2003.php
TRAVIS = os.getenv('TRAVIS', False)
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
SOLUTION = os.path.join(DATA_PATH, "miplib2003.json")
PROBLEMS_DIR = os.path.join(DATA_PATH, "miplib2003")
def load_problem(mps_file):
prob_tmp_file = tempfile.mktemp(suffix='.mps')
with open(prob_tmp_file, 'wb') as tmp_handle:
f = gzip.open(mps_file, 'rb')
tmp_handle.write(f.read())
f.close()
problem = gurobipy.read(prob_tmp_file)
model = Model(problem=problem)
model.configuration.presolve = True
model.configuration.verbosity = 3
model.configuration.timeout = 60 * 9
return problem, model
def check_dimensions(model, gurobi_problem):
nose.tools.assert_true(len(gurobi_problem.getVars()) == len(model.variables))
def check_optimization(model, expected_solution):
status = model.optimize()
if status is not "time_limit":
nose.tools.assert_equals(status, expected_solution['status'])
if status is "optimal":
nose.tools.assert_almost_equal(expected_solution['solution'], model.objective.value, places=4)
def test_miplib(solutions=SOLUTION, problem_dir=PROBLEMS_DIR):
if TRAVIS:
raise nose.SkipTest('Skipping extensive MILP tests on travis-ci.')
with open(solutions, "r") as f:
data = json.load(f)
print(data)
for name, problem_data in data.items():
problem_file = os.path.join(problem_dir, "{}.mps.gz".format(name))
problem, model = load_problem(problem_file)
func = partial(check_dimensions, model, problem)
func.description = "test_miplib_dimensions_%s (%s)" % (name, os.path.basename(str(__file__)))
yield func
func = partial(check_optimization, model, problem_data)
func.description = "test_miplib_optimization_%s (%s)" % (name, os.path.basename(str(__file__)))
yield func
if __name__ == '__main__':
nose.runmodule()
|
mozman/ezdxf
|
examples/addons/r12writer.py
|
Python
|
mit
| 2,030
| 0
|
# Copyright (c) 2020 Manfred Moitzi
# License: MIT License
from pathlib import Path
from time import perf_counter
import math
from ezdxf.addons import MengerSponge
from ezdxf.addons import r12writer
from ezdxf.render.forms import sphere, circle, translate
DIR = Path("~/Desktop/Outbox").expanduser()
def menger_sponge(filename, level=1, kind=0):
t0 = perf_counter()
sponge = MengerSponge(level=level, kind=kind).mesh()
t1 = perf_counter()
print(f"Build menger sponge <{kind}> in {t1 - t0:.5f}s.")
with r12writer(filename) as r12:
r12.add_polyface(sponge.vertices, sponge.faces, color=1)
print(f'saved as "{filename}".')
def polymesh(filename, size=(10, 10)):
m, n = size # rows, cols
dx = math.pi / m * 2
dy = math.pi / n * 2
vertices = []
for x in range(m): # rows second
z1 = math.sin(dx * x)
for y in range(n): # cols first
z2 = math.sin(dy * y)
z = z1 * z2
vertices.append((x, y, z))
with r12writer(filename) as r12:
r12.add_polymesh(vertices, size=size, color=1)
print(f'saved as "{filename}".')
def polyface_sphere(filename):
mesh = sphere(16, 8, quads=True)
with r12writer(filename) as r12:
r12.add_polyface(mesh.vertices, mesh.faces, color=1)
print(f'saved as "{filename}".')
def polylines(filename):
with r12writer(filename) as r12:
r12.add_polyline_2d(circle(8), color=1, closed=False)
r12.add_polyline_2d(
translate(circle(8), vec=(3, 0)), color=3, closed=True
)
r12.add_polyline_2d(
            [(0, 4), (4, 4, 1), (8, 4, 0, 0.2, 0.000001), (12, 4)],
format="xybse",
start_width=0.1,
end_width=0.1,
color=5,
)
print(f'saved as "{filename}".')
if __name__ == "__main__":
menger_sponge(DIR / "menger_sponge_r12.dxf", level=2)
polymesh(DIR / "polymesh.dxf", size=(20, 10))
polyface_sphere(DIR / "sphere.dxf")
polylines(DIR / "polylines
|
.dxf")
|
aldian/tensorflow
|
tensorflow/python/ops/image_ops_impl.py
|
Python
|
apache-2.0
| 217,245
| 0.003701
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable('RandomCrop')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('HSVToRGB')
ops.NotDifferentiable('DrawBoundingBoxes')
ops.NotDifferentiable('SampleDistortedBoundingBox')
ops.NotDifferentiable('SampleDistortedBoundingBoxV2')
# TODO(bsteiner): Implement the gradient function for extract_glimpse
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')
ops.NotDifferentiable('NonMaxSuppressionV2')
ops.NotDifferentiable('NonMaxSuppressionWithOverlaps')
ops.NotDifferentiable('GenerateBoundingBoxProposals')
# pylint: disable=invalid-name
def _assert(cond, ex_type, msg):
"""A polymorphic assert, works with tensors and boolean expressions.
If `cond` is not a tensor, behave like an ordinary assert statement, except
that a empty list is returned. If `cond` is a tensor, return a list
containing a single TensorFlow assert op.
Args:
cond: Something evaluates to a boolean value. May be a tensor.
ex_type: The exception class to use.
msg: The error message.
Returns:
A list, containing at most one assert op.
"""
if _is_tensor(cond):
return [control_flow_ops.Assert(cond, [msg])]
else:
if not cond:
raise ex_type(msg)
else:
return []
def _is_tensor(x):
"""Returns `True` if `x` is a symbolic tensor-like object.
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (ops.Tensor, variables.Variable))
def _ImageDimensions(image, rank):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list of corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise, they are integer scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
def _Check3DImage(image, require_static=True):
"""Assert that we are working with a properly shaped image.
Args:
image: 3-D Tensor of shape [height, width, channels]
require_static: If `True`, requires that all dimensions of `image` are known
and non-zero.
Raises:
ValueError: if `image.shape` is not a 3-vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
image_shape = image.get_shape().with_rank(3)
except ValueError:
raise ValueError("'image' (shape %s) must be three-dimensional." %
image.shape)
if require_static and not image_shape.is_fully_defined():
raise ValueError("'image' (shape %s) must be fully defined." % image_shape)
if any(x == 0 for x in image_shape):
raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape)
if not image_shape.is_fully_defined():
return [
check_ops.assert_positive(
array_ops.shape(image),
["all dims of 'image.shape' "
'must be > 0.'])
]
else:
return []
def _Assert3DImage(image):
"""Assert that we are working with a properly shaped image.
Performs the check statically if possible (i.e. if the shape
is statically known). Otherwise adds a control dependency
to an assert op that checks the dynamic shape.
Args:
image: 3-D Tensor of shape [height, width, channels]
Raises:
ValueError: if `image.shape` is not a 3-vector.
Returns:
If the shape of `image` could be verified statically, `image` is
returned unchanged, otherwise there will be a control dependency
added that asserts the correct dynamic shape.
"""
return control_flow_ops.with_dependencies(
_Check3DImage(image, require_static=False), image)
def _AssertAtLeast3DImage(image):
"""Assert that we are working with a properly shaped image.
Performs the check statically if possible (i.e. if the shape
is statically known). Otherwise adds a control dependency
to an assert op that checks the dynamic shape.
Args:
image: >= 3-D Tensor of size [*, height, width, depth]
Raises:
ValueError: if image.shape is not a [>= 3] vector.
Returns:
If the shape of `image` could be verified statically, `image` is
returned unchanged, otherwise there will be a control dependency
added that asserts the correct dynamic shape.
"""
return control_flow_ops.with_dependencies(
_CheckAtLeast3DImage(image, require_static=False), image)
def _CheckAtLeast3DImage(image, require_static=True):
"""Assert that we are working with a properly shaped image.
Args:
    image: >= 3-D Tensor of size [*, height, width, depth]
require_static: If `True`, requires that all dimensions of `image` are known
and non-zero.
Raises:
ValueError: if image.shape is not a [>= 3] vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
if image.get_shape().ndims is None:
image_shape = image.get_shape().with_rank(3)
else:
image_shape = image.get_shape().with_rank_at_least(3)
except ValueError:
raise ValueError("'image' (shape %s) must be at least three-dimensional." %
image.shape)
if require_static and not image_shape.is_fully_defined():
raise ValueError('\'image\' must be fully defined.')
if any(x == 0 for x in image_shape[-3:]):
raise ValueError('inner 3 dims of \'image.shape\' must be > 0: %s' %
image_shape)
if not image_shape[-3:].is_fully_defined():
return [
check_ops.assert_positive(
array_ops.shape(image)[-3:],
["inner 3 dims of 'image.shape' "
'must be > 0.']),
|
s3ql/main
|
src/s3ql/parse_args.py
|
Python
|
gpl-3.0
| 14,648
| 0.003687
|
'''
argparse.py - this file is part of S3QL.
Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>
This work can be distributed under the terms of the GNU GPLv3.
This module provides a customized ArgumentParser class. Differences
are:
* a --version argument is added by default
* convenience functions are available for adding --quiet,
--debug, --cachedir, --log and --authfile options.
* instead of the usage string one can pass a usage list. The first
element will be prefixed with ``usage: `` as usual. Additional
elements will be printed on separate lines and prefixed with
`` or: ``.
 * When used as an element of a usage list, the ``DEFAULT_USAGE`` object
will be replaced by the automatically generated usage message,
excluding any --help arguments.
* When specified on its own, the replacement will be done including
any --help arguments.
* The ``usage`` and ``add_help`` settings are inherited from the
parent parser to the subparsers.
'''
# Pylint really gets confused by this module
#pylint: disable-all
from .logging import logging # Ensure use of custom logger class
from . import RELEASE
from .backends import prefix_map
from .common import _escape
from getpass import getpass
from argparse import ArgumentTypeError, ArgumentError
import configparser
import argparse
import stat
import os
import sys
import re
DEFAULT_USAGE = object()
log = logging.getLogger(__name__)
class HelpFormatter(argparse.HelpFormatter):
def _format_usage(self, usage, actions, groups, prefix):
'''Special handling for usage lists
If usage is a list object, its elements will be printed on
separate lines. DEFAULT_USAGE will be replaced by the
default usage string of the parser (but, if `usage`` is a list,
excluding any --help arguments)).
'''
if isinstance(usage, list):
# Omit help argument
actions = [ x for x in actions if not isinstance(x, argparse._HelpAction) ]
res = []
for s in usage:
if not res:
res.append('usage: ')
else:
res.append(' or: ')
if s is DEFAULT_USAGE:
res.append(super()._format_usage(None, actions, groups, '')[:-1])
else:
res.append(s % dict(prog=self._prog))
res.append('\n')
return '%s\n\n' % ''.join(res)
elif usage is DEFAULT_USAGE:
return super()._format_usage(None, actions, groups, prefix)
else:
return super()._format_usage(usage, actions, groups, prefix)
def format_help(self):
help_ = super().format_help()
if help_.count('\n') > 2:
return help_ + '\n'
else:
return help_
class SubParsersAction(argparse._SubParsersAction):
'''A replacement for _SubParsersAction that keeps
track of the parent parser'''
def __init__(self, **kw):
self.parent = kw.pop('parent')
super().__init__(**kw)
def add_parser(self, *a, **kwargs):
'''Pass parent usage and add_help attributes to new parser'''
if 'usage' not in kwargs:
# Inherit, but preserve old progs attribute
usage = self.parent.usage
repl = dict(prog=self.parent.prog)
if isinstance(usage, list):
usage = [ (x % repl if isinstance(x, str) else x)
for x in usage ]
elif usage:
usage = usage % repl
kwargs['usage'] = usage
if 'help' in kwargs:
kwargs.setdefault('description', kwargs['help'].capitalize() + '.')
kwargs.setdefault('add_help', self.parent.add_help)
kwargs.setdefault('formatter_class', self.parent.formatter_class)
if 'parents' in kwargs:
for p in kwargs['parents']:
if p.epilog:
kwargs.setdefault('epilog', p.epilog % dict(prog=self.parent.prog))
return super().add_parser(*a, **kwargs)
class ArgumentParser(argparse.ArgumentParser):
def __init__(self, *a, **kw):
if 'formatter_class' not in kw:
kw['formatter_class'] = HelpFormatter
super().__init__(*a, **kw)
self.register('action', 'parsers', SubParsersAction)
def add_version(self):
self.add_argument('--version', action='version',
help="just print program version and exit",
version='S3QL %s' % RELEASE)
def add_quiet(self):
self.add_argument("--quiet", action="store_true", default=False,
help="be really quiet")
def add_backend_options(self):
self.add_argument("--backend-options", default={}, type=suboptions_type,
metavar='<options>',
help="Backend specific options (separate by commas). See "
"backend documentation for available options.")
def add_debug(self):
destnote = ('Debug messages will be written to the target '
'specified by the ``--log`` option.')
self.add_argument("--debug-modules", metavar='<modules>',
type=lambda s: s.split(','), dest='debug',
help="Activate debugging output from specified modules "
"(use commas to separate multiple modules). "
+ destnote)
self.add_argument("--debug", action='append_const', const='s3ql',
help="Activate debugging output from all S3QL modules. "
+ destnote)
def add_cachedir(self):
self.add_argument("--cachedir", type=str, metavar='<path>',
default=os.path.expanduser("~/.s3ql"),
help='Store cached data in this directory '
'(default: `~/.s3ql)`')
def add_log(self, default=None):
self.add_argument("--log", type=str_or_None_type, metavar='<target>', default=default,
help='Destination for log messages. Specify ``none`` for standard '
'output or ``syslog`` for the system logging daemon. '
'Anything else will be interpreted as a file name. Log files '
'will be rotated when they reach 1 MiB, and at most 5 old log '
'files will be kept. Default: ``%(default)s``')
def add_storage_url(self):
self.add_argument("storage_url", metavar='<storage-url>',
type=storage_url_type,
help='Storage URL of the backend that contains the file system')
self.add_argument("--authfile", type=str, metavar='<path>',
default=os.path.expanduser("~/.s3ql/authinfo2"),
help='Read authentication credentials from this file '
'(default: `~/.s3ql/authinfo2)`')
def add_compress(self):
def compression_type(s):
hit = re.match(r'^([a-z0-9]+)(?:-([0-9]))?$', s)
if not hit:
raise argparse.ArgumentTypeError('%s is not a valid --compress value' % s)
alg = hit.group(1)
            lvl = hit.group(2)
if alg not in ('none', 'zlib', 'bzip2', 'lzma'):
raise argparse.ArgumentTypeError('Invalid compression algorithm: %s' % alg)
if lvl is None:
lvl = 6
else:
lvl = int(lvl)
if alg == 'none':
alg = None
return (alg, lvl)
self.add_argument("--compress", action="store", default='lzma-6',
metavar='<algorithm-lvl>', type=compression_type,
help="Compression algorithm and compression level to use when "
"storing new data. *algorithm* may be any of `lzma`, `bzip2`, "
"`zlib`, or none. *lvl* may be any integer from 0 (fastest) "
|
jcberquist/SublimeText-Lucee
|
src/modelcompletions/documentation.py
|
Python
|
mit
| 1,258
| 0.023847
|
STYLES = {
"side_color": "#4C9BB0",
"header_color": "#306B7B",
"header_bg_color": "#E4EEF1",
"text_color": "#272B33"
}
def get_documentation(bean_name, file_path, function_name, function_metadata):
model_doc = dict(STYLES)
model_doc["links"] = [{"href": "go_to_definition", "text": "Go to Definition"}]
model_doc["header"] = bean_name + "." + function_name + "()"
if len(function_metadata["access"]) > 0:
model_doc["he
|
ader"] = "<em>" + function_metadata["access"] + "</em> " + model_doc["header"]
if len(function_metadata["returntype"]) > 0:
model_doc["header"] += ":" + function_metadata["returntype"]
model_doc["description"] = "<small>" + file_path + "</small>"
model_doc["body"] = ""
if len(function_metadata["arguments"]) > 0:
model_doc["body"] += "<ul>"
for arg_name, arg_params in function_metadata["arguments"]:
model_doc["body"] += "<li>"
if arg_params["required"]:
model_doc["b
|
ody"] += "required "
if arg_params["type"]:
model_doc["body"] += "<em>" + arg_params["type"] + "</em> "
model_doc["body"] += "<strong>" + arg_name + "</strong>"
if arg_params["default"]:
model_doc["body"] += " = " + arg_params["default"]
model_doc["body"] += "</li>"
model_doc["body"] += "</ul>"
return model_doc
|
zephyrproject-rtos/zephyr
|
doc/_extensions/zephyr/vcs_link.py
|
Python
|
apache-2.0
| 2,232
| 0.000896
|
"""
VCS Link
########
Copyright (c) 2021 Nordic Semiconductor ASA
SPDX-License-Identifier: Apache-2.0
Introduction
============
This Sphinx extension can be used to obtain the VCS URL for a given Sphinx page.
This is useful, for example, when adding features like "Open on GitHub" on top
of pages. The extension installs a Jinja filter which can be used on the
template to obtain VCS page URLs.
Configuration options
=====================
- ``vcs_link_base_url``: Base URL used as a prefix for generated URLs.
- ``vcs_link_prefixes``: Mapping of pages (regex) <> VCS prefix.
- ``vcs_link_exclude``: List of pages (regex) that will not report a URL. Useful
for, e.g., auto-generated pages not in VCS.
"""
from functools import partial
import os
import re
from typing import Optional
from sphinx.application import Sphinx
__version__ = "0.1.0"
def vcs_link_get_url(app: Sphinx, pagename: str) -> Optional[str]:
"""Obtain VCS URL for the given page.
Args:
app: Sphinx instance.
        pagename: Page name (path).
Returns:
VCS URL if applicable, None otherwise.
"""
if not os.path.isfile(app.env.project.doc2path(pagename)):
return None
for exclude in app.config.vcs_link_exclude:
if re.match(exclude, pagename):
return None
found_prefix = ""
for pattern, prefix in app.config.vcs_link_prefixes.items():
if re.match(pattern, pagename):
found_prefix = prefix
break
return "/".join(
[
            app.config.vcs_link_base_url,
found_prefix,
app.env.project.doc2path(pagename, basedir=False),
]
)
def add_jinja_filter(app: Sphinx):
if app.builder.name != "html":
return
app.builder.templates.environment.filters["vcs_link_get_url"] = partial(
vcs_link_get_url, app
)
def setup(app: Sphinx):
app.add_config_value("vcs_link_base_url", "", "")
app.add_config_value("vcs_link_prefixes", {}, "")
app.add_config_value("vcs_link_exclude", [], "")
app.connect("builder-inited", add_jinja_filter)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
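# --- Hedged configuration sketch (added for illustration; not part of the original file) ---
# A conf.py fragment using this extension might look like the lines below. The
# base URL and the path prefixes are placeholders, not values taken from any
# real project configuration.
#
#   extensions = ["zephyr.vcs_link"]
#   vcs_link_base_url = "https://github.com/example/repo/blob/main"
#   vcs_link_prefixes = {
#       "samples/.*": "samples",
#       ".*": "doc",
#   }
#   vcs_link_exclude = ["reference/kconfig.*"]
#
# With that in place, an HTML template can emit a link through the installed
# Jinja filter, e.g. <a href="{{ pagename | vcs_link_get_url }}">Open on GitHub</a>.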
|
jvrsantacruz/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_data_labels11.py
|
Python
|
bsd-2-clause
| 1,503
| 0.000665
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_data_labels11.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': 1, 'leader_lines': 1, 'position': 'best_fit'},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
eharney/nova
|
nova/tests/api/openstack/compute/test_image_metadata.py
|
Python
|
apache-2.0
| 9,569
| 0
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob
from nova.api.openstack.compute import image_metadata
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
class ImageMetaDataTest(test.TestCase):
def setUp(self):
super(ImageMetaDataTest, self).setUp()
fakes.stub_out_glance(self.stubs)
self.controller = image_metadata.Controller()
def test_index(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
def test_show(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
def test_show_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
def test_show_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
def test_create(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
def test_create_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body)
def test_update_all(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
def test_update_all_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
def test_update_item_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1', body)
def test_update_item_bad_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = ''
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1', body)
def test_update_item_too_many_keys(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
overload = {}
for num in range(CONF.quota_metadata_items + 1):
overload['key%s' % num] = 'value%s' % num
body = {'meta': overload}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1', body)
def test_update_item_body_uri_mismatch(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'bad', body)
def test_delete(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
self.assertIsNone(res)
def test_delete_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
def test_delete_image_not_found(self):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
def test_too_many_metadata_items_on_create(self):
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
req.body = jsonutils.dumps(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, '123', data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, '123', data)
def test_too_many_metadata_items_on_put(self):
self.flags(quota_metadata_items=1)
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update, req, '123', 'blah', body)
def test_image_not_authorized_update(self):
image_id = 131
# see nova.tests.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
|
mscuthbert/abjad
|
abjad/tools/pitchtools/test/test_pitchtools_PitchSegment___min__.py
|
Python
|
gpl-3.0
| 214
| 0.004673
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_PitchSegment___min___01():
pitch_segment = pitchtools.PitchSegment([-2, -1.5, 6, 7, -1.5, 7])
    assert min(pitch_segment) == NamedPitch(-2)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/sankey/_hoverlabel.py
|
Python
|
mit
| 2,055
| 0.000487
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="sankey", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
            only if the hover label text spans two or
            more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
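# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# This validator backs the `hoverlabel` property of Sankey traces. Assuming a
# recent plotly version, the attributes documented above can be set like this
# (kept as a comment to avoid importing plotly inside the validator module):
#
#   import plotly.graph_objects as go
#
#   fig = go.Figure(go.Sankey(
#       node=dict(label=["A", "B"]),
#       link=dict(source=[0], target=[1], value=[1]),
#       hoverlabel=dict(bgcolor="white", bordercolor="gray", namelength=20),
#   ))
#   fig.show()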
|
OpenEye-Contrib/OEMicroservices
|
oemicroservices/common/__init__.py
|
Python
|
apache-2.0
| 74
| 0
|
# Initialization for oemicroservices.common
__all__ = ('functor', 'util')
|
PAIR-code/saliency
|
saliency/core/grad_cam_test.py
|
Python
|
apache-2.0
| 7,486
| 0.002939
|
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests accuracy and error handling for grad_cam."""
import unittest
import unittest.mock as mock
from .base import SHAPE_ERROR_MESSAGE
from . import grad_cam
import numpy as np
CONVOLUTION_LAYER_VALUES = grad_cam.CONVOLUTION_LAYER_VALUES
CONVOLUTION_OUTPUT_GRADIENTS = grad_cam.CONVOLUTION_OUTPUT_GRADIENTS
INPUT_HEIGHT_WIDTH = 5 # width and height of input images in pixels
class GradCamTest(unittest.TestCase):
"""To run: "python -m saliency.core.grad_cam_test" from top-level saliency directory."""
def setUp(self):
super().setUp()
self.grad_cam_instance = grad_cam.GradCam()
def testGradCamGetMask(self):
"""Tests the GradCAM method using a simple network.
Simple test case where the network contains one convolutional layer that
acts as a horizontal line detector and the input image is a 5x5 matrix with
a centered 3x3 grid of 1s and 0s elsewhere.
The computed GradCAM mask should detect the pixels of highest importance to
be along the two horizontal lines in the image (exact expected values stored
in ref_mask).
"""
def create_call_model_function():
def call_model(x_value_batch, call_model_args=None, expected_keys=None):
# simulates conv layer output and grads where the kernel for the conv
# layer is a horizontal line detector of kernel size 3 and the input is
# a 3x3 square of ones in the center of the image.
grad = np.zeros([5, 5])
grad[(0, -1), (0, -1)] = 2
grad[(1, -1), 1:-1] = 3
output = np.zeros([5, 5])
output[:] = [1, 2, 3, 2, 1]
output[(0, -1), :] *= -1
output[2, :] = 0
grad = grad.reshape(x_value_batch.shape)
output = output.reshape(x_value_batch.shape)
return {CONVOLUTION_LAYER_VALUES: output,
CONVOLUTION_OUTPUT_GRADIENTS: grad}
return call_model
call_model_function = create_call_model_function()
# Generate test input (centered matrix of 1s surrounded by 0s)
# and generate corresponding GradCAM mask
img = np.zeros([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH])
img[1:-1, 1:-1] = 1
img = img.reshape([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH, 1])
mask = self.grad_cam_instance.GetMask(
img,
call_model_function=call_model_function,
call_model_args=None,
should_resize=True,
three_dims=False)
# Compare generated mask to expected result
ref_mask = np.array([[0., 0., 0., 0., 0.],
[0.33, 0.67, 1., 0.67, 0.33],
[0., 0., 0., 0., 0.],
[0.33, 0.67, 1., 0.67, 0.33],
[0., 0., 0., 0., 0.]])
self.assertTrue(
np.allclose(mask, ref_mask, atol=0.01),
'Generated mask did not match reference mask.')
def testGradCamCallModelArgs(self):
img = np.ones([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH])
img = img.reshape([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH, 1])
expected_keys = [
CONVOLUTION_LAYER_VALUES, CONVOLUTION_OUTPUT_GRADIENTS
]
call_model_args = {'foo': 'bar'}
mock_call_model = mock.MagicMock(
return_value={
CONVOLUTION_LAYER_VALUES: [img],
CONVOLUTION_OUTPUT_GRADIENTS: [img]
})
self.grad_cam_instance.GetMask(
img,
call_model_function=mock_call_model,
call_model_args=call_model_args,
should_resize=True,
three_dims=False)
calls = mock_call_model.mock_calls
self.assertEqual(len(calls), 1)
for call in calls:
kwargs = call[2]
self.assertEqual(
kwargs['call_model_args'],
call_model_args,
msg='function was called with incorrect call_model_args.')
self.assertEqual(
kwargs['expected_keys'],
expected_keys,
msg='function was called with incorrect expected_keys.')
def testGradCamErrorGradientsMismatch(self):
"""Tests the GradCAM method using a simple network.
Simple test case where the network contains one convolutional layer that
acts as a horizontal line detector and the input image is a 5x5 matrix with
a centered 3x3 grid of 1s and 0s elsewhere.
The call_model_function returns the gradients without the outermost batch
dimension, so the expectation is that a ValueError will be raised.
"""
def create_call_model_function():
def call_model(x_value_batch, call_model_args=None, expected_keys=None):
grad = np.zeros(x_value_batch.shape)
output = np.zeros(x_value_batch.shape)
return {CONVOLUTION_LAYER_VALUES: output,
CONVOLUTION_OUTPUT_GRADIENTS: grad[0]}
return call_model
call_model_function = create_call_model_function()
# Generate test input (centered matrix of 1s surrounded by 0s)
# and generate corresponding GradCAM mask
img = np.zeros([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH])
img[1:-1, 1:-1] = 1
img = img.reshape([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH, 1])
expected_error = SHAPE_ERROR_MESSAGE[CONVOLUTION_OUTPUT_GRADIENTS].format(
'1', '5')
with self.assertRaisesRegex(ValueError, expected_error):
self.grad_cam_instance.GetMask(
img,
call_model_function=call_model_function,
call_model_args=None,
should_resize=True,
three_dims=False)
def testGradCamErrorValuesMismatch(self):
"""Tests the GradCAM method using a simple network.
Simple test case where the network contains one convolutional layer that
acts as a horizontal line detector and the input image is a 5x5 matrix with
a centered 3x3 grid of 1s and 0s elsewhere.
    The call_model_function returns the convolution layer values without the
    outermost batch dimension, so the expectation is that a ValueError will be
    raised.
"""
def create_call_model_function():
def call_model(x_value_batch, call_model_args=None, expected_keys=None):
grad = np.zeros(x_value_batch.shape)
output = np.zeros(x_value_batch.shape)
return {CONVOLUTION_OUTPUT_GRADIENTS: grad,
CONVOLUTION_LAYER_VALUES: output[0]}
return call_model
call_model_function = create_call_model_function()
# Generate test input (centered matrix of 1s surrounded by 0s)
# and generate corresponding GradCAM mask
    img = np.zeros([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH])
img[1:-1, 1:-1] = 1
img = img.reshape([INPUT_HEIGHT_WIDTH, INPUT_HEIGHT_WIDTH, 1])
expected_error = SHAPE_ERROR_MESSAGE[CONVOLUTION_LAYER_VALUES].format('1', '5')
with self.assertRaisesRegex(ValueError, expected_error):
self.grad_cam_instance.GetMask(
img,
call_model_function=call_model_function,
call_model_args=None,
          should_resize=True,
three_dims=False)
if __name__ == '__main__':
unittest.main()
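# --- Hedged sketch (added for illustration; not part of the original file) ---
# The tests above pin down the contract a call_model_function must satisfy for
# GradCam: given a batch of inputs, return the convolution layer values and the
# gradients of the output with respect to that layer, both keeping the batch
# dimension. A minimal framework-agnostic skeleton (values are dummies):
def example_call_model_function(x_value_batch, call_model_args=None,
                                expected_keys=None):
    batch = np.asarray(x_value_batch)
    # In real use these would come from a forward/backward pass through a model.
    conv_values = np.zeros(batch.shape)
    conv_grads = np.zeros(batch.shape)
    return {CONVOLUTION_LAYER_VALUES: conv_values,
            CONVOLUTION_OUTPUT_GRADIENTS: conv_grads}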
|
snipsco/ntm-lasagne
|
examples/associative-recall-task.py
|
Python
|
mit
| 4,480
| 0.00558
|
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from lasagne.layers import InputLayer, DenseLayer, ReshapeLayer
import lasagne.layers
import lasagne.nonlinearities
import lasagne.updates
import lasagne.objectives
import lasagne.init
from ntm.layers import NTMLayer
from ntm.memory import Memory
from ntm.controllers import DenseController
from ntm.heads import WriteHead, ReadHead
from ntm.updates import graves_rmsprop
from utils.generators import AssociativeRecallTask
from utils.visualization import Dashboard
def model(input_var, batch_size=1, size=8, num_units=100, memory_shape=(128, 20)):
# Input Layer
l_input = InputLayer((batch_size, None, size + 2), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory(memory_shape, name='memory', memory_init=lasagne.init.Constant(1e-6), learn_init=False)
controller = DenseController(l_input, memory_shape=memory_shape,
num_units=num_units, num_reads=1,
nonlinearity=lasagne.nonlinearities.rectify,
name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=memory_shape, name='write', learn_init=False,
nonlinearity_key=lasagne.nonlinearities.rectify,
nonlinearity_add=lasagne.nonlinearities.rectify),
ReadHead(controller, num_shifts=3, memory_shape=memory_shape, name='read', learn_init=False,
nonlinearity_key=lasagne.nonlinearities.rectify)
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, num_units))
l_output_dense = DenseLayer(l_output_reshape, num_units=size + 2, nonlinearity=lasagne.nonlinearities.sigmoid, \
name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, size + 2))
return l_output, l_ntm
if __name__ == '__main__':
# Define the input and expected output variable
input_var, target_var = T.tensor3s('input', 'target')
# The generator to sample examples from
generator = AssociativeRecallTask(batch_size=1, max_iter=1000000, size=8, max_num_items=6, \
min_item_length=1, max_item_length=3)
    # The model (1-layer Neural Turing Machine)
l_output, l_ntm = model(input_var, batch_size=generator.batch_size,
size=generator.size, num_units=100, memory_shape=(128, 20))
# The generated output variable and the loss function
pred_var = T.clip(lasagne.layers.get_output(l_output), 1e-6, 1. - 1e-6)
    loss = T.mean(lasagne.objectives.binary_crossentropy(pred_var, target_var))
# Create the update expressions
params = lasagne.layers.get_all_params(l_output, trainable=True)
learning_rate = theano.shared(1e-4)
updates = lasagne.updates.adam(loss, params, learning_rate=learning_rate)
# Compile the function for a training step, as well as the prediction function and
# a utility function to get the inner details of the NTM
train_fn = theano.function([input_var, target_var], loss, updates=updates)
ntm_fn = theano.function([input_var], pred_var)
ntm_layer_fn = theano.function([input_var], lasagne.layers.get_output(l_ntm, get_details=True))
# Training
try:
scores, all_scores = [], []
for i, (example_input, example_output) in generator:
score = train_fn(example_input, example_output)
scores.append(score)
all_scores.append(score)
if i % 500 == 0:
mean_scores = np.mean(scores)
if mean_scores < 0.01:
learning_rate.set_value(1e-5)
print 'Batch #%d: %.6f' % (i, mean_scores)
scores = []
except KeyboardInterrupt:
pass
# Visualization
def marker1(params):
return params['num_items'] * (params['item_length'] + 1)
def marker2(params):
return (params['num_items'] + 1) * (params['item_length'] + 1)
markers = [
{
'location': marker1,
'style': {'color': 'red', 'ls': '-'}
},
{
'location': marker2,
'style': {'color': 'green', 'ls': '-'}
}
]
dashboard = Dashboard(generator=generator, ntm_fn=ntm_fn, ntm_layer_fn=ntm_layer_fn, \
memory_shape=(128, 20), markers=markers, cmap='bone')
# Example
params = generator.sample_params()
dashboard.sample(**params)
|
CSC301H-Fall2013/JuakStore
|
Storefront/Storefront/settings.py
|
Python
|
mit
| 5,847
| 0.001197
|
import os
# Django settings for Storefront project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'JuakStore', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': 'JuakfrontPassword1',
'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
PROJECT_DIR = os.path.dirname(__file__)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '5ff9b-_!8o66m+4dq!v!u3lq*5o)$oqgj1o#byi_j1l^koeko_'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Storefront.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'Storefront.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'juakstore',
'juakstore.juakregister',
)
ACCOUNT_ACTIVATION_DAYS = 365
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'eaststorefront@gmail.com'
EMAIL_HOST_PASSWORD = 'JuakfrontPassword1'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
LorenzSelv/pinned
|
pinned/settings.py
|
Python
|
mit
| 5,525
| 0.001448
|
"""
Django settings for pinned project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from dotenv import load_dotenv, find_dotenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['PINNED_DJANGO_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.getenv('PINNED_DJANGO_DEBUG_MODE', True))
# force https connection when running on the server
SECURE_SSL_REDIRECT = bool(os.getenv('PINNED_DJANGO_SSL_REDIRECT', False))
ALLOWED_HOSTS = ['127.0.0.1', 'pinned.pythonanywhere.com', 'pinned-app-deploy.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'core',
'colorful',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'social_django',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pinned.urls'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, '../../templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pinned.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Tijuana'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
# Social-Auth and Auth0 backend config
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'core.pipeline.get_avatar',
)
SOCIAL_AUTH_TRAILING_SLASH = False
SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID')
SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile'
]
AUDIENCE = None
if os.environ.get('AUTH0_AUDIENCE'):
AUDIENCE = os.environ.get('AUTH0_AUDIENCE')
else:
if SOCIAL_AUTH_AUTH0_DOMAIN:
AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo'
if AUDIENCE:
SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE}
AUTHENTICATION_BACKENDS = {
'pinned.auth0backend.Auth0',
'django.contrib.auth.backends.ModelBackend'
}
LOGIN_URL = "/login/auth0"
LOGIN_REDIRECT_URL = "/map/"
LOGOUT_REDIRECT_URL = "/"
AUTH_USER_MODEL = 'core.User'
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
|
evanbiederstedt/RRBSfun
|
scripts/repeat_finder_scripts/repeat_finder_RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.A.py
|
Python
|
mit
| 706
| 0.009915
|
import glob
import numpy as np
import pandas as pd
from numpy import nan
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/RRBS_anno_clean")
repeats = pd.read_csv("repeats_hg19.csv")
annofiles = glob.glob("RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.A*")
def between_range(row):
    subset = repeats.loc[(row["chr"] == repeats.chr) & (row.start >= repeats.start) & (row.start <= repeats.end), :]
if subset.empty:
return np.nan
return subset.repeat_class
#newdf1 = pd.DataFrame()
for filename in annofiles:
df = pd.read_table(filename)
df["hg19_repeats"] = df.apply(between_range, axis = 1)
df.to_csv(str("repeatregions_") + filename + ".csv", index=False)
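# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# between_range() looks up which annotated repeat interval (if any) contains a
# row's start position on the same chromosome. With a tiny stand-in table:
#
#   repeats = pd.DataFrame({"chr": ["chr1"], "start": [100], "end": [200],
#                           "repeat_class": ["LINE"]})
#   between_range(pd.Series({"chr": "chr1", "start": 150}))   # Series(["LINE"])
#   between_range(pd.Series({"chr": "chr1", "start": 999}))   # nan (no overlap)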
|
COCS4950G7/COSC4950
|
Source/demoCrack3.py
|
Python
|
gpl-3.0
| 6,305
| 0.005234
|
# Chris Bugg
# 10/1/14
# NOTE: Runs on Python 2.7.6
# UPDATE:
# 10/10/14
# -> Now runs with 8 sub-processes using
# the [a-z,A-Z,0-9] alphabet
#
# 10/12/2014 UPDATE: Ubuntu 14.04, OS X, and Windows 7 have been committed to this project by Chris H
# Ubuntu GUI ran 1.71 million hashes per minute on Overclocked Tower 4.0GHz
# Ubuntu Terminal ran 1.81 million hashes per minute on Overclocked tower 4.0GHz
# Windows 7 ERROR!!! does not run the code correctly. Didn't even try to crack the hash
# OS X ran 371,000 hashes per second
# 10/13/14
# -> Now runs on Windows systems
# Put another lock on subprocess progress display to solve overwriting on Windows
# (Latest_Stable_Versions)
# 10/24/2013
# Replaced the hashing code with an optimized version, showing around 17% speedup.
# Seeing 1690716 hashes per second on my laptop, up from around 1430000.
# Nick Baum
# 10/27/2014
# The code will work great on any Linux OS running python (can use something better than Ubuntu)
# Tested the code on my laptop and desktop for Chris's chart.
# Laptop running OSX 10.9 i5 @ 2.53GHz 8gb ram, took 388.63 seconds with 383,016 hashes per second
# Set up desktop tower with intel Dual Core @ 1.8GHz, 4GB ram, running centos, 431.9 seconds with 273,697 hps
# 11/5/2014
# Replaced the numbered process and chunk variables with arrays of the same. Now automatically spawns the correct
# number of processes and chunks as needed for that number.
# Nick Baum
import hashlib
from time import time
from multiprocessing import Process, Pipe, Lock, cpu_count
import os
class DemoCrack():
algorithm = "sha256"
origHash = ''
alphabet = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") #added by chris h
chunks = []
key = ''
alphaChoice = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" # changed by chris h
countey = 0
number_of_processes = cpu_count()
def __init__(self):
#Code to fix the windows errors
"""
Details:
This 'catches' the sub-processes on windows
so they don't execute the constructor, cause
Windows processes are stupid.
"""
if not __name__ == '__main__':
return
os.system('cls' if os.name == 'nt' else 'clear')
self.whatWeGot()
self.getHash()
os.system('cls' if os.name == 'nt' else 'clear')
self.whatWeGot()
self.chunkIt()
start = time()
self.countey += 1
lock = Lock()
parentPipe, childPipe = Pipe()
children = []
        for i in range(0, self.number_of_processes):
children.append(Process(target=self.subProcess, args=(childPipe, lock, )))
children[i].start()
for chunk in self.chunks:
parentPipe.send("6")
parentPipe.send(chunk)
count = 0
done = False
rec = 0
while not done:
if count > (self.number_of_processes - 1):
for i in range(0, self.number_of_processes):
children[i].join()
print "No Dice!"
done = True
else:
rec = parentPipe.recv()
if rec == "found":
self.countey = parentPipe.recv()
for i in range(0, self.number_of_processes):
children[i].terminate()
done = True
count += 1
elapsed = (time() - start)
print "That took: ", elapsed, " seconds."
speed = (self.number_of_processes * int(self.countey)) / elapsed
if rec == "found":
print "At about: ", speed, " hashes per second."
exit = raw_input("Hit (Enter/Return) to quit ")
def subProcess(self, pipe, lock):
lock.acquire()
loops = pipe.recv()
alphabet = pipe.recv()
lock.release()
if self.looper6(alphabet, lock) == True:
lock.acquire()
pipe.send("found")
pipe.send(self.countey)
pipe.close()
            lock.release()
else:
lock.acquire()
pipe.send("not found")
pipe.close()
            lock.release()
def chunkIt(self):
chunky = [self.alphabet[i::self.number_of_processes] for i in range(self.number_of_processes)]
for chunk in chunky:
self.chunks.append(chunk)
def getHash(self):
key = raw_input("What's the 6 LowerCase-Letter Key: ")
self.origHash = hashlib.new(self.algorithm, key).hexdigest()
print "The Key you entered was: ", key
print "Which has a hash of: ", self.origHash
def whatWeGot(self):
print "**********************************"
print "Here's what we've got so far: "
print
print "Key is: ", self.key
print "Hash is: ", self.origHash
print "Searching: ", self.alphaChoice
print "**********************************"
def isSolution(self, key):
possible = hashlib.new(self.algorithm, key).hexdigest()
if possible == self.origHash:
print
print "Solution found!"
print "Key is: ", key
print "Which has a hash of: ", possible
return True
else:
return False
def looper6(self, alphabet, lock):
for x in alphabet:
#Processes were overwriting, so locking this print too --Latest_Stable_Versions
lock.acquire()
print "Searching ...", x, "*****"
lock.release()
for y in self.alphabet:
for z in self.alphabet:
for a in self.alphabet:
for b in self.alphabet:
for c in self.alphabet:
self.countey += 1
key = x + y + z + a + b + c
if self.isSolution(key):
return True
return False
DemoCrack()
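# --- Hedged note (added for illustration; not part of the original file) ---
# chunkIt() deals the alphabet out to the worker processes with a stride slice,
# so each process searches an interleaved share of the key space, e.g.:
#
#   alphabet = list("abcdef")
#   [alphabet[i::2] for i in range(2)]   # -> [['a', 'c', 'e'], ['b', 'd', 'f']]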
|
michaelBenin/Django-facebook
|
django_facebook/urls.py
|
Python
|
bsd-3-clause
| 2,316
| 0.000864
|
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.conf import settings
urlpatterns = patterns(
'django_facebook.views',
    url(r'^connect/$', 'connect', name='facebook_connect'),
url(r'^disconnect/$',
'disconnect', name='facebook_disconnect'),
url(r'^example/$', 'example', name='facebook_example'),
)
dev_patterns = patterns(
'django_facebook.example_views',
url(
r'^lazy_decorator_example/$', 'lazy_decorator_example',
name='facebook_lazy_decorator_example'),
    url(r'^decorator_example/$', 'decorator_example',
name='facebook_decorator_example'),
url(
r'^decorator_example_scope/$', 'decorator_example_scope',
name='facebook_decorator_example_scope'),
url(r'^wall_post/$',
'wall_post', name='facebook_wall_post'),
url(r'^checkins/$',
'checkins', name='facebook_checkins'),
url(r'^image_upload/$',
'image_upload', name='facebook_image_upload'),
url(r'^canvas/$', 'canvas', name='facebook_canvas'),
url(r'^page_tab/$',
'page_tab', name='facebook_page_tab'),
url(r'^open_graph_beta/$', 'open_graph_beta',
name='facebook_open_graph_beta'),
url(r'^remove_og_share/$', 'remove_og_share',
name='facebook_remove_og_share'),
)
# when developing enable the example views
if settings.DEBUG:
# only enable example views while developing
urlpatterns += dev_patterns
# help autodiscovery a bit
from django_facebook import admin
# putting this here instead of models.py reduces issues with import ordering
if getattr(settings, 'AUTH_PROFILE_MODULE', None) == 'django_facebook.FacebookProfile':
'''
If we are using the django facebook profile model, create the model
and connect it to the user create signal
'''
from django.db.models.signals import post_save
from django_facebook.models import FacebookProfile
from django_facebook.utils import get_user_model
# Make sure we create a FacebookProfile when creating a User
def create_facebook_profile(sender, instance, created, **kwargs):
if created:
FacebookProfile.objects.create(user=instance)
post_save.connect(create_facebook_profile, sender=get_user_model())
|
iver56/trondheim.kodeklubben.no
|
backend/wsgi/courses/views.py
|
Python
|
gpl-3.0
| 4,033
| 0
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import View
from .models import \
Course, Registration, Task, TaskSubmission, ScoreProfile
from .forms import TaskSubmissionForm
class CourseListView(View):
template_name = 'courses/course_select.html'
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = {
'courses': request.user.course_set.all(),
'profile': ScoreProfile.get_score_profile(request.user),
'highscore': ScoreProfile.objects.all().order_by('-score')[:10]
}
return render(request,
self.template_name,
context)
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
pass
class ProfileView(View):
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = {}
profile = ScoreProfile.get_score_profile(request.user)
context['username'] = request.user.username
context['rank'] = profile.current_rank
context['score'] = profile.score
context['courses'] = request.user.course_set.all()
context['valid_submissions'] = \
TaskSubmission.objects.filter(submitted_by=request.user,
valid=True).values_list('task',
flat=True)
return render(request, 'courses/profile.html', context)
class TaskSubmissionView(View):
form_class = TaskSubmissionForm
template_name = 'courses/task.html'
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
context = self.get_context_data()
context['form'] = self.form_class()
context['subs'] = TaskSubmission.objects.filter(
submitted_by=request.user,
task=self.kwargs['task_id']
)
context['valid_subs'] = context['subs'].filter(
valid=True
)
return render(request, self.template_name, context)
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
task = Task.objects.get(pk=self.kwargs['task_id'])
sub = TaskSubmission()
sub.task = task
sub.submitted_by = request.user
sub.valid = False
form = self.form_class(request.POST, request.FILES, instance=sub)
if form.is_valid():
form.save()
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = {}
context['task'] = Task.objects.get(pk=self.kwargs['task_id'])
return context
class CourseRegistrationView(View):
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
course_id = request.POST['course_id']
course = Course.objects.get(pk=course_id)
if course:
            Registration.objects.filter(user=request.user,
                                        course=course).delete()
else:
return
if request.POST['sign_up'] == u'master':
            Registration(user=request.user,
                         course=course,
granted=False,
code_master=True,
role=Registration.CODE_MASTER).save()
elif request.POST['sign_up'] == u'kid':
Registration(user=request.user,
course=course,
granted=False,
code_master=False,
role=Registration.KID).save()
elif request.POST['sign_up'] == u'reserve':
Registration(user=request.user,
course=course,
granted=False,
code_master=False,
role=Registration.RESERVE).save()
return
|
ibelikov/jimmy
|
jimmy/modules/throttle/__init__.py
|
Python
|
apache-2.0
| 43
| 0
|
# -*- coding: utf-8 -*-
from impl import *
|
ellmetha/django-parler
|
parler/tests/test_admin.py
|
Python
|
apache-2.0
| 1,318
| 0.00607
|
from __future__ import unicode_literals
from django.contrib.admin.util import label_for_field
from .utils import AppTestCase
from .testapp.models import SimpleModel, ConcreteModel, AbstractModel
class AdminTests(AppTestCase):
"""
Test admin features
"""
def test_list_label(self):
# Ensure model data is correct
self.assertEqual(SimpleModel._parler_meta.root_model._meta.get_field_by_name('tr_title')[0].verbose_name, "Translated Title")
# See that adding a field to the admin list_display also receives the translated title
        # This happens by TranslatedFieldDescriptor.short_description
self.assertEqual(label_for_field('tr_title', SimpleModel), "Translated Title")
    def test_list_label_abc(self):
# Ensure model data is correct
self.assertEqual(ConcreteModel._parler_meta.root_model._meta.get_field_by_name('tr_title')[0].verbose_name, "Translated Title")
# See that the TranslatedFieldDescriptor of the concrete model properly routes to the proper model
self.assertEqual(label_for_field('tr_title', ConcreteModel), "Translated Title")
# See that the TranslatedFieldDescriptor of the abstract model handles the fallback properly.
self.assertEqual(label_for_field('tr_title', AbstractModel), "Tr title")
|
ppeczek/Politikon
|
accounts/pipeline.py
|
Python
|
gpl-2.0
| 358
| 0
|
from social.pipeline.partial import partial
import logging
logger = logging.getLogger(__name__)
@partial
def save_profile(backend, user, response, *args, **kwargs):
print 'test2'
    logger.debug(backend)
    logger.debug(user)
    logger.debug(response)
if backend.name == 'facebook':
print backend
if backend.name == 'twitter':
print backend
|
Alzon/senlin
|
senlin/tests/unit/engine/service/test_triggers.py
|
Python
|
apache-2.0
| 13,447
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_messaging.rpc import dispatcher as rpc
import six
from senlin.common import exception
from senlin.engine import environment
from senlin.engine import parser
from senlin.engine import service
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit import fakes
trigger_spec = """
type: TestTrigger
version: 1.0
rule:
KEY1: a_string
KEY2: 3
"""
class TriggerTest(base.SenlinTestCase):
def setUp(self):
super(TriggerTest, self).setUp()
self.ctx = utils.dummy_context(project='trigger_test_project')
self.eng = service.EngineService('host-a', 'topic-a')
self.eng.init_tgm()
environment.global_env().register_trigger('TestTrigger',
fakes.TestTrigger)
def test_trigger_create(self):
spec = parser.simple_parse(trigger_spec)
result = self.eng.trigger_create(self.ctx, 't-1', spec)
self.assertIsInstance(result, dict)
self.assertIsNotNone(result['id'])
self.assertEqual('t-1', result['name'])
self.assertEqual('TestTrigger', result['type'])
self.assertEqual('', result['desc'])
self.assertEqual('insufficient_data', result['state'])
self.assertEqual('low', result['severity'])
self.assertEqual({}, result['links'])
self.assertEqual(spec, result['spec'])
self.assertEqual(self.ctx.user, result['user'])
self.assertEqual(self.ctx.project, result['project'])
self.assertEqual(self.ctx.domain, result['domain'])
self.assertIsNotNone(result['created_time'])
self.assertIsNone(result['updated_time'])
self.assertIsNone(result['deleted_time'])
def test_trigger_create_with_parameters(self):
spec = parser.simple_parse(trigger_spec)
result = self.eng.trigger_create(self.ctx, 't-1', spec,
description='DESC',
enabled=False,
state='OK',
severity='high')
self.assertEqual(spec, result['spec'])
self.assertEqual('DESC', result['desc'])
self.assertFalse(result['enabled'])
self.assertEqual('OK', result['state'])
self.assertEqual('high', result['severity'])
def test_trigger_create_type_not_found(self):
spec = parser.simple_parse(trigger_spec)
spec['type'] = 'Bogus'
ex = self.assertRaises(rpc.ExpectedException,
self.eng.trigger_create,
self.ctx, 't-1', spec)
self.assertEqual(exception.TriggerTypeNotFound, ex.exc_info[0])
def test_trigger_create_invalid_spec(self):
spec = parser.simple_parse(trigger_spec)
spec['KEY3'] = 'value3'
ex = self.assertRaises(rpc.ExpectedException,
self.eng.trigger_create,
self.ctx, 't-1', spec)
self.assertEqual(exception.SpecValidationFailed, ex.exc_info[0])
def test_trigger_create_failed_validation(self):
spec = parser.simple_parse(trigger_spec)
self.patchobject(fakes.TestTrigger, 'validate',
side_effect=exception.InvalidSpec(message='BOOM'))
ex = self.assertRaises(rpc.ExpectedException,
self.eng.trigger_create,
self.ctx, 't1', spec)
self.assertEqual(exception.InvalidSpec, ex.exc_info[0])
def test_trigger_get(self):
spec = parser.simple_parse(trigger_spec)
t = self.eng.trigger_create(self.ctx, 't-1', spec)
for identity in [t['id'], t['id'][:6], 't-1']:
result = self.eng.trigger_get(self.ctx, identity)
self.assertIsInstance(result, dict)
self.assertEqual(t['id'], result['id'])
ex = self.assertRaises(rpc.ExpectedException,
self.eng.trigger_get, self.ctx, 'Bogus')
self.assertEqual(exception.TriggerNotFound, ex.exc_info[0])
def test_trigger_list(self):
spec = parser.simple_parse(trigger_spec)
t1 = self.eng.trigger_create(self.ctx, 't-1', spec)
t2 = self.eng.trigger_create(self.ctx, 't-2', spec)
result = self.eng.trigger_list(self.ctx)
self.assertIsInstance(result, list)
names = [t['name'] for t in result]
ids = [t['id'] for t in result]
self.assertIn(t1['name'], names)
self.assertIn(t2['name'], names)
self.assertIn(t1['id'], ids)
self.assertIn(t2['id'], ids)
def test_trigger_list_with_limit_marker(self):
        spec = parser.simple_parse(trigger_spec)
        t1 = self.eng.trigger_create(self.ctx, 't-1', spec)
t2 = self.eng.trigger_create(self.ctx, 't-2', spec)
result = self.eng.trigger_list(self.ctx, limit=0)
self.assertEqual(0, len(result))
result = self.eng.trigger_list(self.ctx, limit=1)
self.assertEqual(1, len(result))
result = self.eng.trigger_list(self.ctx, limit=2)
self.assertEqual(2, len(result))
result = self.eng.trigger_list(self.ctx, limit=3)
self.assertEqual(2, len(result))
result = self.eng.trigger_list(self.ctx, marker=t1['id'])
self.assertEqual(1, len(result))
result = self.eng.trigger_list(self.ctx, marker=t2['id'])
self.assertEqual(0, len(result))
self.eng.trigger_create(self.ctx, 't-3', spec)
result = self.eng.trigger_list(self.ctx, limit=1, marker=t1['id'])
self.assertEqual(1, len(result))
result = self.eng.trigger_list(self.ctx, limit=2, marker=t1['id'])
self.assertEqual(2, len(result))
def test_trigger_list_with_sort_keys(self):
spec = parser.simple_parse(trigger_spec)
t1 = self.eng.trigger_create(self.ctx, 'TB', spec, severity='low')
t2 = self.eng.trigger_create(self.ctx, 'TA', spec, severity='low')
t3 = self.eng.trigger_create(self.ctx, 'TC', spec, severity='high')
# default by created_time
result = self.eng.trigger_list(self.ctx)
self.assertEqual(t1['id'], result[0]['id'])
self.assertEqual(t2['id'], result[1]['id'])
# use name for sorting
result = self.eng.trigger_list(self.ctx, sort_keys=['name'])
self.assertEqual(t2['id'], result[0]['id'])
self.assertEqual(t1['id'], result[1]['id'])
# use permission for sorting
result = self.eng.trigger_list(self.ctx, sort_keys=['severity'])
self.assertEqual(t3['id'], result[0]['id'])
# use name and permission for sorting
result = self.eng.trigger_list(self.ctx,
sort_keys=['severity', 'name'])
self.assertEqual(t3['id'], result[0]['id'])
self.assertEqual(t2['id'], result[1]['id'])
self.assertEqual(t1['id'], result[2]['id'])
# unknown keys will be ignored
result = self.eng.trigger_list(self.ctx, sort_keys=['duang'])
self.assertIsNotNone(result)
def test_trigger_list_with_sort_dir(self):
spec = parser.simple_parse(trigger_spec)
t1 = self.eng.trigger_create(self.ctx, 'TB', spec, severity='low')
t2 = self.eng.trigger_create(self.ctx, 'TA', spec, severity='low')
t3 = self.eng.trigger_create(self.ctx, 'TC', spec, severity='high')
# default by created_time, ascending
result = self.eng.trigger_list(self.ctx)
self.assertEqual(t1['id'], result[0]['id'])
self.assertEqual
|
alexey4petrov/pythonFlu
|
Foam/helper.py
|
Python
|
gpl-3.0
| 2,152
| 0.032063
|
## pythonFlu - Python wrapping for OpenFOAM C++ API
## Copyright (C) 2010- Alexey Petrov
## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR)
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## See http://sourceforge.net/projects/pythonflu
##
## Author : Alexey PETROV, Andrey SIMURZIN
##
#--------------------------------------------------------------------------------------
class TLoadHelper( object ):
def __init__( self, the_dict ):
self._dict = the_dict
pass
def __getattr__( self, the_attr ):
if not self._dict.has_key( the_attr ):
raise AttributeError( "There is no \"%s\" attribute " %the_attr)
pass
a_result = self._dict[ the_attr ]
if type( a_result ) == str:
            an_interface = a_result.split('.')[ -1 ]
#print an_interface
an_interface_path = ".".join( a_result.split( "." )[ :-1 ] )
#print an_interface_path
            exec "from %s import %s as a_result" %( an_interface_path, an_interface )
pass
exec "self.%s = a_result" %the_attr
return a_result
pass
#--------------------------------------------------------------------------------------
class TManLoadHelper( TLoadHelper ):
def __call__( self, theExpr, theDeps ):
result = theExpr.holder( theDeps )
theExpr.this.disown()
return result
pass
pass
#--------------------------------------------------------------------------------------
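# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# TLoadHelper lazily resolves dotted-path strings the first time an attribute is
# read, then caches the resolved object on the instance (Python 2 only, since the
# class relies on the exec statement and dict.has_key). With a stdlib path as a
# stand-in for a Foam interface:
#
#   helper = TLoadHelper( { "path" : "os.path" } )
#   helper.path          # first access: imports os.path and caches it
#   helper.path.join     # later accesses reuse the cached module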
|