repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2beta1_participants_list_suggestions_async.py | Python | apache-2.0 | 1,546 | 0.00194 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListSuggestions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2beta1_Participants_ListSuggestions_async]
from google.cloud import dialogflow_v2beta1
async def sample_list_suggestions():
# Create a client
client = dialogflow_v2beta1.ParticipantsAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2beta1.ListSuggestionsRequest(
)
# Make the request
page_result = await client.list_suggestions(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dialogflow_generated_dialogflow_v2beta1_Participants_ListSuggestions_async]
|
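Since sample_list_suggestions is a coroutine, it only runs when scheduled on an event loop. A minimal driver sketch, assuming it executes in the same module as the snippet above:

import asyncio

# asyncio.run() creates an event loop, awaits the coroutine, and closes the loop.
asyncio.run(sample_list_suggestions())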
sandialabs/BioCompoundML | bcml/Parser/read_training.py | Python | bsd-3-clause | 2,349 | 0 | """
This process takes in the training dataset and outputs
a data structure that includes the name of the molecule,
the predictor, and the CAS number
Attributes:
input_file (str): This is the training file that
is read by the parser
Instance (class): This is a private class which
structures each instance
Model (class): This is a public class with the
total structure of the set
"""
class Read(object):
"""This file reads a training file"""
def _read_file(self):
header = {}
compounds = []
predictors = []
weights = []
predictor = False
with open(self.input_file) as fb:
line = fb.readline()
if line.startswith('#'):
line = line.replace("#", "")
head = line.strip().split('\t')
for count, item in enumerate(head):
header[count] = item.rstrip()
else:
header[0] = 'Name'
header[1] = 'Predictor'
self.predictor = header[1]
header[2] = 'CAS'
for line in fb:
larray = line.strip().split('\t')
compound = {}
if self.user:
compound['userhash'] = {}
for count, item in enumerate(larray):
if header[count] == self.predictor:
predictor = item
elif self.weights is True and header[count] == 'Weight':
weight = float(item.rstrip())
elif self.user is True:
compound['userhash'][header[count]] = item.rstrip()
compound[header[count]] = item
compounds.append(compound)
predictors.append(predictor)
if self.weights:
weights.append(weight)
else:
weights.append(1.0)
return (compounds, predictors, weights)
def __init__(self, input_file, predictor=False, user=False, id_name=False,
weights=False):
self.input_file = input_file
self.predictor = predictor
self.user = user
self.id_name = id_name
self.weights = weights
(self.compounds, self.predictors, self.weights) = self._read_file()
|
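A hedged usage sketch for the Read class above. The file name is an assumption; the expected layout is the tab-separated format _read_file parses, with an optional header line starting with '#':

reader = Read('training.txt', predictor='Predictor')
# compounds, predictors and weights are parallel lists built by _read_file()
print(reader.compounds[0]['Name'], reader.predictors[0], reader.weights[0])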
PicoGeyer/CS-6250-A5_firewall_test | testing-topo.py | Python | mit | 2,069 | 0.006283 | #!/usr/bin/python
"Assignment 5 - This defines a topology for running a firewall. It is not \
necessarily the topology that will be used for grading, so feel free to \
edit and create new topologies and share them."
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost, RemoteController
from mininet.util import custom
from mininet.link import TCLink
from mininet.cli import CLI
class FWTopo(Topo):
''' Creates the following topology:
e1 e2 e3
| | |
\ | /
firewall (s1)
/ | \
| | |
w1 w2 w3
'''
def __init__(self, cpu=.1, bw=10, delay=None, **params):
super(FWTopo,self).__init__()
# Host and link configuration
hconfig = {'cpu': cpu}
lconfig = {'bw': bw, 'delay': delay}
# Create the firewall switch
s1 = self.addSwitch('s1')
# Create East hosts and links
e1 = self.addHost('e1', **hconfig)
e2 = self.addHost('e2', **hconfig)
e3 = self.addHost('e3', **hconfig)
self.addLink(s1, e1, port1=1, port2=1, **lconfig)
self.addLink(s1, e2, port1=2, port2=1, **lconfig)
self.addLink(s1, e3, port1=3, port2=1, **lconfig)
# Create West hosts and links
w1 = self.addHost('w1', **hconfig)
w2 = self.addHost('w2', **hconfig)
w3 = self.addHost('w3', **hconfig)
self.addLink(s1, w1, port1=4, port2=1, **lconfig)
self.addLink(s1, w2, port1=5, port2=1, **lconfig)
self.addLink(s1, w3, port1=6, port2=1, **lconfig)
def main():
print "Starting topology"
topo = FWTopo()
net = Mininet(topo=topo, link=TCLink, controller=RemoteController, autoSetMacs=True)
net.start()
try:
from unit_tests import run_tests
raw_input('Unit tests to be run next. Make sure your firewall is running, then press a key')
run_tests(net)
except ImportError:
raise
CLI(net)
if __name__ == '__main__':
main()
|
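The topology above can be inspected without starting Mininet or the remote controller; a quick sketch using the standard Topo accessors (root privileges are only needed once net.start() is called):

topo = FWTopo(bw=10)
print(topo.hosts())     # ['e1', 'e2', 'e3', 'w1', 'w2', 'w3']
print(topo.switches())  # ['s1']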
deka108/mathqa-server | cms/admin.py | Python | apache-2.0 | 1,772 | 0 | """
# Name: cms/admin.py
# Description:
# Created by: Phuc Le-Sanh
# Date Created: N.A
# Last Modified: Nov 21 2016
# Modified by: Phuc Le-Sanh
"""
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from apiv2.models import *
class QuestionAdmin(ModelAdmin):
fields = ('id', 'content', 'concept', 'is_sample', 'subconcept',
'difficulty_level', 'marks', 'keypoints', 'keywords',
'paper', 'source', 'used_for', 'response_type',
'question_type')
empty_value_display = '-empty-'
# from meas_models.models import *
#
#
# class EducationLevelAdmin(ModelAdmin):
# list_display = ('id', 'name', 'description',)
# list_editable = ('name', 'description',)
#
#
# admin.site.register(EducationLevel, EducationLevelAdmin)
#
#
# class SubjectAdmin(ModelAdmin):
# list_display = ('id', 'name', 'description',)
# list_editable = ('name', 'description',)
#
#
# admin.site.register(Subject, SubjectAdmin)
#
#
# class TopicAdmin(ModelAdmin):
# list_display = ('id', 'name', 'description', 'order',)
# list_editable = ('name', 'description', 'order',)
#
#
# admin.site.register(Topic, TopicAdmin)
#
#
# class ConceptAdmin(ModelAdmin):
# list_display = ('id', 'name', 'description',)
# list_editable = ('name', 'description',)
#
#
# admin.site.register(Concept, ConceptAdmin)
#
#
# class TestAdmin(ModelAdmin):
# list_display = ('id', 'name', 'test_type')
# list_editable = ('name', 'test_type')
#
#
# admin.site.register(Test, TestAdmin)
#
#
# class QuestionAdmin(ModelAdmin):
# list_display = ('id', 'content', 'difficulty_level')
# list_editable = ('content', 'difficulty_level')
#
#
# admin.site.register(Question, QuestionAdmin)
|
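One note on the active code above: QuestionAdmin is defined but never registered, so it has no effect as written. A hedged sketch of the registration that the commented-out blocks suggest, assuming Question is exported by apiv2.models:

admin.site.register(Question, QuestionAdmin)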
mwindau/praktikum | v504/oppellog.py | Python | mit | 701 | 0.012839 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
u, i=np.genfromtxt('Rohdaten/Daten_1_5.txt', unpack=True)
i=np.log(i)
u=np.log(u)
def f(u, a, b):
return a * u + b
params, covariance = curve_fit(f, u, i)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '+-', errors[0])
print('b =', params[1], '+-', errors[1])
x_plot = np.linspace(2.2, 5.6)
plt.plot(u, i, 'rx', label='Messwerte')
plt.plot(x_plot, f(x_plot, params[0], params[1]), 'b-', label='Ausgleichsgerade')
plt.xlabel('log(U / V)')
plt.ylabel('log(I / mA)')
#plt.xlim(8, 300)
#plt.yscale('log')
#plt.xscale('log')
plt.grid()
plt.legend(loc='best')
plt.savefig('build/oppellog.pdf')
#
|
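Because both axes are log-transformed before the fit, the slope a and intercept b describe a power law, log(I) = a*log(U) + b; a small sketch recovering it from the fitted parameters (continuing the script above):

# Equivalent power law: I = exp(b) * U**a
a, b = params
print('I(U) = {:.3g} * U^{:.3g}'.format(np.exp(b), a))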
Telefonica/vaultier-cli | vaultcli/main.py | Python | gpl-3.0 | 37,793 | 0.005716 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Adrián López Tejedor <adrianlzt@gmail.com>
# Óscar García Amor <ogarcia@connectical.com>
#
# Distributed under terms of the GNU GPLv3 license.
from vaultcli.auth import Auth
from vaultcli.client import Client
from vaultcli.config import Config
from vaultcli.secret import Secret
from vaultcli.views import print_tree, print_workspaces, print_vaults, print_cards, print_secrets, print_secret
from vaultcli.helpers import query_yes_no
from zipfile import ZipFile, ZIP_DEFLATED
import argparse
import json
import os
import sys
def get_config_file(args):
if args.config:
return args.config
else:
config_files = [
os.path.join(os.path.expanduser('~'), '.config/vaultcli/vaultcli.conf'),
os.path.join(os.path.expanduser('~'), '.vaultcli.conf')
]
config_file = [file for file in config_files if os.access(file, os.R_OK)]
if config_file == []:
# No config file found, promt to generate default one
if query_yes_no('No config file found. You want create new one?'):
try:
os.makedirs(os.path.join(os.path.expanduser('~'), '.config/vaultcli'), exist_ok=True)
except Exception as e:
err = 'vaultcli cannot create path to new config file.\n{0}'.format(e)
raise SystemExit(err)
config_file = os.path.join(os.path.expanduser('~'), '.config/vaultcli/vaultcli.conf')
config = Config(config_file)
config.set_default('email', 'user@example.com')
config.set_default('server', 'https://example.com')
config.set_default('key','vaultier.key')
msg = 'New config file created in \'{}\'.\nPlease edit it and set your custom parameters.'.format(config_file)
raise SystemExit(msg)
else:
raise SystemExit()
else:
return config_file[0]
def config(args):
# Get config in object
config = Config(get_config_file(args))
if '.' in args.option:
try:
section, option = args.option.split('.')
except ValueError as e:
err = 'You can only specify an \'option\' or \'section.option\'.\n{0}'.format(e)
raise SystemExit(err)
else:
section = 'DEFAULT'
option = args.option
if args.value:
config.set(section, option, args.value)
else:
err = config.get(section, option) if config.get(section, option) else '{0} is not defined in config'.format(args.option)
raise SystemExit(err)
def write_binary_file(file_name, file_contents):
try:
with open(file_name, 'wb') as file:
file.write(file_contents)
except Exception as e:
err = 'vaultcli cannot write file.\n{0}'.format(e)
raise SystemExit(err)
def write_json_file(file_name, file_contents):
try:
with open(file_name, 'w') as file:
json.dump(file_contents, file)
except Exception as e:
err = 'vaultcli cannot write file.\n{0}'.format(e)
raise SystemExit(err)
def configure_client(args):
# Get config in object
config_file = get_config_file(args)
config = Config(config_file)
# Get config vaules from config file
email = config.get_default('email')
server = config.get_default('server')
key = config.get_default('key')
# Check if main values have data
if not email or not server or not key:
err = 'Your config file \'{}\' is invalid, please check it.'.format(config_file)
raise SystemExit(err)
try:
key = open(key, "r").read()
except Exception as e:
err = 'vaultcli have a problem reading your keyfile.\n{0}'.format(e)
raise SystemExit(err)
if args.insecure:
verify = False
else:
if config.get_default('verify') == None:
verify = True
else:
verify = False if config.get_default('verify').lower() == 'false' else config.get_default('verify')
token = Auth(server, email, key, verify).get_token()
return Client(server, token, key, verify)
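# Hypothetical smoke test of the helpers above (the parsed `args` object and the
# workspace name are assumptions, not part of this module):
#   client = configure_client(args)
#   ws = client.add_workspace('Demo', 'created from vaultcli')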
def import_workspace(args):
try:
with args.file as file:
data = json.load(file)
except Exception as e:
err = 'vaultcli cannot read json file.\n{0}'.format(e)
raise SystemExit(err)
if 'name' in data:
client = configure_client(args)
if args.use_ids:
if 'id' in data:
workspace_data = {
'id': data['id'],
'name': data['name'],
'description': data.get('description')
}
try:
client.set_workspace(data['id'], workspace_data)
except Exception as e:
raise SystemExit(e)
else:
err = 'Cannot use workspace ID because not provided in file'
raise SystemExit(err)
else:
try:
new_workspace = client.add_workspace(data['name'], data.get('description'))
except Exception as e:
raise SystemExit(e)
if 'vaults' in data:
if isinstance(data['vaults'], list):
for vault in data['vaults']:
if 'name' in vault:
if args.use_ids:
if 'id' in vault:
vault_data = {
'id': vault['id'],
'name': vault['name'],
'description': vault.get('description'),
'color': vault.get('color')
}
try:
client.set_vault(vault['id'], vault_data)
except Exception as e:
raise SystemExit(e)
else:
err = 'Cannot use vault ID because not provided in file'
raise SystemExit(err)
else:
try:
new_vault = client.add_vault(new_workspace['workspace']['id'], vault['name'], vault.get('description'), vault.get('color'))
except Exception as e:
raise SystemExit(e)
if 'cards' in vault:
if isinstance(vault['cards'], list):
for card in vault['cards']:
if 'name' in card:
if args.use_ids:
if 'id' in card:
card_data = {
'id': card['id'],
'name': card['name'],
'description': card.get('description')
}
client.set_card(card['id'], card_data)
else:
err = 'Cannot use card ID because not provided in file'
raise SystemExit(err)
else:
try:
new_card = client.add_card(new_vault['id'], card['name'], card.get('description'))
except Exception as e:
raise SystemExit(e)
if 'secrets' in card:
if isinstance(card['s |
Smart-Torvy/torvy-home-assistant | homeassistant/components/cover/demo.py | Python | mit | 5,159 | 0 | """
Demo platform for the cover component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/demo/
"""
from homeassistant.components.cover import CoverDevice
from homeassistant.helpers.event import track_utc_time_change
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo covers."""
add_devices([
DemoCover(hass, 'Kitchen Window'),
DemoCover(hass, 'Hall Window', 10),
DemoCover(hass, 'Living Room Window', 70, 50),
])
class DemoCover(CoverDevice):
"""Representation of a demo cover."""
# pylint: disable=no-self-use, too-many-instance-attributes
def __init__(self, hass, name, position=None, tilt_position=None):
"""Initialize the cover."""
self.hass = hass
self._name = name
self._position = position
self._set_position = None
self._set_tilt_position = None
self._tilt_position = tilt_position
self._closing = True
self._closing_tilt = True
self._unsub_listener_cover = None
self._unsub_listener_cover_tilt = None
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def current_cover_tilt_position(self):
"""Return the current tilt position of the cover."""
return self._tilt_position
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._position is not None:
if self.current_cover_position > 0:
return False
else:
return True
else:
return None
def close_cover(self, **kwargs):
"""Close the cover."""
if self._position in (0, None):
return
self._listen_cover()
self._closing = True
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
if self._tilt_position in (0, None):
return
self._listen_cover_tilt()
self._closing_tilt = True
def open_cover(self, **kwargs):
"""Open the cover."""
if self._position in (100, None):
return
self._listen_cover()
self._closing = False
def open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
if self._tilt_position in (100, None):
return
self._listen_cover_tilt()
self._closing_tilt = False
def set_cover_position(self, position, **kwargs):
"""Move the cover to a specific position."""
self._set_position = round(position, -1)
if self._position == position:
return
self._listen_cover()
self._closing = position < self._position
def set_cover_tilt_position(self, tilt_position, **kwargs):
"""Move the cover til to a specific position."""
self._set_tilt_position = round(tilt_position, -1)
if self._tilt_position == tilt_position:
return
self._listen_cover_tilt()
self._closing_tilt = tilt_position < self._tilt_position
def stop_cover(self, **kwargs):
"""Stop the cover."""
if self._position is None:
return
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
self._set_position = None
def stop_cover_tilt(self, **kwargs):
"""Stop the cover tilt."""
if self._tilt_position is None:
return
if self._unsub_listener_cover_tilt is not None:
self._unsub_listener_cover_tilt()
self._unsub_listener_cover_tilt = None
self._set_tilt_position = None
def _listen_cover(self):
"""Listen for changes in cover."""
if self._unsub_listener_cover is None:
self._unsub_listener_cover = track_utc_time_change(
self.hass, self._time_changed_cover)
def _time_changed_cover(self, now):
"""Track time changes."""
if self._closing:
self._position -= 10
else:
self._position += 10
if self._position in (100, 0, self._set_position):
self.stop_cover()
self.update_ha_state()
def _listen_cover_tilt(self):
"""Listen for changes in cover tilt."""
if self._unsub_listener_cover_tilt is None:
self._unsub_listener_cover_tilt = track_utc_time_change(
self.hass, self._time_changed_cover_tilt)
def _time_changed_cover_tilt(self, now):
"""Track time changes."""
if self._closing_tilt:
self._tilt_position -= 10
else:
self._tilt_position += 10
if self._tilt_position in (100, 0, self._set_tilt_position):
self.stop_cover_tilt()
self.update_ha_state()
|
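A minimal sketch exercising DemoCover directly; passing None for hass is an assumption that only holds here because the attributes read below never touch it:

cover = DemoCover(None, 'Test Window', position=0)
print(cover.is_closed)               # True: position 0 means fully closed
print(cover.current_cover_position)  # 0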
plxaye/chromium | src/chrome/test/functional/media/pyauto_media.py | Python | apache-2.0 | 1,125 | 0.007111 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""PyAuto media test base. Handles PyAuto initialization and path setup.
Required to ensure each media test can load the appropriate libraries. Each
test must include this snippet:
# This should be at the top
import pyauto_media
<test code>
# This should be at the bottom.
if __name__ == '__main__':
pyauto_media.Main()
"""
import os
import sys
def _SetupPaths():
"""Add paths required for loading PyAuto and other utilities to sys.path."""
media_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(media_dir)
sys.path.append(os.path.normpath(os.path.join(media_dir, os.pardir)))
# Add psutil library path.
# TODO(dalecurtis): This should only be added for tests which use psutil.
sys.path.append(os.path.normpath(os.path.join(
media_dir, os.pardir, os.pardir, os.pardir, os.pardir,
'third_party', 'psutil')))
_SetupPaths()
import pyauto_functional
Main = pyauto_functional.Main
|
bremond/siconos | Build/tools/publish.py | Python | apache-2.0 | 6,107 | 0.000819 | #!/usr/bin/env python
# documentation publication on http://siconos.gforge.inria.fr
# ./publish [-r <sha>] [-u <gforge_user>] [-s <src dir>] [-b <build dir>] \
#           [-w <workdir>] [-m]
# -r followed by some git sha
# -u followed by gforge login
# -s followed by src directory
# -b followed by build directory
# -d : publish devel version
# -m : publish as the main documentation (sets the site root redirect)
# -w followed by work directory
# example:
# to update site with current documentation
# ./publish -u bremond -b \
# /scratch/maurice/build/maurice/siconos/master/Release -s ~/src/git/siconos
# to update site with 3.5.x (=rev 3194) documentation
# ./publish -r3194 -u bremond [...]
# Note: some rsync errors may occur due to file modes on the remote site
import sys
import os
import shutil
import tempfile
import re
from subprocess import check_call
from getpass import getuser
from getopt import gnu_getopt, GetoptError
#
# exit only if not imported
#
def stop(n):
import __main__ as main
if hasattr(main, '__file__'):
sys.exit(n)
else:
raise Exception('stop', n)
# a tempdir class to be used like 'with TempDir() as tmpdir:'
# i.e. the temp directory is erased at the end of the block
class WorkDir():
def __init__(self, prefix, tmp=False):
self.name = None
self.prefix = prefix
self.tmp = tmp
def __enter__(self):
# just create prefix
if not os.path.exists(self.prefix):
os.makedirs(self.prefix)
# The user of mkdtemp() is responsible for deleting the
# temporary directory and its contents when done with it.
if self.tmp:
self.name = tempfile.mkdtemp(prefix=self.prefix)
return self.name
else:
return self.prefix
def __exit__(self, xtype, value, traceback):
# So we remove directory here
if self.tmp:
shutil.rmtree(self.name)
else:
pass
devel = False
user = getuser()
srcdir = None
builddir = None
revision = 'HEAD'
main_doc = False
workdir_path = '/tmp/{0}/publish'.format(getuser())
try:
opts, args = gnu_getopt(sys.argv[1:], 'r:u:s:b:w:dm', ['devel',
'main',
'revision=',
'user=',
'srcdir=',
'builddir=',
'workdir='])
for o, a in opts:
if o in ['-m', '--main']:
main_doc = True
if o in ['-r', '--revision']:
revision = a
elif o in ['-u', '--user']:
user = a
elif o in ['-d', '--devel']:
devel = True
elif o in ['-s', '--srcdir']:
srcdir = a
elif o in ['-b', '--builddir']:
builddir = a
elif o in ['-w', '--workdir']:
workdir_path = a
except GetoptError, err:
# print help information and exit:
sys.stderr.write(str(err)) # will print something like 'option
# -a not recognized'
stop(2)
def get_version(path):
with open(os.path.join(path, 'cmake', 'SiconosVersion.cmake')) as\
cmakefile:
cmakefile_as_str = cmakefile.read()
majorm = re.findall(r'MAJOR_VERSION (\w+).*', cmakefile_as_str)
minorm = re.findall(r'MINOR_VERSION (\w+).*', cmakefile_as_str)
patchm = re.findall(r'PATCH_VERSION (\w+).*', cmakefile_as_str)
if len(majorm) > 0:
return '{0}.{1}.{2}'.format(majorm[0],
minorm[0],
patchm[0])
else:
return None
with WorkDir(workdir_path) as workdir:
if builddir is None:
builddir = os.path.join(workdir, 'build')
try:
os.mkdir(builddir)
except OSError:
pass
if srcdir is None:
bsrcdir = os.path.join(workdir, 'src')
srcdir = os.path.join(workdir, 'src', 'siconos')
try:
os.mkdir(bsrcdir)
except OSError:
pass
try:
os.mkdir(srcdir)
except OSError:
pass
# get sources
try:
check_call(['git', 'clone', 'git@github.com:siconos/siconos.git'],
cwd=bsrcdir)
except:
pass
else:
bsrcdir = os.path.dirname(srcdir)
check_call(['git', 'checkout', revision], cwd=srcdir)
if not devel:
version = get_version(srcdir)
else:
version = 'devel'
assert(version is not None)
# make documentation
check_call(['cmake', srcdir, '-DWITH_DOCUMENTATION=TRUE'] +
['-DWITH_{0}_DOCUMENTATION=TRUE'.format(m) for m in
['numerics', 'kernel', 'control', 'mechanics', 'io']],
cwd=builddir)
check_call(['make', 'doc'], cwd=builddir)
# second pass for make doc
check_call(['make', 'doc'], cwd=builddir)
generated_doc_path = os.path.join(builddir, 'Docs', 'build', 'html')
# change local modes
for root, dirs, files in os.walk(generated_doc_path):
for d in dirs:
os.chmod(os.path.join(root, d), 0o775)
for f in files:
os.chmod(os.path.join(root, f), 0o664)
os.chmod(generated_doc_path, 0o775)
doc_path = '{0}@scm.gforge.inria.fr:/home/groups/siconos/htdocs'.\
format(user)
destination = os.path.join(doc_path, version)
# upload
check_call(['rsync', '-rlvp', '-e', 'ssh -o "StrictHostKeyChecking no"',
generated_doc_path, destination])
# htaccess if this is the main documentation
if main_doc:
htaccess_filename = os.path.join(workdir, '.htaccess')
with open(htaccess_filename, 'w') as htaccess:
htaccess.write(
'redirect 301 /index.html http://siconos.gforge.inria.fr/{0}/html/index.html\n'.\
format(version))
check_call(['rsync', htaccess_filename, doc_path])
|
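A worked example of the get_version regexes above, assuming the usual SET(... VERSION n) layout of SiconosVersion.cmake:

import re
text = 'SET(MAJOR_VERSION 3)\nSET(MINOR_VERSION 8)\nSET(PATCH_VERSION 0)'
print(re.findall(r'MAJOR_VERSION (\w+).*', text))  # ['3'] -> version '3.8.0'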
RaoUmer/distarray | docs/sphinx/source/conf.py | Python | bsd-3-clause | 8,831 | 0.006455 | # -*- coding: utf-8 -*-
#
# DistArray documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 31 01:11:34 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
try:
from distarray.__version__ import __version__, __short_version__
except ImportError:
__version__ = __short_version__ = "<unknown>"
# Mock out difficult imports so readthedocs autodoc works
import mock
MOCK_MODULES = ['mpi4py', 'h5py', 'zmq',
'matplotlib',
'matplotlib.pyplot',
'numpy',
'numpy.lib',
'numpy.lib.format',
'numpy.lib.utils',
'numpy.compat',
'IPython',
'IPython.parallel',
'numbers',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.MagicMock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../distarray'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax',
'sphinx.ext.autosummary', 'sphinx.ext.napoleon',
'sphinxcontrib.programoutput']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DistArray'
copyright = u'2008-2014, IPython Development Team and Enthought, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __short_version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/www/output/images/distarray-logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DistArraydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DistArray.tex', u'DistArray Documentation',
u'IPython Development Team and Enthought, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'distarray', u'DistArray Documentation',
[u'IPython Development Team and Enthought, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------- |
dldinternet/aws-cfn-resource-bridge | setup.py | Python | apache-2.0 | 3,161 | 0.001582 | #!/usr/bin/env python
#==============================================================================
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
import sys
from distutils.core import setup, Distribution
from aws.cfn import bridge
name = 'aws-cfn-resource-bridge'
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
print >> sys.stderr, "Python 2.6+ is required"
sys.exit(1)
rpm_requires = ['python >= 2.6', 'python-daemon', 'python-botocore >= 0.17.0']
dependencies = ['python-daemon>=1.5.2', 'botocore>=0.17.0']
if sys.version_info[:2] == (2, 6):
# For python2.6 we have to require argparse
rpm_requires.append('python-argparse >= 1.1')
dependencies.append('argparse>=1.1')
_opts = {
'build_scripts': {'executable': '/usr/bin/env python'},
'bdist_rpm': {'requires': rpm_requires}
}
_data_files = [('share/doc/%s-%s' % (name, bridge.__version__), ['NOTICE.txt', 'LICENSE']),
('init/redhat', ['init/redhat/cfn-resource-bridge']),
('init/ubuntu', ['init/ubuntu/cfn-resource-bridge'])]
try:
import py2exe
_opts['py2exe'] = {
# TODO: Need to update this for this package
'typelibs': [('{000C1092-0000-0000-C000-000000000046}', 1033, 1, 0),
('{E34CB9F1-C7F7-424C-BE29-027DCC09363A}', 0, 1, 0)],
'excludes': ['certifi', 'pyreadline', 'difflib', 'distutils', 'doctest', 'pdb', 'inspect', 'unittest',
'adodbapi'],
'includes': ['chardet', 'dbhash', 'dumbdbm'],
'dll_excludes': ['msvcr71.dll', 'w9xpopen.exe', ''],
'compressed': True,
'com_server': [],
'ctypes_com_server': [],
'service': ["aws.cfn.bridge.winbridge"],
'isapi': [],
'windows': [],
'zipfile': 'library.zip',
'console': ['bin/cfn-resource-bridge']
}
_data_files = [('', ['license/win/NOTICE.txt', 'license/win/LICENSE.rtf'])]
except ImportError:
pass
setup_options = dict(
name=name,
version=bridge.__version__,
description='A custom resource framework for AWS CloudFormation',
long_description=open('README.md').read(),
author='AWS CloudFormation',
url='http://aws.amazon.com/cloudformation/',
license='Apache License 2.0',
scripts=['bin/cfn-resource-bridge'],
classifiers=[],
packages=[
'aws',
'aws.cfn',
'aws.cfn.bridge'
],
install_requires=dependencies,
data_files=_data_files,
options=_opts
)
setup(**setup_options)
|
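Typical invocations for the configuration above, as a sketch (which targets are available depends on the host platform and on whether the py2exe import succeeded):

#   python setup.py bdist_rpm   # consumes the rpm_requires list via _opts
#   python setup.py py2exe      # Windows only; builds the console/service targets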
timpalpant/calibre | src/calibre/gui2/convert/single.py | Python | gpl-3.0 | 12,211 | 0.004586 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import cPickle, shutil
from PyQt5.Qt import QAbstractListModel, Qt, QFont, QModelIndex, QDialog, QCoreApplication, QSize
from calibre.gui2 import gprefs
from calibre.ebooks.conversion.config import (GuiRecommendations, save_specifics,
load_specifics)
from calibre.gui2.convert.single_ui import Ui_Dialog
from calibre.gui2.convert.metadata import MetadataWidget
from calibre.gui2.convert.look_and_feel import LookAndFeelWidget
from calibre.gui2.convert.heuristics import HeuristicsWidget
from calibre.gui2.convert.search_and_replace import SearchAndReplaceWidget
from calibre.gui2.convert.page_setup import PageSetupWidget
from calibre.gui2.convert.structure_detection import StructureDetectionWidget
from calibre.gui2.convert.toc import TOCWidget
from calibre.gui2.convert.debug import DebugWidget
from calibre.ebooks.conversion.plumber import (Plumber,
supported_input_formats, ARCHIVE_FMTS)
from calibre.ebooks.conversion.config import delete_specifics
from calibre.customize.ui import available_output_formats
from calibre.customize.conversion import OptionRecommendation
from calibre.utils.config import prefs, tweaks
from calibre.utils.logging import Log
class NoSupportedInputFormats(Exception):
def __init__(self, available_formats):
Exception.__init__(self)
self.available_formats = available_formats
def sort_formats_by_preference(formats, prefs):
uprefs = [x.upper() for x in prefs]
def key(x):
try:
return uprefs.index(x.upper())
except ValueError:
pass
return len(prefs)
return sorted(formats, key=key)
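# Worked example (sketch): with prefs = ['mobi', 'epub'],
# sort_formats_by_preference(['AZW3', 'EPUB', 'MOBI'], prefs) returns
# ['MOBI', 'EPUB', 'AZW3'] - formats missing from prefs sort last at len(prefs).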
def get_output_formats(preferred_output_format):
all_formats = {x.upper() for x in available_output_formats()}
all_formats.discard('OEB')
pfo = preferred_output_format.upper() if preferred_output_format else ''
restrict = tweaks['restrict_output_formats']
if restrict:
fmts = [x.upper() for x in restrict]
if pfo and pfo not in fmts and pfo in all_formats:
fmts.append(pfo)
else:
fmts = list(sorted(all_formats,
key=lambda x:{'EPUB':'!A', 'MOBI':'!B'}.get(x.upper(), x)))
return fmts
class GroupModel(QAbstractListModel):
def __init__(self, widgets):
self.widgets = widgets
QAbstractListModel.__init__(self)
def rowCount(self, *args):
return len(self.widgets)
def data(self, index, role):
try:
widget = self.widgets[index.row()]
except:
return None
if role == Qt.DisplayRole:
return (widget.config_title())
if role == Qt.DecorationRole:
return (widget.config_icon())
if role == Qt.FontRole:
f = QFont()
f.setBold(True)
return (f)
return None
def get_preferred_input_format_for_book(db, book_id):
recs = load_specifics(db, book_id)
if recs:
return recs.get('gui_preferred_input_format', None)
def get_available_formats_for_book(db, book_id):
available_formats = db.formats(book_id, index_is_id=True)
if not available_formats:
available_formats = ''
return set([x.lower() for x in
available_formats.split(',')])
def get_supported_input_formats_for_book(db, book_id):
available_formats = get_available_formats_for_book(db, book_id)
input_formats = set([x.lower() for x in supported_input_formats()])
input_formats = sorted(available_formats.intersection(input_formats))
if not input_formats:
raise NoSupportedInputFormats(tuple(x for x in available_formats if x))
return input_formats
def get_input_format_for_book(db, book_id, pref):
'''
Return (preferred input format, list of available formats) for the book
identified by book_id. Raises an error if the book has no input formats.
:param pref: If None, the format used as input for the last conversion, if
any, on this book is used. If not None, should be a lowercase format like
'epub' or 'mobi'. If you do not want the last converted format to be used,
set pref=False.
'''
if pref is None:
pref = get_preferred_input_format_for_book(db, book_id)
if hasattr(pref, 'lower'):
pref = pref.lower()
input_formats = get_supported_input_formats_for_book(db, book_id)
input_format = pref if pref in input_formats else \
sort_formats_by_preference(input_formats, prefs['input_format_order'])[0]
return input_format, input_formats
class Config(QDialog, Ui_Dialog):
'''
Configuration dialog for single book conversion. If accepted, has the
following important attributes
output_format - Output format (without a leading .)
input_format - Input format (without a leading .)
opf_path - Path to OPF file with user specified metadata
cover_path - Path to user specified cover (can be None)
recommendations - A pickled list of 3 tuples in the same format as the
recommendations member of the Input/Output plugins.
'''
def __init__(self, parent, db, book_id,
preferred_input_format=None, preferred_output_format=None):
QDialog.__init__(self, parent)
self.setupUi(self)
self.opt_individual_saved_settings.setVisible(False)
self.db, self.book_id = db, book_id
self.setup_input_output_formats(self.db, self.book_id, preferred_input_format,
preferred_output_format)
self.setup_pipeline()
self.input_formats.currentIndexChanged[str].connect(self.setup_pipeline)
self.output_formats.currentIndexChanged[str].connect(self.setup_pipeline)
self.groups.activated[(QModelIndex)].connect(self.show_pane)
self.groups.clicked[(QModelIndex)].connect(self.show_pane)
self.groups.entered[(QModelIndex)].connect(self.show_group_help)
rb = self.buttonBox.button(self.buttonBox.RestoreDefaults)
rb.setText(_('Restore &Defaults'))
rb.clicked.connect(self.restore_defaults)
self.groups.setMouseTracking(True)
geom = gprefs.get('convert_single_dialog_geom', None)
if geom:
self.restoreGeometry(geom)
else:
self.resize(self.sizeHint())
def sizeHint(self):
desktop = QCoreApplication.instance().desktop()
geom = desktop.availableGeometry(self)
nh, nw = max(300, geom.height()-50), max(400, geom.width()-70)
return QSize(nw, nh)
def restore_defaults(self):
delete_specifics(self.db, self.book_id)
self.setup_pipeline()
@property
def input_format(self):
return unicode(self.input_formats.currentText()).lower()
@property
def output_format(self):
return unicode(self.output_formats.currentText()).lower()
@property
def manually_fine_tune_toc(self):
for i in xrange(self.stack.count()):
w = self.stack.widget(i)
if hasattr(w, 'manually_fine_tune_toc'):
return w.manually_fine_tune_toc.isChecked()
def setup_pipeline(self, *args):
oidx = self.groups.currentIndex().row()
input_format = self.input_format
output_format = self.output_format
output_path = 'dummy.'+output_format
log = Log()
log.outputs = []
input_file = 'dummy.'+input_format
if input_format in ARCHIVE_FMTS:
input_file = 'dummy.html'
self.plumber = Plumber(input_file, output_path, log)
def widget_factory(cls):
return cls(self.stack, self.plumber.get_option_by_name,
self.plumber.get_option_help, self.db, self.book_id)
self.mw = widget_factory(MetadataWidget)
self.setWindowTitle(_('Convert')+ ' ' + unicode(self.mw.title.text()))
lf = widget_factory(LookAndFeelWidget)
hw = widget_factory(HeuristicsWidget)
sr = widget_factory(SearchAndReplaceWidget)
ps = widget_factory(PageS |
tacitia/ThoughtFlow | project/project/settings.py | Python | mit | 4,859 | 0.00247 | #!/usr/bin/python
import os
# CUSTOM PATH SETTINGS
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
REPO_DIR = os.path.realpath(os.path.join(PROJECT_DIR, '..'))
HOME_DIR = os.path.realpath(os.path.join(REPO_DIR, '..'))
STATIC_DIR = os.path.realpath(os.path.join(HOME_DIR, 'staticfiles'))
MEDIA_DIR = os.path.realpath(os.path.join(HOME_DIR, 'media'))
# Generate a secret_key at: http://www.miniwebtool.com/django-secret-key-generator/
SECRET_KEY = '4pj+-=v^pwzo$dy=gde=6^xkvy*(f5azz*fn^^&dzt8f6f9d%9'
DEBUG = True
ADMINS = [('Hua', 'lillian.g621@gmail.com')]
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party
'rest_framework',
'rest_framework_swagger',
'autoslug',
'django_seo_js',
# Apps
'core',
'authentication',
'logger',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
)
# Django SEO JS settings
MIDDLEWARE_CLASSES = (
'django_seo_js.middleware.HashBangMiddleware', # If you're using #!
'django_seo_js.middleware.UserAgentMiddleware', # If you want to detect by user agent
) + MIDDLEWARE_CLASSES
SEO_JS_ENABLED = True
SEO_JS_BACKEND = 'django_seo_js.backends.PrerenderHosted'
SEO_JS_PRERENDER_URL = 'http://localhost:8555/' # Note trailing slash.
SEO_JS_PRERENDER_RECACHE_URL = 'http://localhost:8555/recache'
SEO_JS_USER_AGENTS = [
'Googlebot',
'Yahoo',
'bingbot',
'Baidu',
'Ask Jeeves',
'baiduspider',
'twitterbot',
'facebookexternalhit',
'rogerbot',
'linkedinbot',
'embedly',
'quora link preview',
'showyoubot',
'outbrain',
'pinterest',
'slackbot'
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors':
(
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
}
},
]
SWAGGER_SETTINGS = {
"exclude_namespaces": [], # List URL namespaces to ignore
"api_version": '1.0', # Specify your API's version
"api_path": "", # Specify the path to your API not a root level
"enabled_methods": [ # Specify which methods to enable in Swagger UI
'get',
'post',
'put',
'patch',
'delete'
],
"api_key": '', # An API key
"is_authenticated": True, # Set to True to enforce user authentication,
"is_superuser": True, # Set to True to enforce admin only access
}
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ng_thoughtnet',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '/Applications/MAMP/tmp/mysql/mysql.sock',
'PORT': '5432',
},
'sqlite3': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Staticfiles settings
STATIC_ROOT = STATIC_DIR
STATIC_URL = "/static/"
STATICFILES_DIRS = (
os.path.join(REPO_DIR, "staticfiles"),
os.path.join(REPO_DIR, "bower_components"),
)
# Media settings
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = "/media/"
AUTH_USER_MODEL = 'authentication.Account'
|
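A hedged sanity check that the tuple-prepending above leaves the SEO middleware first; it assumes a configured Django environment, which this settings module does not provide on its own:

# from django.conf import settings
# assert settings.MIDDLEWARE_CLASSES[0] == 'django_seo_js.middleware.HashBangMiddleware'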
EducatedMachine/pysprintly | api/items.py | Python | mit | 133 | 0 | class Items:
def __init__(self, connection, cache_values=True):
self.conn = connection
self.cache_values = cache_values
|
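A minimal usage sketch; the connection value is an assumption standing in for whatever pysprintly normally passes:

items = Items(connection=None, cache_values=False)
print(items.cache_values)  # False: the constructor stores the flag as passed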
amw2104/fireplace | tests/test_league.py | Python | agpl-3.0 | 15,470 | 0.028636 | from utils import *
def test_ancient_shade():
game = prepare_empty_game()
shade = game.player1.give("LOE_110")
assert len(game.player1.deck) == 0
shade.play()
assert len(game.player1.deck) == 1
assert game.player1.deck[0].id == "LOE_110t"
game.end_turn()
assert game.player1.hero.health == 30
game.end_turn()
assert game.player1.hero.health == 30 - 7
def test_anubisath_sentinel():
game = prepare_game()
wisp = game.player2.summon(WISP)
sentinel1 = game.player1.give("LOE_061")
sentinel1.play()
sentinel2 = game.player1.give("LOE_061")
sentinel2.play()
game.end_turn(); game.end_turn()
assert sentinel2.atk == sentinel2.health == 4
game.player1.give("CS2_029").play(target=sentinel1)
assert sentinel2.atk == sentinel2.health == 4 + 3
assert wisp.atk == wisp.health == 1
game.player1.give("CS2_029").play(target=sentinel2)
assert wisp.atk == wisp.health == 1
def test_anyfin_can_happen():
game = prepare_game()
# kill a Wisp
wisp = game.player1.give(WISP)
wisp.play()
game.player1.give(MOONFIRE).play(target=wisp)
game.end_turn(); game.end_turn()
assert len(game.player1.field) == 0
assert len(game.player2.field) == 0
game.player1.give("LOE_026").play()
assert len(game.player1.field) == 0
assert len(game.player2.field) == 0
game.end_turn()
# kill a single Murloc twice
murloc = game.player2.give(MURLOC)
murloc.play()
game.player2.give(MOONFIRE).play(target=murloc)
game.player2.give("LOE_026").play()
assert len(game.player2.field) == 1
game.player2.field[0].destroy()
game.end_turn()
# kill another 4 Murloc Tinyfins and 1 Murloc Raider
for i in range(4):
murloc = game.player1.give(MURLOC)
murloc.play()
game.player1.give(MOONFIRE).play(target=murloc)
othermurloc = game.player1.give("CS2_168")
othermurloc.play()
game.player1.give(MOONFIRE).play(target=othermurloc)
game.end_turn(); game.end_turn()
assert len(game.player1.field) == 0
game.player1.give("LOE_026").play()
assert len(game.player1.field.filter(id=MURLOC)) == 6
assert len(game.player1.field.filter(id="CS2_168")) == 1
def test_curse_of_rafaam():
game = prepare_game()
game.player2.discard_hand()
assert len(game.player2.hand) == 0
curse = game.player1.give("LOE_007")
curse.play()
assert len(game.player2.hand) == 1
cursed = game.player2.hand[0]
assert cursed.id == "LOE_007t"
assert cursed.immune_to_spellpower
assert game.player2.hero.health == 30
game.end_turn()
assert game.player2.hero.health == 30 - 2
game.player2.give(KOBOLD_GEOMANCER).play()
game.end_turn()
assert game.player2.hero.health == 30 - 2
game.end_turn()
assert game.player2.hero.health == 30 - 2 - 2
cursed.play()
game.end_turn(); game.end_turn()
assert game.player2.hero.health == 30 - 2 - 2
def test_cursed_blade():
game = prepare_game()
blade = game.player1.give("LOE_118")
blade.play()
game.player1.give(MOONFIRE).play(target=game.player1.hero)
assert game.player1.hero.health == 30 - (1*2)
def test_cursed_blade_bolf_ramshield():
game = prepare_game()
blade = game.player1.give("LOE_118")
blade.play()
bolf = game.player1.give("AT_124")
bolf.play()
game.player1.give(MOONFIRE).play(target=game.player1.hero)
assert game.player1.hero.health == 30
assert bolf.damage == 2
def test_desert_camel():
game = prepare_empty_game()
goldshire1 = game.player1.give(GOLDSHIRE_FOOTMAN)
assert goldshire1.cost == 1
goldshire1.shuffle_into_deck()
game.player1.give(WISP).shuffle_into_deck()
game.player2.give(WISP).shuffle_into_deck()
camel1 = game.player1.give("LOE_020")
camel1.play()
assert len(game.player1.field) == 2
assert camel1 in game.player1.field
assert goldshire1 in game.player1.field
assert len(game.player2.field) == 0
game.end_turn(); game.end_turn()
goldshire2 = game.player2.give(GOLDSHIRE_FOOTMAN)
goldshire2.shuffle_into_deck()
camel2 = game.player1.give("LOE_020")
camel2.play()
assert len(game.player2.field) == 1
assert goldshire2 in game.player2.field
def test_djinni_of_zephyrs():
game = prepare_game()
game.player1.discard_hand()
game.player2.discard_hand()
statue = game.player1.give(ANIMATED_STATUE)
statue.play()
djinni = game.player1.give("LOE_053")
djinni.play()
game.player1.give(MOONFIRE).play(target=statue)
assert statue.damage == djinni.damage == 1
pwshield = game.player1.give("CS2_004")
pwshield.play(target=statue)
statue.max_health == 10 + 2
djinni.max_health == 6 + 2
assert len(game.player1.hand) == 1 + 1
# Djinni can trigger on minions that are "dead" (eg. killed by the spell)
naturalize = game.player1.give("EX1_161")
naturalize.play(target=statue)
assert len(game.player2.hand) == 2 + 2
assert statue.dead
assert djinni.dead
def test_djinni_of_zephyrs_untargeted():
game = prepare_game()
game.player1.discard_hand()
djinni = game.player1.give("LOE_053")
djinni.play()
arcaneint = game.player1.give("CS2_023")
arcaneint.play()
assert len(game.player1.hand) == 2
def test_eerie_statue():
game = prepare_game()
statue = game.player1.give("LOE_107")
statue.play()
assert not statue.can_attack()
game.end_turn(); game.end_turn()
assert statue.can_attack()
wisp = game.player1.give(WISP)
wisp.play()
assert statue.cant_attack
assert not statue.can_attack()
game.player1.give(MOONFIRE).play(target=wisp)
assert statue.can_attack()
def test_entomb():
game = prepare_empty_game()
wisp = game.player1.give(WISP)
wisp.play()
game.end_turn()
entomb = game.player2.give("LOE_104")
assert wisp in game.player1.field
assert len(game.player1.field) == 1
assert len(game.player2.deck) == 0
entomb.play(target=wisp)
assert len(game.player1.field) == 0
assert len(game.player2.deck) == 1
assert wisp in game.player2.deck
def test_ethereal_conjurer():
game = prepare_game(MAGE, MAGE)
conjurer = game.player1.give("LOE_003")
conjurer.play()
assert len(game.player1.choice.cards) == 3
for card in game.player1.choice.cards:
assert card.type == CardType.SPELL
# assert card.card_class == CardClass.MAGE # TODO
def test_everyfin_is_awesome():
game = prepare_game()
awesome = game.player1.give("LOE_113")
assert awesome.cost == 7
game.player1.give(MURLOC)
assert awesome.cost == 7
murloc1 = game.player1.give(MURLOC)
murloc1.play()
assert awesome.cost == 6
murloc2 = game.player2.summon(MURLOC)
assert awesome.cost == 6
assert murloc1.atk == murloc1.health == 1
awesome.play()
assert murloc1.buffs
assert murloc1.atk == murloc1.health == 1 + 2
assert not murloc2.buffs
def test_excavated_evil():
game = prepare_empty_game()
evil = game.player1.give("LOE_111")
wisp1 = game.player1.summon(WISP)
wisp2 = game.player2.summon(WISP)
assert len(game.player2.deck) == 0
evil.play()
assert wisp1.dead and wisp2.dead
assert game.player1.hero.health == game.player2.hero.health == 30
assert len(game.player2.deck) == 1
assert game.player2.deck[0].id == "LOE_111"
def test_explorers_hat():
game = prepare_empty_game()
wisp = game.player1.give(WISP).play()
game.player1.give("LOE_105").play(target=wisp)
assert wisp.health == 2
game.end_turn()
assert len(game.player1.hand) == 0
game.player2.give(MOONFIRE).play(target=wisp)
game.player2.give(MOONFIRE).play(target=wisp)
assert wisp.dead
assert len(game.player1.hand) == 1
def test_fossilized_devilsaur():
game = prepare_game()
game.player1.give(WISP).play()
game.player2.summon(CHICKEN)
devilsaur1 = game.player1.give("LOE_073")
devilsaur1.play()
assert not devilsaur1.taunt
game.end_turn(); game.end_turn()
chicken = game.player1.give(CHICKEN)
chicken.play()
devilsaur2 = game.player1.give("LOE_073")
devilsaur2.play()
assert devilsaur2.taunt
def test_gorillabot_a3():
game = prepare_game()
gorillabot1 = game.player1.give("LOE_039")
assert not gorillabot1.powered_up
gorillabot1.play()
assert not game.player1.choice
game.end_turn(); game.end_turn()
assert gorillabot1.race == Race.MECHANICAL
gorillabot2 = game.player1.give("LOE_039")
assert gorillabot2.powered_up
gorillabot2.play()
assert game.player1.choice
assert len(game.player1.choice.cards) == 3
for i in range(3):
assert game.player1.choice.cards[i].race == Race.MECHANICAL
def test_huge_toad():
game = prepare_game()
dummy = game.p |
kontza/sigal | sigal/plugins/copyright.py | Python | mit | 2,076 | 0 | """Plugin which adds a copyright to the image.
Settings:
- ``copyright``: the copyright text.
- ``copyright_text_font``: the copyright text font - either system/user
font-name or absolute path to font.ttf file. If no font is specified, or
specified font is not found, the default font is used.
- ``copyright_text_font_size``: the copyright text font-size. If no font is
specified, this setting is ignored.
- ``copyright_text_color``: the copyright text color in a tuple (R, G, B)
with decimal RGB code, e.g. ``(255, 255, 255)`` is white.
- ``copyright_text_position``: the copyright text position in 2 tuple (left,
top). By default text would be positioned at bottom-left corner.
"""
import logging
from PIL import ImageDraw, ImageFont
from sigal import signals
logger = logging.getLogger(__name__)
def add_copyright(img, settings=None):
logger.debug('Adding copyright to %r', img)
draw = ImageDraw.Draw(img)
text = settings['copyright']
font = settings.get('copyright_text_font', None)
font_size = settings.get('copyright_text_font_size', 10)
assert font_size >= 0
color = settings.get('copyright_text_color', (0, 0, 0))
bottom_margin = 3 # bottom margin for text
text_height = bottom_margin + 12 # default text height (of 15)
if font:
try:
font = ImageFont.truetype(font, font_size)
text_height = font.getsize(text)[1] + bottom_margin
except Exception: # load default font in case of any exception
logger.debug("Exception: Couldn't locate font %s, using "
"default font", font)
font = ImageFont.load_default()
else:
font = ImageFont.load_default()
left, top = settings.get('copyright_text_position',
(5, img.size[1] - text_height))
draw.text((left, top), text, fill=color, font=font)
return img
def register(settings):
if settings.get('copyright'):
signals.img_resized.connect(add_copyright)
else:
logger.warning('Copyright text is not set')
|
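A minimal sketch of calling add_copyright outside sigal's signal machinery; the image and the settings dict are constructed here and are assumptions, with keys mirroring the plugin docstring above:

from PIL import Image

img = Image.new('RGB', (200, 100), 'white')
stamped = add_copyright(img, {'copyright': 'Example 2016'})
stamped.save('stamped.png')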
XiaodunServerGroup/ddyedx | lms/djangoapps/class_dashboard/dashboard_data.py | Python | agpl-3.0 | 15,860 | 0.003279 | """
Computes the data to display on the Instructor Dashboard
"""
from courseware import models
from django.db.models import Count
from django.utils.translation import ugettext as _
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
def get_problem_grade_distribution(course_id):
"""
Returns the grade distribution per problem for the course
`course_id` the course ID for the course of interest
Output is a dict, where the key is the problem 'module_id' and the value is a dict with:
'max_grade' - max grade for this problem
'grade_distrib' - array of tuples (`grade`,`count`).
"""
# Aggregate query on studentmodule table for grade data for all problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
curr_problem = row['module_state_key']
# Build set of grade distributions for each problem that has student responses
if curr_problem in prob_grade_distrib:
prob_grade_distrib[curr_problem]['grade_distrib'].append((row['grade'], row['count_grade']))
if (prob_grade_distrib[curr_problem]['max_grade'] != row['max_grade']) and \
(prob_grade_distrib[curr_problem]['max_grade'] < row['max_grade']):
prob_grade_distrib[curr_problem]['max_grade'] = row['max_grade']
else:
prob_grade_distrib[curr_problem] = {
'max_grade': row['max_grade'],
'grade_distrib': [(row['grade'], row['count_grade'])]
}
return prob_grade_distrib
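
# Illustrative only (made-up module ids and counts): the shape of the dict
# returned by get_problem_grade_distribution() above.
#
#     {
#         'i4x://org/course/problem/week1_q1': {
#             'max_grade': 10.0,
#             'grade_distrib': [(0.0, 4), (5.0, 11), (10.0, 27)],
#         },
#     }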
def get_sequential_open_distrib(course_id):
"""
Returns the number of students that opened each subsection/sequential of the course
    `course_id` the ID of the course of interest
Outputs a dict mapping the 'module_id' to the number of students that have opened that subsection/sequential.
"""
# Aggregate query on studentmodule table for "opening a subsection" data
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
module_type__exact="sequential",
).values('module_state_key').annotate(count_sequential=Count('module_state_key'))
# Build set of "opened" data for each subsection that has "opened" data
sequential_open_distrib = {}
for row in db_query:
sequential_open_distrib[row['module_state_key']] = row['count_sequential']
return sequential_open_distrib
def get_problem_set_grade_distrib(course_id, problem_set):
"""
Returns the grade distribution for the problems specified in `problem_set`.
    `course_id` the ID of the course of interest
`problem_set` an array of strings representing problem module_id's.
    Requests from the database a count of each grade for each problem in the `problem_set`.
Returns a dict, where the key is the problem 'module_id' and the value is a dict with two parts:
'max_grade' - the maximum grade possible for the course
'grade_distrib' - array of tuples (`grade`,`count`) ordered by `grade`
"""
# Aggregate query on studentmodule table for grade data for set of problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
module_state_key__in=problem_set,
).values(
'module_state_key',
'grade',
'max_grade',
).annotate(count_grade=Count('grade')).order_by('module_state_key', 'grade')
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
if row['module_state_key'] not in prob_grade_distrib:
prob_grade_distrib[row['module_state_key']] = {
'max_grade': 0,
'grade_distrib': [],
}
curr_grade_distrib = prob_grade_distrib[row['module_state_key']]
curr_grade_distrib['grade_distrib'].append((row['grade'], row['count_grade']))
if curr_grade_distrib['max_grade'] < row['max_grade']:
curr_grade_distrib['max_grade'] = row['max_grade']
return prob_grade_distrib
def get_d3_problem_grade_distrib(course_id):
"""
Returns problem grade distribution information for each section, data already in format for d3 function.
    `course_id` the ID of the course of interest
Returns an array of dicts in the order of the sections. Each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
"""
prob_grade_distrib = get_problem_grade_distribution(course_id)
d3_data = []
# Retrieve course object down to problems
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=4)
# Iterate through sections, subsections, units, problems
for section in course.get_children():
curr_section = {}
        curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
        for subsection in section.get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
# Student data is at the problem level
if child.location.category == 'problem':
c_problem += 1
stack_data = []
# Construct label to display for this problem
label = "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem)
# Only problems in prob_grade_distrib have had a student submission.
if child.location.url() in prob_grade_distrib:
# Get max_grade, grade_distribution for this problem
problem_info = prob_grade_distrib[child.location.url()]
# Get problem_name for tooltip
problem_name = own_metadata(child).get('display_name', '')
# Compute percent of this grade over max_grade
max_grade = float(problem_info['max_grade'])
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = (grade * 100.0) / max_grade
                                # Construct tooltip for problem in grade distribution view
tooltip = _("{label} {problem_name} - {count_grade} {students} ({percent:.0f}%: {grade:.0f}/{max_grade:.0f} {questions})").format(
label=label,
problem_name=problem_name,
count_grade=count_grade,
students=_("students"),
percent=percent,
grade=grade,
max_grade=max_grade,
questions=_("questions"),
)
# Construct data to be sent to d3
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
})
problem = {
'xValue': label,
'sta |
ajslater/magritte | test.py | Python | gpl-2.0 | 272 | 0 | import sys
from unittest import TestCase
from magritte.cli import main
class TestConsole(TestCase):
def test_basic(self):
sys.argv += ['-V']
with self.assertRaises(SystemExit) as cm:
main()
self.assertEqual(cm.exception.code, 0)
garlandkr/ansible | lib/ansible/module_common.py | Python | gpl-3.0 | 6,752 | 0.004295 | # (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# from python and deps
from cStringIO import StringIO
import inspect
import os
import shlex
# from Ansible
from ansible import errors
from ansible import utils
from ansible import constants as C
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_LANG = "\"<<INCLUDE_ANSIBLE_MODULE_LANG>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
class ModuleReplacer(object):
"""
The Replacer is used to insert chunks of code into modules before
transfer. Rather than doing classical python imports, this allows for more
efficient transfer in a no-bootstrapping scenario by not moving extra files
over the wire, and also takes care of embedding arguments in the transferred
modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
will result in a template evaluation of
{{ include 'basic.py' }}
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
"""
# ******************************************************************************
def __init__(self, strip_comments=False):
this_file = inspect.getfile(inspect.currentframe())
        self.snippet_path = os.path.join(os.path.dirname(this_file), 'module_utils')
self.strip_comments = strip_comments # TODO: implement
    # ******************************************************************************
def slurp(self, path):
if not os.path.exists(path):
raise errors.AnsibleError("imported module support code does not exist at %s" % path)
fd = open(path)
data = fd.read()
fd.close()
return data
def _find_snippet_imports(self, module_data, module_path):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_style = 'old'
if REPLACER in module_data:
module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
module_style = 'non_native_want_json'
output = StringIO()
lines = module_data.split('\n')
snippet_names = []
for line in lines:
if line.find(REPLACER) != -1:
output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
snippet_names.append('basic')
elif line.startswith('from ansible.module_utils.'):
                tokens = line.split(".")
import_error = False
if len(tokens) != 3:
import_error = True
if line.find(" import *") == -1:
import_error = True
if import_error:
raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
snippet_name = tokens[2].split()[0]
snippet_names.append(snippet_name)
output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
else:
                # NOTE: comment/blank stripping is not implemented yet (see the
                # strip_comments TODO above); this branch is currently a no-op
                # and every line is written out below.
                if self.strip_comments and line.startswith("#") or line == '':
                    pass
output.write(line)
output.write("\n")
if len(snippet_names) > 0 and not 'basic' in snippet_names:
raise errors.AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
return (output.getvalue(), module_style)
# ******************************************************************************
def modify_module(self, module_path, complex_args, module_args, inject):
with open(module_path) as f:
# read in the module source
module_data = f.read()
(module_data, module_style) = self._find_snippet_imports(module_data, module_path)
complex_args_json = utils.jsonify(complex_args)
# We force conversion of module_args to str because module_common calls shlex.split,
# a standard library function that incorrectly handles Unicode input before Python 2.7.3.
try:
encoded_args = repr(module_args.encode('utf-8'))
except UnicodeDecodeError:
encoded_args = repr(module_args)
encoded_lang = repr(C.DEFAULT_MODULE_LANG)
encoded_complex = repr(complex_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_ARGS, encoded_args)
module_data = module_data.replace(REPLACER_LANG, encoded_lang)
module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in inject:
facility = inject['ansible_syslog_facility']
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split("\n")
shebang = None
if lines[0].startswith("#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in inject:
lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
module_data = "\n".join(lines)
return (module_data, module_style, shebang)
|
jkreft-usgs/PubsWarehouse_UI | server/setup.py | Python | unlicense | 3,994 | 0.001502 | """
Setuptools configuration for Pubs Warehouse.
This setup script requires that static assets have been built into the
`assets/dist` directory prior to the build.
"""
import os
from setuptools import setup, find_packages
def read_requirements():
"""
Get application requirements from
the requirements.txt file.
:return: portal_ui Python requirements
:rtype: list
"""
with open('requirements.txt', 'r') as req:
requirements = req.readlines()
install_requires = [r.strip() for r in requirements if r.find('git+') != 0]
dependency_links = [r.strip() for r in requirements if r.find('git+') == 0]
return {'install_requires': install_requires, 'dependency_links': dependency_links}
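
# Illustrative only (hypothetical requirements.txt): a file containing
#     Flask==0.12.2
#     git+https://github.com/example/pkg.git#egg=pkg
# would yield
#     {'install_requires': ['Flask==0.12.2'],
#      'dependency_links': ['git+https://github.com/example/pkg.git#egg=pkg']}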
def read(filepath):
"""
Read the contents from a file.
:param str filepath: path to the file to be read
:return: file contents
:rtype: str
"""
with open(filepath, 'r') as file:
content = file.read()
return content
def identify_data_files(data_dirs, exclusions=('.gitignore', '.webassets-cache')):
"""
Recursively introspect the contents of a directory. Once the contents
have been introspected, generate a list directories and sub-directories
with their contents as lists.
Any files listed in exclusions will not be included
as a data file. Please note that the list generated by this function
will override any exclusions defined in MANIFEST.in. This
means that if one specifies a file to be excluded in MANIFEST.in,
    but this function includes that file as a data file, then it's
going to be in the distributable.
:param list data_dirs: list of tuples each of the form: (`installation directory`, `source directory`)
the installation directory can be None to preserve the source directory's structure in the wheel's data
directory
:param tuple exclusions: tuple of all the files or directories NOT to include as a data file
:return: all contents of the directories as a list of tuples
:rtype: list
"""
directory_data_files = []
for installation_directory, directory_name in data_dirs:
for root, _, files in os.walk(directory_name, followlinks=True):
pathnames = [
os.path.relpath(os.path.join(root, filename))
for filename in files
if not any(ex in os.path.join(root, filename)
for ex in exclusions)
]
if pathnames:
data_install_path = (
installation_directory if installation_directory else os.path.relpath(root).strip('../')
)
data_file_element = (data_install_path, pathnames)
directory_data_files.append(data_file_element)
return directory_data_files
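
# Illustrative only (hypothetical layout): with the files
#     static/css/app.css
#     static/js/app.js
# identify_data_files([(None, 'static')]) returns something like
#     [('static/css', ['static/css/app.css']),
#      ('static/js', ['static/js/app.js'])]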
PARSED_REQUIREMENTS = read_requirements()
setup(
name='usgs_flask_pubs_ui',
version='2.26.0dev',
description='USGS Publications Warehouse User Interface',
long_description=read('../README.md'),
author='Mary Bucknell, James Kreft, Andrew Yan',
author_email='mbucknell@usgs.gov, jkreft@usgs.gov, ayan@usgs.gov',
url='https://github.com/USGS-CIDA/PubsWarehouse_UI',
classifiers=[
'Environment :: Web Environment',
'Framework :: Flask',
'License :: Public Domain',
'Programming Language :: Python :: 3.6'
],
packages=find_packages(),
include_package_data=True,
install_requires=PARSED_REQUIREMENTS['install_requires'],
test_suite='nose.collector',
tests_require=PARSED_REQUIREMENTS['install_requires'],
zip_safe=False,
# include the tier agnostic configuration file in the distributable
# the file gets placed in site-packages upon dist installation
py_modules=['config'],
# include static files in the distributable
# they will appear in the root of the virtualenv upon dist installation
data_files=identify_data_files([
(None, 'static')
])
)
Azure/azure-sdk-for-python | sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/operations/_tags_operations.py | Python | mit | 5,614 | 0.004097 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
scope: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Consumption/tags')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class TagsOperations(object):
"""TagsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.consumption.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
scope: str,
**kwargs: Any
) -> Optional["_models.TagsResult"]:
"""Get all available tag keys for the defined scope.
:param scope: The scope associated with tags operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope and
'/providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group
scope..
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagsResult, or the result of cls(response)
:rtype: ~azure.mgmt.consumption.models.TagsResult or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.TagsResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
scope=scope,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TagsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Consumption/tags'} # type: ignore
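
# Usage sketch (illustrative; client construction and authentication are
# elided, and the scope value is a made-up subscription id):
#     client = ConsumptionManagementClient(credential, subscription_id)
#     tags_result = client.tags.get(scope="/subscriptions/00000000-0000-0000-0000-000000000000")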
ypu/virt-test | tools/github/cache_populate.py | Python | gpl-2.0 | 718 | 0 | #!/usr/bin/env python
import sys
import os
import getpass
import datetime
from github import Github
from github_issues import GithubIssues
gh = Github(login_or_token=raw_input("Enter github username: "),
password=getpass.getpass('Enter github password: '),
user_agent='PyGithub/Python')
print "Enter location (<user>/<repo>)",
repo_full_name = 'autotest/virt-test'
repo_full_name = raw_input("or blank for '%s': "
                           % repo_full_name).strip() or repo_full_name
print
issues = GithubIssues(gh, repo_full_name)
for issue in issues:
sys.stdout.write(str(issue['number']) + '\n')
sys.stdout.flush()
# make sure cache is cleaned and saved up
del issues
print
MaximeGLegault/StrategyIA | ai/STA/Tactic/RotateAroundPosition.py | Python | mit | 1,168 | 0.003425 | # Under MIT license, see LICENSE.txt
from typing import List
import numpy as np
from RULEngine.Game.OurPlayer import OurPlayer
from RULEngine.Util.Position import Position
from RULEngine.Util.Pose import Pose
from ai.STA.Tactic.Tactic import Tactic
from ai.STA.Tactic.tactic_constants import Flags
from ai.STA.Action.rotate_around import RotateAround
from ai.states.game_state import GameState
class RotateAroundPosition(Tactic):
    def __init__(self, game_state: GameState, player: OurPlayer, target: Pose, args: List[str]=None):
super().__init__(game_state, player, target, args)
def exec(self):
if self.check_success():
self.status_flag = Flags.SUCCESS
else:
self.status_flag = Flags.WIP
ball = self.game_state.get_ball_position().conv_2_np()
target = self.target.position.conv_2_np()
ball_to_target = target - ball
orientation = np.arctan2(ball_to_target[1], ball_to_target[0])
next_action = RotateAround(self.game_state, self.player, Pose(Position.from_np(ball), orientation), 90)
return next_action.exec()
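    # Geometry note (illustrative numbers): with the ball at (0, 0) and the
    # target at (1000, 0), ball_to_target = (1000, 0) and
    # orientation = arctan2(0, 1000) = 0, so the robot circles the ball while
    # facing the target.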
def check_success(self):
return False
blitzmann/Pyfa | eos/db/saveddata/databaseRepair.py | Python | gpl-3.0 | 11,132 | 0.004581 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy.exc import DatabaseError
from logbook import Logger
pyfalog = Logger(__name__)
class DatabaseCleanup(object):
def __init__(self):
pass
@staticmethod
def ExecuteSQLQuery(saveddata_engine, query):
try:
results = saveddata_engine.execute(query)
return results
except DatabaseError:
pyfalog.error("Failed to connect to database or error executing query:\n{0}", query)
return None
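    # Usage pattern (illustrative): each cleanup method below follows the same
    # count-then-fix sequence, e.g.
    #     results = DatabaseCleanup.ExecuteSQLQuery(engine, "SELECT COUNT(*) AS num FROM ...")
    #     if results is not None and results.first()['num']:
    #         DatabaseCleanup.ExecuteSQLQuery(engine, "DELETE FROM ...")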
@staticmethod
def OrphanedCharacterSkills(saveddata_engine):
# Find orphaned character skills.
# This solves an issue where the character doesn't exist, but skills for that character do.
# See issue #917
pyfalog.debug("Running database cleanup for character skills.")
query = "SELECT COUNT(*) AS num FROM characterSkills WHERE characterID NOT IN (SELECT ID from characters)"
results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
if results is None:
return
row = results.first()
if row and row['num']:
query = "DELETE FROM characterSkills WHERE characterID NOT IN (SELECT ID from characters)"
delete = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
pyfalog.error("Database corruption found. Cleaning up {0} records.", delete.rowcount)
@staticmethod
def OrphanedFitDamagePatterns(saveddata_engine):
# Find orphaned damage patterns.
# This solves an issue where the damage pattern doesn't exist, but fits reference the pattern.
# See issue #777
pyfalog.debug("Running database cleanup for orphaned damage patterns attached to fits.")
query = "SELECT COUN | T(*) AS num FROM fits WHERE damagePatternID NOT IN (SELECT ID FROM damagePatterns) OR damagePatternID IS NULL"
results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
if results is None:
return
row = results.first()
if row and row['num']:
# Get Uniform damage pattern ID
            uniform_query = "SELECT ID FROM damagePatterns WHERE name = 'Uniform'"
uniform_results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, uniform_query)
if uniform_results is None:
return
rows = uniform_results.fetchall()
if len(rows) == 0:
pyfalog.error("Missing uniform damage pattern.")
elif len(rows) > 1:
pyfalog.error("More than one uniform damage pattern found.")
else:
uniform_damage_pattern_id = rows[0]['ID']
update_query = "UPDATE 'fits' SET 'damagePatternID' = {} " \
"WHERE damagePatternID NOT IN (SELECT ID FROM damagePatterns) OR damagePatternID IS NULL".format(uniform_damage_pattern_id)
update_results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, update_query)
pyfalog.error("Database corruption found. Cleaning up {0} records.", update_results.rowcount)
@staticmethod
def OrphanedFitCharacterIDs(saveddata_engine):
# Find orphaned character IDs. This solves an issue where the character doesn't exist, but fits reference the pattern.
pyfalog.debug("Running database cleanup for orphaned characters attached to fits.")
query = "SELECT COUNT(*) AS num FROM fits WHERE characterID NOT IN (SELECT ID FROM characters) OR characterID IS NULL"
results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
if results is None:
return
row = results.first()
if row and row['num']:
# Get All 5 character ID
all5_query = "SELECT ID FROM characters WHERE name = 'All 5'"
all5_results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, all5_query)
if all5_results is None:
return
rows = all5_results.fetchall()
if len(rows) == 0:
pyfalog.error("Missing 'All 5' character.")
elif len(rows) > 1:
pyfalog.error("More than one 'All 5' character found.")
else:
all5_id = rows[0]['ID']
update_query = "UPDATE 'fits' SET 'characterID' = " + str(all5_id) + \
" WHERE characterID not in (select ID from characters) OR characterID IS NULL"
update_results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, update_query)
pyfalog.error("Database corruption found. Cleaning up {0} records.", update_results.rowcount)
@staticmethod
def NullDamagePatternNames(saveddata_engine):
# Find damage patterns that are missing the name.
# This solves an issue where the damage pattern ends up with a name that is null.
# See issue #949
pyfalog.debug("Running database cleanup for missing damage pattern names.")
query = "SELECT COUNT(*) AS num FROM damagePatterns WHERE name IS NULL OR name = ''"
results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
if results is None:
return
row = results.first()
if row and row['num']:
query = "DELETE FROM damagePatterns WHERE name IS NULL OR name = ''"
delete = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
pyfalog.error("Database corruption found. Cleaning up {0} records.", delete.rowcount)
@staticmethod
def NullTargetResistNames(saveddata_engine):
# Find target resists that are missing the name.
# This solves an issue where the target resist ends up with a name that is null.
# See issue #949
pyfalog.debug("Running database cleanup for missing target resist names.")
query = "SELECT COUNT(*) AS num FROM targetResists WHERE name IS NULL OR name = ''"
results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
if results is None:
return
row = results.first()
if row and row['num']:
query = "DELETE FROM targetResists WHERE name IS NULL OR name = ''"
delete = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
pyfalog.error("Database corruption found. Cleaning up {0} records.", delete.rowcount)
@staticmethod
def OrphanedFitIDItemID(saveddata_engine):
# Orphaned items that are missing the fit ID or item ID.
# See issue #954
for table in ['drones', 'cargo', 'fighters']:
pyfalog.debug("Running database cleanup for orphaned {0} items.", table)
query = "SELECT COUNT(*) AS num FROM {} WHERE itemID IS NULL OR itemID = '' or itemID = '0' or fitID IS NULL OR fitID = '' or fitID = '0'".format(
table)
results = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
if results is None:
return
row = results.first()
if row and row['num']:
query = "DELETE FROM {} WHERE itemID IS NULL OR itemID = '' or itemID = '0' or fitID IS NULL OR fitID = '' or fitID = '0'".format(
table)
delete = DatabaseCleanup.ExecuteSQLQuery(saveddata_engine, query)
pyfalo |
BhallaLab/moose | moose-gui/suds/reader.py | Python | gpl-3.0 | 5,246 | 0.000191 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
XML document reader classes providing integration with the suds library's
caching system.
"""
from suds.cache import Cache, NoCache
from suds.plugin import PluginContainer
from suds.sax.parser import Parser
from suds.store import DocumentStore
from suds.transport import Request
class Reader:
"""
Provides integration with the cache.
@ivar options: An options object.
    @type options: I{Options}
"""
def __init__(self, options):
"""
@param options: An options object.
@type options: I{Options}
"""
self.options = options
self.plugins = PluginContainer(options.plugins)
def mangle(self, name, x):
"""
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
"""
h = abs(hash(name))
return '%s-%s' % (h, x)
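    # For example (illustrative): mangle('http://example.com/a.wsdl', 'document')
    # returns something like '123456789-document'; the exact number depends on
    # the interpreter's hash() of the name.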
class DocumentReader(Reader):
"""
Provides integration between the SAX L{Parser} and the document cache.
"""
def open(self, url):
"""
Open an XML document at the specified I{URL}.
        First, an attempt is made to retrieve the document from the I{object cache}.
If not found, it is downloaded and parsed using the SAX parser. The
result is added to the cache for the next open().
@param url: A document URL.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
"""
cache = self.cache()
id = self.mangle(url, 'document')
d = cache.get(id)
if d is None:
d = self.download(url)
cache.put(id, d)
self.plugins.document.parsed(url=url, document=d.root())
return d
def download(self, url):
"""
Download the document.
@param url: A document URL.
@type url: str.
@return: A file pointer to the document.
@rtype: file-like
"""
content = None
store = self.options.documentStore
if store is not None:
content = store.open(url)
if content is None:
fp = self.options.transport.open(Request(url))
try:
content = fp.read()
finally:
fp.close()
ctx = self.plugins.document.loaded(url=url, document=content)
content = ctx.document
sax = Parser()
return sax.parse(string=content)
def cache(self):
"""
Get the cache.
@return: The I{cache} when I{cachingpolicy} = B{0}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 0:
return self.options.cache
return NoCache()
class DefinitionsReader(Reader):
"""
Provides integration between the WSDL Definitions object and the object
cache.
@ivar fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
def __init__(self, options, fn):
"""
@param options: An options object.
@type options: I{Options}
@param fn: A factory function (constructor) used to create the object
not found in the cache.
@type fn: I{Constructor}
"""
Reader.__init__(self, options)
self.fn = fn
def open(self, url):
"""
Open a WSDL at the specified I{URL}.
        First, an attempt is made to retrieve the WSDL from
        the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL URL.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d
def cache(self):
"""
Get the cache.
@return: The I{cache} when I{cachingpolicy} = B{1}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 1:
return self.options.cache
return NoCache()
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_logging.py | Python | mit | 134,314 | 0.003231 | #!/usr/bin/env python
#
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import select
import socket
import struct
import sys
import tempfile
from test.support import (captured_stdout, run_with_locale, run_unittest,
patch, requires_zlib, TestHandler, Matcher)
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asynchat
import asyncore
import errno
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog
except ImportError:
win32evtlog = None
try:
import win32evtlogutil
except ImportError:
win32evtlogutil = None
win32evtlog = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
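
    # Illustrative: with log_format above, logging.getLogger('a.b').info('3')
    # renders as 'a.b -> INFO: 3', which expected_log_pat captures as the
    # groups ('a.b', 'INFO', '3').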
def setUp(self):
"""Set | up the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
        # Logging levels in a nested namesp
dogebuild/dogebuild | dogebuild/dogefile.py | Python | mit | 6,192 | 0.003068 | import logging
import sys
from functools import reduce
from os.path import relpath
from pathlib import Path
from typing import Dict, List, Any
from argparse import ArgumentParser
from dogebuild.common import DOGE_FILE, DirectoryContext, sanitize_name
from dogebuild.dependencies_functions import resolve_dependency_tree
from dogebuild.dogefile_internals.context import Context, ContextHolderGuard
from dogebuild.dogefile_internals.dependencies import Dependency, DogeDependency
class DogeFileLoggerAdapter(logging.LoggerAdapter):
    def __init__(self, logger, doge_file_id: str):
extra = {"doge_file_id": doge_file_id}
super(DogeFileLoggerAdapter, self).__init__(logger, extra)
self.doge_file_id = doge_file_id
def process(self, msg, kwargs):
return f"{self.doge_file_id}: {msg}", kwargs
class DogeFileFactory:
def __init__(self, root_path: Path):
self.store = {}
self.root_path = root_path.resolve()
def create(self, doge_file: Path) -> "DogeFile":
doge_file = doge_file.resolve()
if doge_file not in self.store:
self.store[doge_file] = DogeFile(doge_file, relpath(doge_file.parent, self.root_path), self)
return self.store[doge_file]
class DogeFile:
def __init__(self, doge_file: Path, doge_file_id: str, factory: DogeFileFactory):
self.doge_file = doge_file
self.directory = self.doge_file.parent
self.doge_file_id = doge_file_id
context = DogeFile._load_doge_file(self.doge_file, doge_file_id)
self.dependencies = context.dependencies
self.test_dependencies = context.test_dependencies
self.relman = context.relman
self.artifacts = {}
self.artifacts_to_publish = context.artifacts_to_publish + reduce(
lambda acc, plugin: acc + plugin.artifacts_to_publish, context.plugins, []
)
self.code_context = context.code_context
self.modules = context.modules
self.parameters = context.parameters
self.processed_tasks = {}
self.logger = DogeFileLoggerAdapter(logging.getLogger(), self.doge_file_id)
self.factory = factory
def extract_parameters(self, options: List[str]):
parser = ArgumentParser()
for param in self.parameters:
parser.add_argument(f"--{param.name}", type=param.parser, default=param.default)
parser.add_argument("tasks", nargs="*")
return parser.parse_intermixed_args(options)
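    # Illustrative: with a declared parameter named 'version', the options
    # ['--version', '1.2', 'build'] parse to Namespace(version='1.2', tasks=['build']).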
def run_tasks(self, tasks: List[str], parameters: Dict[str, Any]):
for submodule in self.modules:
submodule_doge_file = self.factory.create(self.directory / submodule / DOGE_FILE)
submodule_doge_file.run_tasks(tasks, parameters)
with DirectoryContext(self.directory):
self._resolve_dependencies(parameters)
run_list = self.relman.get_tasks(map(sanitize_name, tasks))
self.logger.info("Run tasks: {}".format(", ".join(map(lambda task: task.canonical_name, run_list))))
for current_task in run_list:
if current_task.canonical_name in self.processed_tasks:
continue
result = current_task.run(self.artifacts, parameters, self.code_context)
if result.error is not None:
self.logger.exception(
f"{current_task.TASK_TYPE} {current_task.canonical_name} failed", exc_info=result.error
)
exit(1)
elif result.exit_code != 0:
self.logger.error(f"{current_task.TASK_TYPE} {current_task.canonical_name} failed")
exit(result.exit_code)
else:
self._add_artifacts(result.artifacts)
self.logger.debug(f"{current_task.TASK_TYPE} {current_task.canonical_name} successfully terminated")
self.processed_tasks[current_task.canonical_name] = result
def _resolve_dependencies(self, parameters: Dict[str, Any]):
for dependency in self.dependencies + self.test_dependencies:
self.logger.info(f"Resolving dependency {dependency}")
dependency.acquire_dependency()
if isinstance(dependency, DogeDependency):
dependency_doge_file = self.factory.create(dependency.get_doge_file_path())
dependency_doge_file.run_tasks(dependency.tasks, parameters)
self._add_artifacts(dependency_doge_file._get_published_artifacts())
else:
self._add_artifacts(dependency.get_artifacts())
def _add_artifacts(self, add: Dict[str, List]) -> None:
for type, artifacts in add.items():
if type in self.artifacts:
self.artifacts[type] += artifacts
else:
self.artifacts[type] = artifacts
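    # Illustrative: merging {'jar': [b]} into an existing {'jar': [a]} yields
    # {'jar': [a, b]}; artifact types not seen before are added as new keys.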
def _get_published_artifacts(self) -> Dict[str, List[Path]]:
return {k: v for k, v in self.artifacts.items() if k in self.artifacts_to_publish}
@staticmethod
def _load_doge_file(doge_file: Path, doge_file_id: str) -> Context:
with open(doge_file, "r") as file, ContextHolderGuard(doge_file, doge_file_id) as holder:
code = compile(file.read(), doge_file.name, "exec")
sys.path = [str(doge_file.parent)] + sys.path
exec(code, holder.globals_context)
sys.path = sys.path[1:]
holder.context.verify()
return holder.context
def dependency_tree(self) -> int:
deps, _ = load_doge_file(DOGE_FILE) # noqa
deps = resolve_dependency_tree(deps)
self._print_dependencies(deps)
return 0
def _print_dependencies(self, dependencies: List[Dependency], inner_level: int = 0):
for d in dependencies:
if d.original_version:
print(
" " * (2 * inner_level - 1) + "+" + str(d) + " conflict resolved for {}".format(d.original_version)
)
else:
print(" " * (2 * inner_level - 1) + "+" + str(d))
self._print_dependencies(d.dependencies, inner_level=inner_level + 1)
Akrog/cinder | cinder/tests/__init__.py | Python | apache-2.0 | 1,435 | 0.000697 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`cinder.tests` -- Cinder Unittests
=====================================================
.. automodule:: cinder.tests
:platform: Unix
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@gmail.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
import eventlet
eventlet.monkey_patch()
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)
salas106/lahorie | lahorie/plugins/parrot.py | Python | mit | 238 | 0.012605 | # -*- coding: utf8 -*-
"""
The ``parrot`` plugin
=====================
Some kind of Russian parrot that acts like a Russian roulette, randomly repeating some sentence.
May enter berserk mode, but it is very rare.
"""
ajaygarg84/sugar | extensions/deviceicon/speaker.py | Python | gpl-2.0 | 7,404 | 0 | # Copyright (C) 2008 Martin Dengler
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
from gi.repository import GConf
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import get_icon_state, Icon
from sugar3.graphics.tray import TrayIcon
from sugar3.graphics.palette import Palette
from sugar3.graphics.palettemenu import PaletteMenuBox
from sugar3.graphics.palettemenu import PaletteMenuItem
from sugar3.graphics.palettemenu import PaletteMenuItemSeparator
from sugar3.graphics.xocolor import XoColor
from jarabe.frame.frameinvoker import FrameWidgetInvoker
from jarabe.model import sound
_ICON_NAME = 'speaker'
class DeviceView(TrayIcon):
FRAME_POSITION_RELATIVE = 103
def __init__(self):
client = GConf.Client.get_default()
self._color = XoColor(client.get_string('/desktop/sugar/user/color'))
TrayIcon.__init__(self, icon_name=_ICON_NAME, xo_color=self._color)
self.set_palette_invoker(FrameWidgetInvoker(self))
self.palette_invoker.props.toggle_palette = True
self._model = DeviceModel()
self._model.connect('notify::level', self.__speaker_status_changed_cb)
self._model.connect('notify::muted', self.__speaker_status_changed_cb)
self.connect('draw', self.__draw_cb)
self._update_info()
def create_palette(self):
label = GLib.markup_escape_text(_('My Speakers'))
palette = SpeakerPalette(label, model=self._model)
palette.set_group_id('frame')
return palette
def _update_info(self):
name = _ICON_NAME
current_level = self._model.props.level
xo_color = self._color
if self._model.props.muted:
name += '-muted'
xo_color = XoColor('%s,%s' % (style.COLOR_WHITE.get_svg(),
style.COLOR_WHITE.get_svg()))
self.icon.props.icon_name = get_icon_state(name, current_level,
step=-1)
self.icon.props.xo_color = xo_color
def __draw_cb(self, *args):
self._update_info()
def __speaker_status_changed_cb(self, pspec_, param_):
self._update_info()
class SpeakerPalette(Palette):
def __init__(self, primary_text, model):
Palette.__init__(self, label=primary_text)
self._model = model
box = PaletteMenuBox()
self.set_content(box)
box.show()
self._mute_item = PaletteMenuItem('')
self._mute_icon = Icon(icon_size=Gtk.IconSize.MENU)
self._mute_item.set_image(self._mute_icon)
box.append_item(self._mute_item)
self._mute_item.show()
self._mute_item.connect('activate', self.__mute_activate_cb)
separator = PaletteMenuItemSeparator()
box.append_item(separator)
separator.show()
vol_step = sound.VOLUME_STEP
self._adjustment = Gtk.Adjustment(value=self._model.props.level,
lower=0,
upper=100 + vol_step,
step_incr=vol_step,
page_incr=vol_step,
page_size=vol_step)
hscale = Gtk.HScale()
hscale.props.draw_value = False
hscale.set_adjustment(self._adjustment)
hscale.set_digits(0)
box.append_item(hscale, vertical_padding=0)
hscale.show()
self._adjustment_handler_id = \
self._adjustment.connect('value_changed',
self.__adjustment_changed_cb)
self._model_notify_level_handler_id = \
self._model.connect('notify::level', self.__level_changed_cb)
self._model.connect('notify::muted', self.__muted_changed_cb)
self.connect('popup', self.__popup_cb)
def _update_muted(self):
if self._model.props.muted:
mute_item_text = _('Unmute')
mute_item_icon_name = 'dialog-ok'
else:
mute_item_text = _('Mute')
mute_item_icon_name = 'dialog-cancel'
self._mute_item.set_label(mute_item_text)
self._mute_icon.props.icon_name = mute_item_icon_name
self._mute_icon.show()
def _update_level(self):
if self._adjustment.props.value != self._model.props.level:
self._adjustment.handler_block(self._adjustment_handler_id)
try:
self._adjustment.props.value = self._model.props.level
finally:
self._adjustment.handler_unblock(self._adjustment_handler_id)
def __adjustment_changed_cb(self, adj_):
self._model.handler_block(self._model_notify_level_handler_id)
try:
self._model.props.level = self._adjustment.props.value
finally:
self._model.handler_unblock(self._model_notify_level_handler_id)
self._model.props.muted = self._adjustment.props.value == 0
def __level_changed_cb(self, pspec_, param_):
self._update_level()
def __mute_activate_cb(self, menuitem_):
self._model.props.muted = not self._model.props.muted
def __muted_changed_cb(self, pspec_, param_):
self._update_muted()
def __popup_cb(self, palette_):
self._update_level()
self._update_muted()
class DeviceModel(GObject.GObject):
__gproperties__ = {
'level': (int, None, None, 0, 100, 0, GObject.PARAM_READWRITE),
'muted': (bool, None, None, False, GObject.PARAM_READWRITE),
}
def __init__(self):
GObject.GObject.__init__(self)
sound.muted_changed.connect(self.__muted_changed_cb)
sound.volume_changed.connect(self.__volume_changed_cb)
def __muted_changed_cb(self, **kwargs):
self.notify('muted')
def __volume_changed_cb(self, **kwargs):
self.notify('level')
def _get_level(self):
return sound.get_volume()
def _set_level(self, new_volume):
sound.set_volume(new_volume)
def _get_muted(self):
return sound.get_muted()
def _set_muted(self, mute):
sound.set_muted(mute)
def get_type(self):
return 'speaker'
def do_get_property(self, pspec):
if pspec.name == 'level':
return self._get_level()
elif pspec.name == 'muted':
return self._get_muted()
def do_set_property(self, pspec, value):
if pspec.name == 'level':
self._set_level(value)
elif pspec.name == 'muted':
self._set_muted(value)
def setup(tray):
tray.add_device(DeviceView())
wisechengyi/pants | src/python/pants/backend/python/rules/python_create_binary.py | Python | apache-2.0 | 4,021 | 0.00373 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Optional
from pants.backend.python.rules.pex import Pex
from pants.backend.python.rules.pex_from_target_closure import CreatePexFromTargetClosure
from pants.backend.python.rules.targets import EntryPoint, PythonBinarySources
from pants.backend.python.rules.targets import targets as python_targets
from pants.backend.python.targets.python_binary import PythonBinary
from pants.build_graph.address import Address
from pants.engine.addressable import Addresses
from pants.engine.legacy.structs import PythonBinaryAdaptor
from pants.engine.parser import HydratedStruct
from pants.engine.rules import UnionRule, rule
from pants.engine.selectors import Get
from pants.engine.target import SourcesRequest, SourcesResult, Target, hydrated_struct_to_target
from pants.rules.core.binary import BinaryTarget, CreatedBinary
from pants.rules.core.strip_source_roots import SourceRootStrippedSources, StripSnapshotRequest
# TODO: consider replacing this with sugar like `SelectFields(EntryPoint, PythonBinarySources)` so
# that the rule would request that instead of this dataclass. Note that this syntax must support
# both optional_fields (see the below TODO) and opt-out `SentinelField`s
# (see https://github.com/pantsbuild/pants/pull/9316#issuecomment-600152573).
@dataclass(frozen=True)
class PythonBinaryFields:
address: Address
sources: PythonBinarySources
entry_point: EntryPoint
# TODO: consume the other PythonBinary fields like `ZipSafe`. Consider making those fields
# optional. We _need_ PythonBinarySources and EntryPoint to work properly. If your target
# type also has ZipSafe, AlwaysWriteCache, etc, then we can do some additional things as an
# extra bonus. Consider adding `Target.maybe_get()` to facilitate this.
@staticmethod
def is_valid_target(tgt: Target) -> bool:
return tgt.has_fields([EntryPoint, PythonBinarySources])
@classmethod
def create(cls, tgt: Target) -> "PythonBinaryFields":
return cls(
tgt.address, sources=tgt.get(PythonBinarySources), entry_point=tgt.get(EntryPoint)
)
@rule
async def convert_python_binary_target(adaptor: PythonBinaryAdaptor) -> PythonBinaryFields:
hydrated_struct = await Get[HydratedStruct](Address, adaptor.address)
tgt = hydrated_struct_to_target(hydrated_struct, target_types=python_targets())
return PythonBinaryFields.create(tgt)
@rule
async def create_python_binary(fields: PythonBinaryFields) -> CreatedBinary:
entry_point: Optional[str]
if fields.entry_point.value is not None:
entry_point = fields.entry_point.value
else:
# TODO: rework determine_source_files.py to work with the Target API. It should take the
# Sources AsyncField as input, rather than TargetAdaptor.
sources_result = await Get[SourcesResult](SourcesRequest, fields.sources.request)
stripped_sources = await Get[SourceRootStrippedSources](
StripSnapshotRequest(sources_result.snapshot)
)
source_files = stripped_sources.snapshot.files
# NB: `PythonBinarySources` enforces that we have 0-1 sources.
if len(source_files) == 1:
module_name = source_files[0]
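            # e.g. (illustrative): a source file 'project/app.py' translates
            # to the module specifier 'project.app'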
entry_point = PythonBinary.translate_source_path_to_py_module_specifier(module_name)
else:
entry_point = None
request = CreatePexFromTargetClosure(
addresses=Addresses([fields.address]),
entry_point=entry_point,
output_filename=f"{fields.address.target_name}.pex",
)
pex = await Get[Pex](CreatePexFromTargetClosure, request)
return CreatedBinary(digest=pex.directory_digest, binary_name=pex.output_filename)
def rules():
return [
UnionRule(BinaryTarget, PythonBinaryAdaptor),
convert_python_binary_target,
create_python_binary,
]
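# Hedged illustration, not part of the original rules: the fallback branch in
# create_python_binary() derives the entry point from the single stripped
# source file by turning a path into a dotted module specifier, roughly:
def _module_specifier_sketch(path: str) -> str:
    # "project/app.py" -> "project.app"; an approximation of what
    # PythonBinary.translate_source_path_to_py_module_specifier does.
    return path[:-3].replace("/", ".") if path.endswith(".py") else path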
|
beepscore/google-python-exercises | basic/string_tool.py | Python | apache-2.0 | 2,076 | 0.003372 | #!/usr/bin/env python3 -tt
"""
Utility methods for strings.
"""
class StringTool:
def __init__(self):
pass
def clean_string(self, a_string):
"""
Return string with unwanted punctuation removed.
Currently uses str.replace()
Could make more efficient using regular expressions re sub.
# http://pymotw.com/2/re/
"""
# delete double quote
string_cleaned = a_string.replace('"', '')
# delete unusual quote
string_cleaned = string_cleaned.replace('`', '')
# delete trailing quote. Use space and other punctuation to help recognize it.
string_cleaned = string_cleaned.replace("' ", " ")
string_cleaned = string_cleaned.replace("';", ";")
string_cleaned = string_cleaned.replace("':", ":")
string_cleaned = string_cleaned.replace("',", ",")
string_cleaned = string_cleaned.replace("'.", ".")
string_cleaned = string_cleaned.replace("'?", "?")
string_cleaned = string_cleaned.replace("'!", "!")
string_cleaned = string_cleaned.replace("'\n", "\n")
# delete leading quote. Use space and other punctuation to help recognize it.
string_cleaned = string_cleaned.replace(" '", " ")
string_cleaned = string_cleaned.replace("-'", "-")
# replace other punctuation with space to avoid accidentally joining words
string_cleaned = string_cleaned.replace('\n', ' ')
string_cleaned = string_cleaned.replace('(', ' ')
string_cleaned = string_cleaned.replace(')', ' ')
string_cleaned = string_cleaned.replace('-', ' ')
string_cleaned = string_cleaned.replace('_', ' ')
        string_cleaned = string_cleaned.replace(';', ' ')
        string_cleaned = string_cleaned.replace(':', ' ')
string_cleaned = string_cleaned.replace(',', ' ')
string_cleaned = string_cleaned.replace('.', ' ')
string_cleaned = string_cleaned.replace('?', ' ')
string_cleaned = string_cleaned.replace('!', ' ')
return string_cleaned
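# Hedged sketch of the regular-expression alternative mentioned in the
# docstring above: a few re.sub passes instead of many str.replace calls.
# The quote-stripping rules only approximate clean_string(); this is not a
# verified drop-in replacement.
import re

def clean_string_re(a_string):
    # drop double quotes and backticks outright
    cleaned = re.sub(r'["`]', '', a_string)
    # drop single quotes adjacent to whitespace or punctuation
    cleaned = re.sub(r"'(?=[\s;:,.?!])|(?<=[\s-])'", '', cleaned)
    # collapse remaining punctuation (and newlines) to spaces
    return re.sub(r'[\n()\-_;:,.?!]', ' ', cleaned)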
|
sontek/python-driver | tests/integration/cqlengine/statements/test_base_statement.py | Python | apache-2.0 | 983 | 0 | # Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from cassandra.cqlengine.statements import BaseCQLStatement, StatementException
class BaseStatementTest(TestCase):
def test_where_clause_type_checking(self):
""" tests that only assignment clauses can be added to queries """
        stmt = BaseCQLStatement('table', [])
with self.assertRaises(StatementException):
stmt.add_where_clause('x=5')
|
coquelicot/Pooky | pooky/Widgets.py | Python | gpl-3.0 | 2,289 | 0.003058 | # This file is part of Pooky.
# Copyright (C) 2013 Fcrh <coquelicot1117@gmail.com>
#
# Pooky is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pooky is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pooky. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui
class SingletonWidget(QtGui.QWidget):
__instance = None
def __init__(self, *args):
super().__init__(*args)
if self.__class__.__instance is not None:
            raise RuntimeError("Singleton check failed.")
else:
self.__class__.__instance = self
class Palette(SingletonWidget):
def __init__(self, *args):
        super().__init__(*args)
class Preference(SingletonWidget):
def __init__(self, *args):
super().__init__(*args)
QtGui.QLabel('Almost Empty XD.', self)
self.resize(640, 480)
self.setWindowTitle('Preference')
class About(SingletonWidget):
def __init__(self, *args):
super().__init__(*args)
mainlayout = QtGui.QVBoxLayout()
mainlayout.addWidget(self.initContent(), True)
mainlayout.addLayout(self.initButtonLayout(), True)
self.setLayout(mainlayout)
self.setWindowTitle('About Pooky')
self.adjustSize()
def initButtonLayout(self):
btnlayout = QtGui.QHBoxLayout()
licenseBtn = QtGui.QPushButton('License')
def licenseCallBack():
raise RuntimeError("Not implement yet.")
licenseBtn.pressed.connect(licenseCallBack)
btnlayout.addWidget(licenseBtn)
closeBtn = QtGui.QPushButton('Close')
def closeCallBack():
self.lower()
self.hide()
closeBtn.pressed.connect(closeCallBack)
btnlayout.addWidget(closeBtn)
return btnlayout
def initContent(self):
return QtGui.QWidget()
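# Hedged usage sketch, not part of the original file. Assumes a running
# QApplication, since QWidget subclasses need one to be constructed.
def _singleton_demo():
    about = About()
    try:
        About()
    except RuntimeError:
        pass  # a second construction is rejected by SingletonWidget
    return about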
|
maxive/erp | addons/website_sale_link_tracker/controllers/backend.py | Python | agpl-3.0 | 2,123 | 0.002355 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http
from odoo.addons.website_sale.controllers.backend import WebsiteSaleBackend
from odoo.http import request
class WebsiteSaleLinkTrackerBackend(WebsiteSaleBackend):
@http.route()
def fetch_dashboard_data(self, date_from, date_to):
        results = super(WebsiteSaleLinkTrackerBackend, self).fetch_dashboard_data(date_from, date_to)
results['dashboards']['sales']['utm_graph'] = self.fetch_utm_data(date_from, date_to)
return results
def fetch_utm_data(self, date_from, date_to):
sale_utm_domain = [
('team_id.team_type', '=', 'website'),
('state', 'in', ['sale', 'done']),
('confirmation_date', '>=', date_from),
('confirmation_date', '<=', date_to)
]
        orders_data_groupby_campaign_id = request.env['sale.order'].read_group(
domain=sale_utm_domain + [('campaign_id', '!=', False)],
fields=['amount_total', 'id', 'campaign_id'],
groupby='campaign_id')
orders_data_groupby_medium_id = request.env['sale.order'].read_group(
domain=sale_utm_domain + [('medium_id', '!=', False)],
fields=['amount_total', 'id', 'medium_id'],
groupby='medium_id')
orders_data_groupby_source_id = request.env['sale.order'].read_group(
domain=sale_utm_domain + [('source_id', '!=', False)],
fields=['amount_total', 'id', 'source_id'],
groupby='source_id')
return {
'campaign_id': self.compute_utm_graph_data('campaign_id', orders_data_groupby_campaign_id),
'medium_id': self.compute_utm_graph_data('medium_id', orders_data_groupby_medium_id),
'source_id': self.compute_utm_graph_data('source_id', orders_data_groupby_source_id),
}
def compute_utm_graph_data(self, utm_type, utm_graph_data):
return [{
'utm_type': data[utm_type][1],
'amount_total': data['amount_total']
} for data in utm_graph_data]
|
danlrobertson/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py | Python | mpl-2.0 | 4,527 | 0.001546 | from node import NodeVisitor, ValueNode, ListNode, BinaryExpressionNode
from parser import atoms, precedence
atom_names = {v:"@%s" % k for (k,v) in atoms.iteritems()}
named_escapes = set(["\a", "\b", "\f", "\n", "\r", "\t", "\v"])
def escape(string, extras=""):
    # Assumes input is either UTF-8 bytes or unicode.
rv = ""
for c in string:
if c in named_escapes:
rv += c.encode("unicode_escape")
elif c == "\\":
rv += "\\\\"
elif c < '\x20':
rv += "\\x%02x" % ord(c)
elif c in extras:
rv += "\\" + c
else:
rv += c
if isinstance(rv, unicode):
return rv.encode("utf8")
else:
return rv
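# Hedged examples of escape() behaviour (illustrative only, not from the
# original module; assumes the Python 2 str/unicode semantics used above):
#   escape("a\tb")                 -> "a\\tb"        (named escape)
#   escape('say "hi"', extras='"') -> 'say \\"hi\\"' (extra char escaped)
#   escape("\x01")                 -> "\\x01"        (control character)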
class ManifestSerializer(NodeVisitor):
def __init__(self, skip_empty_data=False):
self.skip_empty_data = skip_empty_data
def serialize(self, root):
self.indent = 2
rv = "\n".join(self.visit(root))
if not rv:
return rv
if rv[-1] != "\n":
rv = rv + "\n"
return rv
def visit_DataNode(self, node):
rv = []
if not self.skip_empty_data or node.children:
if node.data:
rv.append("[%s]" % escape(node.data, extras="]"))
indent = self.indent * " "
else:
indent = ""
for child in node.children:
rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child))
if node.parent:
rv.append("")
return rv
def visit_KeyValueNode(self, node):
rv = [escape(node.data, ":") + ":"]
indent = " " * self.indent
if len(node.children) == 1 and isinstance(node.children[0], (ValueNode, ListNode)):
rv[0] += " %s" % self.visit(node.children[0])[0]
else:
for child in node.children:
rv.append(indent + self.visit(child)[0])
return rv
def visit_ListNode(self, node):
rv = ["["]
rv.extend(", ".join(self.visit(child)[0] for child in node.children))
rv.append("]")
return ["".join(rv)]
def visit_ValueNode(self, node):
if not isinstance(node.data, (str, unicode)):
data = unicode(node.data)
else:
            data = node.data
if "#" in data or (isinstance(node.parent, ListNode) and
("," in data or "]" in data)):
if " | \"" in data:
quote = "'"
else:
quote = "\""
else:
quote = ""
return [quote + escape(data, extras=quote) + quote]
def visit_AtomNode(self, node):
return [atom_names[node.data]]
def visit_ConditionalNode(self, node):
return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)]
def visit_StringNode(self, node):
rv = ["\"%s\"" % escape(node.data, extras="\"")]
for child in node.children:
rv[0] += self.visit(child)[0]
return rv
def visit_NumberNode(self, node):
return [str(node.data)]
def visit_VariableNode(self, node):
rv = escape(node.data)
for child in node.children:
rv += self.visit(child)
return [rv]
def visit_IndexNode(self, node):
assert len(node.children) == 1
return ["[%s]" % self.visit(node.children[0])[0]]
def visit_UnaryExpressionNode(self, node):
children = []
for child in node.children:
child_str = self.visit(child)[0]
if isinstance(child, BinaryExpressionNode):
child_str = "(%s)" % child_str
children.append(child_str)
return [" ".join(children)]
def visit_BinaryExpressionNode(self, node):
assert len(node.children) == 3
children = []
for child_index in [1, 0, 2]:
child = node.children[child_index]
child_str = self.visit(child)[0]
if (isinstance(child, BinaryExpressionNode) and
precedence(node.children[0]) < precedence(child.children[0])):
child_str = "(%s)" % child_str
children.append(child_str)
return [" ".join(children)]
def visit_UnaryOperatorNode(self, node):
return [str(node.data)]
def visit_BinaryOperatorNode(self, node):
return [str(node.data)]
def serialize(tree, *args, **kwargs):
s = ManifestSerializer(*args, **kwargs)
return s.serialize(tree)
|
polyaxon/polyaxon | core/polyaxon/polyflow/references/path.py | Python | apache-2.0 | 1,184 | 0 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.polyflow.references.mixin import RefMixin
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class PathRefSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("path_ref"))
path = fields.Str(required=True)
@staticmethod
def schema_config():
return V1PathRef
class V1PathRef(BaseConfig, RefMixin, polyaxon_sdk.V1PathRef):
SCHEMA = PathRefSchema
IDENTIFIER = "path_ref"
def get_kind_value(self):
return self.path
|
sag-enorman/selenium | py/test/selenium/webdriver/firefox/ff_profile_tests.py | Python | apache-2.0 | 8,464 | 0.001418 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import os
import zipfile
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
unicode
except NameError:
unicode = str
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
class TestFirefoxProfile:
def setup_method(self, method):
self.capabilities = {'marionette': False}
self.driver = webdriver.Firefox(capabilities=self.capabilities)
self.webserver = SimpleWebServer()
self.webserver.start()
def test_that_we_can_accept_a_profile(self):
profile1 = webdriver.FirefoxProfile()
profile1.set_preference("browser.startup.homepage_override.mstone", "")
profile1.set_preference("startup.homepage_welcome_url", self.webserver.where_is('simpleTest.html'))
profile1.update_preferences()
profile2 = webdriver.FirefoxProfile(profile1.path)
driver = webdriver.Firefox(
capabilities=self.capabilities,
firefox_profile=profile2)
title = driver.title
driver.quit()
assert "Hello WebDriver" == title
def test_that_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith("user.js"):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference",'):
assert line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_unicode_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference('sample.preference.2', unicode('hi there'))
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference.2"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith('user.js'):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference.2",'):
assert line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_integer_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.int.preference", 12345)
profile.update_preferences()
assert 12345 == profile.default_preferences["sample.int.preference"]
def test_that_boolean_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.bool.preference", True)
profile.update_preferences()
assert profile.default_preferences["sample.bool.preference"] is True
def test_that_we_delete_the_profile(self):
path = self.driver.firefox_profile.path
self.driver.quit()
assert not os.path.exists(path)
def test_profiles_do_not_share_preferences(self):
self.profile1 = webdriver.FirefoxProfile()
self.profile1.accept_untrusted_certs = False
self.profile2 = webdriver.FirefoxProfile()
# Default is true. Should remain so.
assert self.profile2.default_preferences["webdriver_accept_untrusted_certs"] is True
def test_none_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = None
try:
self.profile.set_proxy(proxy)
assert False, "exception after passing empty proxy is expected"
except ValueError:
pass
assert "network.proxy.type" not in self.profile.default_preferences
def test_unspecified_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
self.profile.set_proxy(proxy)
assert "network.proxy.type" not in self.profile.default_preferences
def test_manual_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.no_proxy = 'localhost, foo.localhost'
proxy.http_proxy = 'some.url:1234'
proxy.ftp_proxy = None
proxy.sslProxy = 'some2.url'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.MANUAL['ff_value']
assert self.profile.default_preferences["network.proxy.no_proxies_on"] == 'localhost, foo.localhost'
assert self.profile.default_preferences["network.proxy.http"] == 'some.url'
assert self.profile.default_preferences["network.proxy.http_port"] == 1234
assert self.profile.default_preferences["network.proxy.ssl"] == 'some2.url'
assert "network.proxy.ssl_port" not in self.profile.default_preferences
assert "network.proxy.ftp" not in self.profile.default_preferences
def test_pac_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.proxy_autoconfig_url = 'http://some.url:12345/path'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.PAC['ff_value']
assert self.profile.default_preferences["network.proxy.autoconfig_url"] == 'http://some.url:12345/path'
def test_autodetect_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.auto_detect = True
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.AUTODETECT['ff_value']
def teardown_method(self, method):
try:
self.driver.quit()
except:
pass # don't care since we may have killed the browser above
self.webserver.stop()
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage( |
achon22/cs231nLung | project-a/labels.py | Python | mit | 343 | 0.052478 | #!/usr/bin/env python
def main():
f = open('stage1_solution.csv')
ones = 0
    zeros = 0
total = 0
for line in f:
if line[:3] == 'id,':
continue
line = line.strip().split(',')
        label = int(line[1])
if label == 1:
ones += 1
total += 1
zeros = total-ones
print float(zeros)/total
f.close()
if __name__ == '__main__':
main() |
yousrabk/mne-python | mne/viz/tests/test_raw.py | Python | bsd-3-clause | 4,727 | 0 | # Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from numpy.testing import assert_raises
from mne import io, read_events, pick_types
from mne.utils import requires_version, run_tests_if_main
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def _get_raw():
raw = io.Raw(raw_fname, preload=True)
raw.pick_channels(raw.ch_names[:9])
return raw
def _get_events():
return read_events(event_name)
def test_plot_raw():
"""Test plotting of raw data
"""
import matplotlib.pyplot as plt
raw = _get_raw()
events = _get_events()
plt.close('all') # ensure all are closed
with warnings.catch_warnings(record=True):
fig = raw.plot(events=events, show_options=True)
# test mouse clicks
x = fig.get_axes()[0].lines[1].get_xdata().mean()
y = fig.get_axes()[0].lines[1].get_ydata().mean()
data_ax = fig.get_axes()[0]
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad channel
_fake_click(fig, data_ax, [x, y], xform='data') # unmark a bad channel
_fake_click(fig, data_ax, [0.5, 0.999]) # click elsewhere in 1st axes
_fake_click(fig, data_ax, [-0.1, 0.9]) # click on y-label
_fake_click(fig, fig.get_axes()[1], [0.5, 0.5]) # change time
_fake_click(fig, fig.get_axes()[2], [0.5, 0.5]) # change channels
_fake_click(fig, fig.get_axes()[3], [0.5, 0.5]) # open SSP window
fig.canvas.button_press_event(1, 1, 1) # outside any axes
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
# sadly these fail when no renderer is used (i.e., when using Agg):
# ssp_fig = set(plt.get_fignums()) - set([fig.number])
# assert_equal(len(ssp_fig), 1)
# ssp_fig = plt.figure(list(ssp_fig)[0])
# ax = ssp_fig.get_axes()[0] # only one axis is used
# t = [c for c in ax.get_children() if isinstance(c,
# matplotlib.text.Text)]
# pos = np.array(t[0].get_position()) + 0.01
# _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data') # off
# _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data') # on
# test keypresses
fig.canvas.key_press_event('escape')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('?')
fig.canvas.key_press_event('f11')
fig.canvas.key_press_event('escape')
# Color setting
assert_raises(KeyError, raw.plot, event_color={0: 'r'})
assert_raises(TypeError, raw.plot, event_color={'foo': 'r'})
fig = raw.plot(events=events, event_color={-1: 'r', 998: 'b'})
plt.close('all')
@requires_version('scipy', '0.10')
def test_plot_raw_filtered():
"""Test filtering of raw plots
"""
raw = _get_raw()
assert_raises(ValueError, raw.plot, lowpass=raw.info['sfreq'] / 2.)
assert_raises(ValueError, raw.plot, highpass=0)
assert_raises(ValueError, raw.plot, lowpass=1, highpass=1)
assert_raises(ValueError, raw.plot, lowpass=1, filtorder=0)
assert_raises(ValueError, raw.plot, clipping='foo')
raw.plot(lowpass=1, clipping='transparent')
raw.plot(highpass=1, clipping='clamp')
raw.plot(highpass=1, lowpass=2)
@requires_version('scipy', '0.12')
def test_plot_raw_psd():
"""Test plotting of raw psds
"""
    import matplotlib.pyplot as plt
raw = _get_raw()
# normal mode
raw.plot_psd(tmax=2.0)
# specific mode
picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
raw.plot_psd(picks=picks, area_mode='range')
ax = plt.axes()
# if ax is supplied, picks must be, too:
assert_raises(ValueError, raw.plot_psd, ax=ax)
raw.plot_psd(picks=picks, ax=ax)
plt.close('all')
# topo psd
    raw.plot_psd_topo()
plt.close('all')
run_tests_if_main()
|
netsec-ethz/scion | tools/licensechecker.py | Python | apache-2.0 | 2,566 | 0.001949 | #!/usr/bin/env python3
import sys
import subprocess
license_texts = {
"#":"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""",
"//": """
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
""",
}
exceptions = [
"go/lib/scrypto/cms",
"go/lib/serrors/stack.go",
"go/lib/util/duration.go",
"go/scion-pki/certs/certinfo.go",
"go/scion-pki/certs/certformat.go",
]
def is_ignored(f: str) -> bool:
for e in exceptions:
if e in f:
return True
return False
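# Hedged aside, not part of the original checker: the shell call to `head`
# in main() below can be replaced by a pure-Python helper that avoids the
# shell (and quoting issues in file names):
def read_head(path, n=15):
    with open(path, "rb") as fh:
        return b"".join(fh.readline() for _ in range(n))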
def main():
not_ok = {}
for f in sys.argv[1:]:
if is_ignored(f):
continue
header = subprocess.check_output("head -15 %s" % f, stderr=subprocess.STDOUT, shell=True)
lines = header.splitlines()
if len(lines) < 1:
not_ok[f] = "empty file"
continue
first_line = lines[0].decode("utf-8")
# generated files don't matter
if "generated" in header.decode("utf-8").lower():
continue
comment_marker = "//"
if not first_line.startswith(comment_marker):
comment_marker = "#"
if not first_line.startswith(comment_marker):
not_ok[f] = "no comment / unknown comme | nt marker: %s" % first_line
| continue
if license_texts[comment_marker] not in header.decode("utf-8"):
not_ok[f] = "missing licence"
for f, reason in not_ok.items():
print("%s: %s" % (f, reason), file=sys.stderr)
if len(not_ok) > 0:
sys.exit(1)
if __name__ == "__main__":
main()
|
mileswwatkins/billy | billy/tests/fixtures/ex/__init__.py | Python | bsd-3-clause | 1,554 | 0 | metadata = {
"abbreviation": "ex",
"capitol_timezone": "Etc/UTC",
"legislature_name": "Example Legislature",
"lower_chamber_name": "House of Representatives",
"lower_chamber_term": 2,
"lower_chamber_title": "Representative",
"upper_chamber_name": "Senate",
"upper_chamber_term": 6,
"upper_chamber_title": "Senator",
"name": "Example State",
"terms": [
{
"name": "T0",
"sessions": [
"S0"
],
"start_year": 2009,
"end_year": 2010
},
{
"name": "T1",
| "sessions": [
"S1", "Special1"
],
"start_year": 2011,
"end_year": 2012
},
{
"name": "T2",
"sessions": [
"S2", "Specia | l2"
],
"start_year": 2013,
"end_year": 2014
}
],
"session_details": {
"S0": {"start_date": 1250000000.0, "type": "primary",
"display_name": "Session Zero"},
"S1": {"start_date": 1300000000.0, "type": "primary",
"display_name": "Session One"},
"Special1": {"start_date": 1330000000.0, "type": "special",
"display_name": "Special Session One"},
"S2": {"start_date": 1350000000.0, "type": "primary",
"display_name": "Session Two"},
"Special2": {"start_date": 1360000000.0, "type": "special",
"display_name": "Special Session Two"}
}
}
|
Eric89GXL/PySurfer | examples/plot_probabilistic_label.py | Python | bsd-3-clause | 1,653 | 0 | """
============================
Display Probabilistic Labels
============================
Freesurfer ships with some probabilistic labels of cytoarchitectonic
and visual areas. Here we show several ways to visualize these labels
to help characterize the location of your data.
"""
from os import environ
from os.path import join
import numpy as np
from surfer import Brain
from nibabel.freesurfer import read_label
print(__doc__)
brain = Brain("fsaverage", "lh", "inflated")
"""
Show the morphometry with a continuous grayscale colormap.
"""
brain.add_morphometry("curv", colormap="binary",
min=-.8, max=.8, colorbar=False)
"""
The easiest way to label any vertex that could be in the region is with
add_label.
"""
brain.add_label("BA1_exvivo", color="#A6BDDB")
"""
You can also threshold based on the probability of that region being at each
vertex.
"""
brain.add_label("BA1_exvivo", color="#2B8CBE", scalar_thresh=.5)
"""
It's also possible to plot just the label boundary, in case you wanted to
overlay the label on an activation plot to assess whether it falls within that
region.
"""
brain.add_label("BA45_exvi | vo", color="#F0F8FF", borders=3, scalar_thresh=.5)
brain.add_label("BA45_exvivo", color="#F0F8FF", alpha=.3, scalar_thresh=.5)
"""
Finally, with a few tricks, you can display the whole probabilistic map.
"""
subjects_dir = environ["SUBJECTS_DIR"]
label_file = join(subjects_dir, "fsaverage", "label", "lh.BA6_exvivo.label")
prob_field = np.zeros_like(brain.geo['lh'].x)
ids, probs = read_label(label_file, read_scalars=True)
prob_field[ids] = probs
brain.add_data(prob_field, thresh=1e-5, colormap="RdPu")
|
LLNL/spack | var/spack/repos/builtin/packages/fasttree/package.py | Python | lgpl-2.1 | 1,234 | 0.001621 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fasttree(Package):
"""FastTr | ee infers approximately-maximum-likelihood phylogenetic
trees from alignments of nucleotide or protein sequences.
FastTree can handle alignments with up to a million of sequences
in a reasonable amount of time and memory."""
homepage = "http://www.microbesonline.org/fasttree"
    url      = "http://www.microbesonline.org/fasttree/FastTree-2.1.10.c"
version('2.1.10', sha256='54cb89fc1728a974a59eae7a7ee6309cdd3cddda9a4c55b700a71219fc6e926d', expand=False, url='http://www.microbesonline.org/fasttree/FastTree-2.1.10.c')
phases = ['build', 'install']
def build(self, spec, prefix):
cc = Executable(spack_cc)
cc('-O3', self.compiler.openmp_flag,
'-DOPENMP', '-finline-functions', '-funroll-loops', '-Wall',
'-oFastTreeMP', 'FastTree-' + format(spec.version.dotted) + '.c',
'-lm')
def install(self, spec, prefix):
mkdir(prefix.bin)
install('FastTreeMP', prefix.bin)
|
emilroz/openmicroscopy | components/tools/OmeroPy/src/omero/__init__.py | Python | gpl-2.0 | 3,189 | 0.000314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Primary OmeroPy types
Classes:
- omero.client -- Main OmeroPy connector object
Copyright 2007, 2008 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
from omero_version import omero_version
from omero_version import ice_compatibility as compat
import Ice
import os
_sys = __import__("sys")
try:
vers = Ice.stringVersion()
vers = vers.split(".")
compat = compat.split(".")
if compat[0:2] != vers[0:2]:
msg = """
ERROR: Ice version mismatch!
Your OMERO code has been compiled using Ice version %s
but you seem to have Ice version %s installed. If you need
help understanding this issue, please send this error message
to the OME community:
http://www.openmicroscopy.org/site/community
Debugging Info:
--------------
VERSION=%s
PYTHONPATH=%s
""" % (".".join(compat), ".".join(vers), omero_version,
os.path.pathsep.join(_sys.path))
raise Exception(msg)
finally:
del omero_version
del compat
del vers
del Ice
del os
__import_style__ = None
def client_wrapper(*args, **kwargs):
"""
Returns an instance of L{omero.gateway.BlitzGateway} created with all
arguments passed to this method
@return: See above
"""
import omero.gateway
return omero.gateway.BlitzGateway(*args, **kwargs)
def client(*args, **kwargs):
import omero.clients
return omero.clients.BaseClient(*args, **kwargs)
class ClientError(Exception):
"""
Top of client exception hierarchy.
"""
pass
class CmdError(ClientError):
"""
Thrown by omero.client.waitOnCmd() when
failonerror is True and an omero.cmd.ERR
    is returned. The only extra argument is the ERR instance,
    which is stored as self.err.
"""
def __init__(self, err, *args, **kwargs):
ClientError.__init__(self, *args, **kwargs)
self.err = err
class UnloadedEntityException(ClientError):
pass
class UnloadedCollectionException(ClientError):
pass
def proxy_to_instance(proxy_string, default=None):
"""
    Convert a proxy string to an instance. If no
default is provided, the string must be of the
form: 'Image:1' or 'ImageI:1'. With a default,
a string consisting of just the ID is permissible
    but not required.
"""
import omero
parts = proxy_string.split(":")
if len(parts) == 1 and default is not None:
proxy_string = "%s:%s" % (default, proxy_string)
parts.insert(0, default)
kls = parts[0]
if not kls.endswith("I"):
kls += "I"
kls = getattr(omero.model, kls, None)
if kls is None:
raise ClientError(("Invalid proxy string: %s. "
"Correct format is Class:ID") % proxy_string)
return kls(proxy_string)
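# Hedged usage examples for proxy_to_instance (illustrative only):
#   proxy_to_instance("Image:1")          -> omero.model.ImageI("Image:1")
#   proxy_to_instance("1", default="Image") -> the same ImageI proxy
#   proxy_to_instance("1")                -> raises ClientError (no default)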
#
# Workaround for warning messages produced in
# code-generated Ice files.
#
if _sys.version_info[:2] == (2, 6):
import warnings
warnings.filterwarnings(
action='ignore',
message='BaseException.message has been deprecated as of Python 2.6',
category=DeprecationWarning)
|
madcowfred/evething | thing/views/pi.py | Python | bsd-2-clause | 3,175 | 0.004094 | # ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.conf import settings
from django.contrib.auth.decorators import login_required
from thing.models import * # NOPEP8
from thing.stuff import * # NOPEP8
@login_required
def pi(request):
"""PI"""
tt = TimerThing('pi')
characters = Character.objects.filter(
apikeys__user=request.user,
apikeys__valid=True,
apikeys__key_type__in=[APIKey.ACCOUNT_TYPE, APIKey.CHARACTER_TYPE]
).distinct().select_related('colony_set')
pi_map = {}
for character in characters:
colonies = character.colony_set.all()
if colonies is not None and len(colonies):
char = pi_map.get(character.id, None)
if char is None:
char = {
'character': character,
'colonies': {}
}
for colony in colonies:
char['colonies'][colony.id] = {
'colony': colony
}
char['colonies'][colony.id]['extractors'] = colony.pin_set.filter(
type__in=Pin.EXTRACTORS).all()
char['colonies'][colony.id]['launchpads'] = colony.pin_set.filter(
type__in=Pin.LAUNCHPADS).all()
char['colonies'][colony.id]['storage'] = colony.pin_set.filter(
type__in=Pin.STORAGE).all()
pi_map[character.id] = char
tt.add_time('organizing')
# Render template
out = render_page(
'thing/pi.html',
{
'map': pi_map
},
request,
)
tt.add_time('template')
if settings.DEBUG:
tt.finished()
return out
|
drewet/shaderc | glslc/test/expect.py | Python | apache-2.0 | 15,233 | 0.000985 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A number of common glslc result checks coded in mixin classes.
A test case can use these checks by declaring their enclosing mixin classes
as superclass and providing the expected_* variables required by the check_*()
methods in the mixin classes.
"""
import os.path
from glslc_test_framework import GlslCTest
def convert_to_unix_line_endings(source):
"""Converts all line endings in source to be unix line endings."""
return source.replace('\r\n', '\n').replace('\r', '\n')
def substitute_file_extension(filename, extension):
"""Substitutes file extension, respecting known shader extensions.
foo.vert -> foo.vert.[extension] [similarly for .frag, .comp, etc.]
foo.glsl -> foo.[extension]
foo.unknown -> foo.[extension]
foo -> foo.[extension]
"""
if filename[-5:] not in ['.vert', '.frag', '.tesc', '.tese',
'.geom', '.comp']:
return filename.rsplit('.', 1)[0] + '.' + extension
else:
return filename + '.' + extension
def get_object_filename(source_filename):
"""Gets the object filename for the given source file."""
return substitute_file_extension(source_filename, 'spv')
def get_assembly_filename(source_filename):
"""Gets the assembly filename for the given source file."""
return substitute_file_extension(source_filename, 's')
def verify_file_non_empty(filename):
"""Checks that a given file exists and is not empty."""
if not os.path.isfile(filename):
return False, 'Cannot find file: ' + filename
if not os.path.getsize(filename):
return False, 'Empty file: ' + filename
return True, ''
class ReturnCodeIsZero(GlslCTest):
"""Mixin class for checking that the return code is zero."""
def check_return_code_is_zero(self, status):
if status.returncode:
return False, 'Non-zero return code: {ret}\n'.format(
ret=status.returncode)
return True, ''
class NoOutputOnStdout(GlslCTest):
"""Mixin class for checking that there is no output on stdout."""
def check_no_output_on_stdout(self, status):
if status.stdout:
return False, 'Non empty stdout: {out}\n'.format(out=status.stdout)
return True, ''
class NoOutputOnStderr(GlslCTest):
"""Mixin class for checking that there is no output on stderr."""
def check_no_output_on_stderr(self, status):
if status.stderr:
return False, 'Non empty stderr: {err}\n'.format(err=status.stderr)
return True, ''
class SuccessfulReturn(ReturnCodeIsZero, NoOutputOnStdout, NoOutputOnStderr):
"""Mixin class for checking that return code is zero and no output on
stdout and stderr."""
pass
class CorrectObjectFilePreamble(GlslCTest):
"""Provides methods for verifying preamble for a SPV object file."""
def verify_object_file_preamble(self, filename):
"""Checks that the given SPIR-V binary file has correct preamble. | """
def read_word(binary, index, little_endian):
"""Reads the index-th word from the given binary file."""
word = binary[index * 4:(index + 1) * 4]
if little_endian:
word = reversed(word)
return reduce(lambda w, b: (w << 8) | ord(b), word, 0)
def check_endianness(binary):
"""Checks the endianness of the given SPIR-V binary file.
Returns:
True if it's little endian, False if it's big endian.
None if magic number is wrong.
"""
first_word = read_word(binary, 0, True)
if first_word == 0x07230203:
return True
first_word = read_word(binary, 0, False)
if first_word == 0x07230203:
return False
return None
success, message = verify_file_non_empty(filename)
if not success:
return False, message
with open(filename, 'rb') as object_file:
object_file.seek(0, os.SEEK_END)
num_bytes = object_file.tell()
if num_bytes % 4 != 0:
return False, ('Incorrect SPV binary: size should be a multiple'
' of words')
if num_bytes < 20:
return False, 'Incorrect SPV binary: size less than 5 words'
object_file.seek(0)
preamble = bytes(object_file.read(20))
little_endian = check_endianness(preamble)
# SPIR-V module magic number
if little_endian is None:
return False, 'Incorrect SPV binary: wrong magic number'
# SPIR-V version number
if read_word(preamble, 1, little_endian) != 99:
return False, 'Incorrect SPV binary: wrong version number'
# glslang SPIR-V magic number
if read_word(preamble, 2, little_endian) != 0x051a00bb:
return False, ('Incorrect SPV binary: wrong generator magic '
'number')
# reserved for instruction schema
if read_word(preamble, 4, little_endian) != 0:
return False, 'Incorrect SPV binary: the 5th byte should be 0'
return True, ''
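# Hedged aside, not part of the original checks: the endianness probe above
# can also be expressed with the struct module instead of read_word:
import struct

def spv_is_little_endian(preamble):
    """Return True/False for a SPIR-V preamble, or None on a bad magic."""
    for fmt, little in (('<I', True), ('>I', False)):
        if struct.unpack(fmt, preamble[:4])[0] == 0x07230203:
            return little
    return None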
class CorrectAssemblyFilePreamble(GlslCTest):
"""Provides methods for verifying preamble for a SPV assembly file."""
def verify_assembly_file_preamble(self, filename):
success, message = verify_file_non_empty(filename)
if not success:
return False, message
with open(filename) as assembly_file:
first_line = assembly_file.readline()
second_line = assembly_file.readline()
if (first_line != '// Module Version 99\n' or
second_line != '// Generated by (magic number): 51a00bb\n'):
return False, 'Incorrect SPV assembly'
return True, ''
class ValidObjectFile(SuccessfulReturn, CorrectObjectFilePreamble):
"""Mixin class for checking that every input file generates a valid object
file following the object file naming rule, and there is no output on
stdout/stderr."""
def check_object_file_preamble(self, status):
for input_filename in status.input_filenames:
object_filename = get_object_filename(input_filename)
success, message = self.verify_object_file_preamble(
os.path.join(status.directory, object_filename))
if not success:
return False, message
return True, ''
class ValidNamedObjectFile(SuccessfulReturn, CorrectObjectFilePreamble):
"""Mixin class for checking that a list of object files with the given
names are correctly generated, and there is no output on stdout/stderr.
To mix in this class, subclasses need to provide expected_object_filenames
as the expected object filenames.
"""
def check_object_file_preamble(self, status):
for object_filename in self.expected_object_filenames:
success, message = self.verify_object_file_preamble(
os.path.join(status.directory, object_filename))
if not success:
return False, message
return True, ''
class ValidFileContents(GlslCTest):
"""Mixin class to test that a specific file contains specific text
To mix in this class, subclasses need to provide expected_file_contents as
the contents of the file and target_filename to determine the location."""
def check_file(self, status):
target_filename = os.path.join(status.directory, self.tar |
BoasWhip/Black | Code/marketData.py | Python | mit | 5,694 | 0.019143 | # -*- coding: utf-8 -*-
import numpy as np
from pandas import read_csv as importDB
import pandas as pd
database = r'\\UBSPROD.MSAD.UBS.NET\UserData\ozsanos\RF\Desktop\Black\stockData.csv'
tickers = ['AAPL','ADBE','ADI','AMD','AXP','BRCM','C','GLD','GOOG','GS','HNZ','HPQ','IBM','MSFT','TXN','XOM']
dateRange = [("2010-01-01","2010-12-31"),("2011-01-01","2011-12-31")]
# dateRange = pd.date_range(startDate, endDate)
'''
Pre-weightings permutations
'''
schemes = []
points = range(0, 11, 1)
for i in points:
for j in points:
for k in points:
z = i + j + k
if z <= 10:
schemes.append((round(i/10.0,1), round(j/10.0,1), round(k/10.0,1), round(1.0 - z/10.0,1)))
schemes = tuple(schemes)
'''
*** Code Body ***
'''
def getData(startDate, endDate, symbolSet):
return importDB(database, usecols = ['Close'] + symbolSet, index_col = 'Close').loc[startDate : endDate]
def simulate(startDate, endDate, symbolSet, weights):
marketData = getData(startDate, endDate, symbolSet).values
days = len(marketData)
portfolio = np.zeros(days)
returns = portfolio.copy()
for e in range(len(marketData[0])):
marketData[:,e] = weights[e] * marketData[:,e] / marketData[0,e]
portfolio += marketData[:,e]
for e in range(days):
if e > 0: returns[e] = (portfolio[e]/portfolio[e-1]) - 1
meanDailyReturn = np.average(returns)
stdDailyReturn = np.std(returns)
cummDailyReturn = portfolio[-1]
SharpeRatio = (days**0.5) * (meanDailyReturn / stdDailyReturn)
return [round(SharpeRatio,6), round(meanDailyReturn,6), round(stdDailyReturn,6), round(cummDailyReturn,6)]
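# Hedged aside, not part of the original assignment code: the per-day returns
# loop in simulate() can be vectorized with numpy; this sketch assumes the
# same normalized 1-D `portfolio` array built above.
def daily_returns(portfolio):
    returns = np.zeros_like(portfolio)
    returns[1:] = portfolio[1:] / portfolio[:-1] - 1
    return returns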
def optimise(symbolSet, dateFlag):
maxSharpe = 0.0
metrics = []
for e in schemes:
#print e,
s = simulate(dateRange[dateFlag][0], dateRange[dateFlag][1], symbolSet, e)
#print s
if s[0] > maxSharpe:
maxSharpe = s[0]
metrics = [s, e]
print('\n+ - + - +')
print "\nPortfolio:"
print tuple(symbolSet)
print "\nOptimal Weights:"
print metrics[1]
print "\nPerformance Metrics:"
print tuple(metrics[0])
print('\n+ - + - +\n\n\n\n')
'''
Portfolios
'''
'''
# Test 1
optimise(['AAPL', 'GLD', 'GOOG', 'XOM'], True)
# Test 2
optimise(['AXP', 'HPQ', 'IBM', 'HNZ'], False)
'''
# Quiz 1
optimise(['AAPL', 'GOOG', 'IBM', 'MSFT'], True)
# Quiz 2
optimise(['BRCM', 'ADBE', 'AMD', 'ADI'], False)
# Quiz 3
optimise(['BRCM', 'TXN', 'AMD', 'ADI'], True)
# Quiz 4
optimise(['BRCM', 'TXN', 'IBM', 'HNZ'], False)
# Quiz 5
optimise(['C', 'GS', 'IBM', 'HNZ'], False)
'''
# Test 1
is2011 = True
symbolSet = ['AAPL', 'GLD', 'GOOG', 'XOM']
weights = [0.4,0.4,0.0,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Test 2
is2011 = False
symbolSet = ['AXP', 'HPQ', 'IBM', 'HNZ']
weights = [0.0,0.0,0.0,1.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
'''
# Quiz 1
is2011 = True
symbolSet = ['AAPL', 'GOOG', 'IBM', 'MSFT']
weights = [0.5,0.0,0.5,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.0,0.8,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.2,0.2,0.4]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.1,0.1,0.8,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 2
is2011 = False
symbolSet = ['BRCM', 'ADBE', 'AMD', 'ADI']
weights = [0.0,0.2,0.8,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.0,1.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [1.0,0.0,0.0,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.1,0.9]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 3
is2011 = True
symbolSet = ['BRCM', 'TXN', 'AMD', 'ADI']
weights = [0.0,0.0,0.8,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.2,0.0,0.8]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.1,0.9]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.0,0.0,0.0,1.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 4
is2011 = False
symbolSet = ['BRCM', 'TXN', 'IBM', 'HNZ']
weights = [0.1,0.1,0.6,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.3,0.0,0.7,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.1,0.1,0.0,0.8]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.4,0.4,0.0,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
# Quiz 5
is2011 = False
symbolSet = ['C', 'GS', 'IBM', 'HNZ']
weights = [0.0,0.0,1.0,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.0,0.0,0.8]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.4,0.6,0.0,0.0]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
weights = [0.2,0.2,0.4,0.2]
print simulate(dateRange[is2011][0], dateRange[is2011][1], symbolSet, weights)
print('\n')
|
ActiveState/code | recipes/Python/59892_Testing_if_a_variable_is_defined/recipe-59892.py | Python | mit | 190 | 0 | # Ensure variable is defined
try:
    x
except NameError:
x = None
# Test whether variable is defined to be None
if x is None:
some_fallback_operation()
else:
some_operation(x)
|
ox-it/moxie | moxie/tests/test_cors_views.py | Python | apache-2.0 | 3,333 | 0.0048 | import unittest, json
from moxie.core.app import Moxie
from moxie.core.views import ServiceView
class TestCORSWithCredentials(ServiceView):
cors_allow_headers = 'X-DAVE'
cors_allow_credentials = True
cors_max_age = 20
methods = ['GET', 'OPTIONS', 'PUT']
def handle_request(self):
return {'name': 'Dave'}
class TestCORSWithoutCredentials(TestCORSWithCredentials):
cors_allow_credentials = False
class CORSViewsTestCase(unittest.TestCase):
def setUp(self):
self.app = Moxie(__name__)
self.app.config['DEFAULT_ALLOW_ORIGINS'] = ['foo.domain']
self.app.add_url_rule('/creds', view_func=TestCORSWithCredentials.as_view('creds'))
self.app.add_url_rule('/nocreds', view_func=TestCORSWithoutCredentials.as_view('nocreds'))
def test_credential_true(self):
with self.app.test_client() as c:
rv = c.open('/creds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(rv.headers['Access-Control-Allow-Credentials'], 'true')
def test_credential_allow_methods(self):
with self.app.test_client() as c:
rv = c.open('/creds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(set([m.strip() for m in rv.headers['Access-Control-Allow-Methods'].split(',')]), set(['PUT', 'GET', 'OPTIONS', 'HEAD']))
def test_credential_allow_headers(self):
with self.app.test_client() as c:
rv = c.open('/creds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(rv.headers['Access-Control-Allow-Headers'], "X-DAVE")
def test_credential_max_age(self):
with self.app.test_client() as c:
rv = c.open('/creds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(rv.headers['Access-Control-Max-Age'], "20")
def test_credential_echo_origin(self):
with self.app.test_client() as c:
rv = c.open('/creds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(rv.headers['Access-Control-Allow-Origin'], 'foo.domain')
def test_credential_bad_origin(self):
with self.app.test_client() as c:
rv = c.open('/creds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foobar.domain')])
self.assertEqual(rv.status_code, 400)
def test_without_creds_wildcard(self):
with self.app.test_client() as c:
rv = c.get('/nocreds', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(rv.headers['Access-Control-Allow-Origin'], '*')
    def test_preflight_content(self):
with self.app.test_client() as c:
rv = c.open('/nocreds', method='OPTIONS', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
self.assertEqual(rv.data, '')
def test_actual_content(self):
with self.app.test_client() as c:
rv = c.get('/nocreds', headers=[('Accept', 'application/json'), ('Origin', 'foo.domain')])
data = json.loads(rv.data)
            self.assertEqual(data['name'], 'Dave')
|
JioCloud/oslo.log | oslo_log/formatters.py | Python | apache-2.0 | 8,844 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import logging
import logging.config
import logging.handlers
import sys
import traceback
import six
from six import moves
from oslo_context import context as context_utils
from oslo_serialization import jsonutils
def _dictify_context(context):
if context is None:
return {}
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
# A configuration object is given to us when the application registers
# the logging options.
_CONF = None
def _store_global_conf(conf):
global _CONF
_CONF = conf
def _update_record_with_context(record):
"""Given a log record, update it with context information.
The request context, if there is one, will either be in the
extra values for the incoming record or in the global
thread-local store.
"""
context = record.__dict__.get(
'context',
context_utils.get_current()
)
d = _dictify_context(context)
# Copy the context values directly onto the record so they can be
# used by the formatting strings.
for k, v in d.items():
setattr(record, k, v)
return context
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
# Build the extra values that were given to us, including
# the context.
context = _update_record_with_context(record)
if hasattr(record, 'extra'):
extra = record.extra.copy()
else:
extra = {}
for key in getattr(record, 'extra_keys', []):
if key not in extra:
extra[key] = getattr(record, key)
# If we saved a context object, explode it into the extra
# dictionary because the values are more useful than the
# object reference.
if 'context' in extra:
extra.update(_dictify_context(context))
del extra['context']
message['extra'] = extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
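# Hedged usage sketch, not part of the original module: route a logger
# through JSONFormatter so each record is emitted as one JSON document.
def _json_logging_demo():
    handler = logging.StreamHandler()
    handler.setFormatter(JSONFormatter())
    logger = logging.getLogger("json-demo")
    logger.addHandler(handler)
    logger.warning("structured %s", "output")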
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
| """Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
self.conf = kwargs.pop('config', _CONF)
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
context = _update_record_with_context(record)
if context:
# FIXME(dhellmann): We should replace these nova-isms with
# more generic handling in the Context class. See the
# app-agnostic-logging-parameters blueprint.
instance = getattr(context, 'instance', None)
instance_uuid = getattr(context, 'instance_uuid', None)
# resource_uuid was introduced in oslo_context's
# RequestContext
resource_uuid = getattr(context, 'resource_uuid', None)
instance_extra = ''
if instance:
instance_extra = (self.conf.instance_format
% {'uuid': instance})
elif instance_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': instance_uuid})
elif resource_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': resource_uuid})
record.instance = instance_extra
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity', 'resource'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = self.conf.logging_context_format_string
else:
fmt = self.conf.logging_default_format_string
if (record.levelno == logging.DEBUG and
self.conf.logging_debug_format_suffix):
fmt += " " + self.conf.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.fo |
apophys/freeipa | ipalib/install/service.py | Python | gpl-3.0 | 4,662 | 0 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Base service installer module
"""
from ipalib.util import validate_domain_name
from ipapython.install import common, core, typing
from ipapython.install.core import group, knob
def prepare_only(obj):
"""
Decorator which makes an installer attribute appear only in the prepare
phase of the install
"""
obj.__exclude__ = getattr(obj, '__exclude__', set()) | {'enroll'}
return obj
def enroll_only(obj):
"""
Decorator which makes an installer attribute appear only in the enroll
phase of the install
"""
obj.__exclude__ = getattr(obj, '__exclude__', set()) | {'prepare'}
return obj
def master_install_only(obj):
"""
Decorator which makes an installer attribute appear only in master install
"""
obj.__exclude__ = getattr(obj, '__exclude__', set()) | {'replica_install'}
return obj
def replica_install_only(obj):
"""
Decorator which makes an installer attribute appear only in replica install
"""
obj.__exclude__ = getattr(obj, '__exclude__', set()) | {'master_install'}
return obj
def _does(cls, arg):
    """
    Return a subclass of cls in which every attribute whose ``__exclude__``
    set contains arg raises AttributeError when accessed
    """
def remove(name):
def removed(self):
raise AttributeError(name)
return property(removed)
return type(
cls.__name__,
(cls,),
{
n: remove(n) for n in dir(cls)
if arg in getattr(getattr(cls, n), '__exclude__', set())
}
)
def prepares(cls):
"""
Returns installer class stripped of attributes not related to the prepare
phase of the install
"""
return _does(cls, 'prepare')
def enrolls(cls):
"""
Returns installer class stripped of attributes not related to the enroll
phase of the install
"""
return _does(cls, 'enroll')
def installs_master(cls):
"""
Returns installer class stripped of attributes not related to master
install
"""
return _does(cls, 'master_install')
def installs_replica(cls):
"""
Returns installer class stripped of attributes not related to replica
install
"""
return _does(cls, 'replica_i | nstall')
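# Illustrative sketch (not part of the original module): how the phase
# decorators combine with the stripping helpers above. The class and method
# names are assumptions.
def _demo_phase_stripping():
    class Installer(object):
        @prepare_only
        def setup_ca(self):
            return 'prepare-only step'
    enroll_view = enrolls(Installer)
    assert Installer().setup_ca() == 'prepare-only step'
    try:
        enroll_view().setup_ca
    except AttributeError:
        pass  # stripped from the enroll phase, as expected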
@group
class ServiceInstallInterface(common.Installable,
common.Interactive,
core.Composite):
"""
Interface common to all service installers
"""
description = "Basic"
domain_name = knob(
str, None,
description="primary DNS domain of the IPA deployment " |
"(not necessarily related to the current hostname)",
cli_names='--domain',
)
@domain_name.validator
def domain_name(self, value):
validate_domain_name(value)
servers = knob(
# pylint: disable=invalid-sequence-index
typing.List[str], None,
description="FQDN of IPA server",
cli_names='--server',
cli_metavar='SERVER',
)
realm_name = knob(
str, None,
description="Kerberos realm name of the IPA deployment (typically "
"an upper-cased name of the primary DNS domain)",
cli_names='--realm',
)
@realm_name.validator
def realm_name(self, value):
validate_domain_name(value, entity="realm")
host_name = knob(
str, None,
description="The hostname of this machine (FQDN). If specified, the "
"hostname will be set and the system configuration will "
"be updated to persist over reboot. By default the result "
"of getfqdn() call from Python's socket module is used.",
cli_names='--hostname',
)
ca_cert_files = knob(
# pylint: disable=invalid-sequence-index
typing.List[str], None,
description="load the CA certificate from this file",
cli_names='--ca-cert-file',
cli_metavar='FILE',
)
replica_file = knob(
str, None,
description="a file generated by ipa-replica-prepare",
)
replica_file = replica_install_only(replica_file)
dm_password = knob(
str, None,
sensitive=True,
description="Directory Manager password (for the existing master)",
)
class ServiceAdminInstallInterface(ServiceInstallInterface):
"""
Interface common to all service installers which require admin user
authentication
"""
principal = knob(
str, None,
)
principal = enroll_only(principal)
principal = replica_install_only(principal)
admin_password = knob(
str, None,
sensitive=True,
)
admin_password = enroll_only(admin_password)
|
hjanime/bcbio-nextgen | bcbio/rnaseq/cufflinks.py | Python | mit | 10,155 | 0.001871 | """Assess transcript abundance in RNA-seq experiments using Cufflinks.
http://cufflinks.cbcb.umd.edu/manual.html
"""
import os
import tempfile
from bcbio.utils import get_in, file_exists, safe_makedir
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.rnaseq import gtf, annotate_gtf
import pandas as pd
def run(align_file, ref_file, data):
config = data["config"]
cmd = _get_general_options(align_file, config)
cmd.extend(_get_no_assembly_options(ref_file, data))
out_dir = _get_output_dir(align_file, data)
tracking_file = os.path.join(out_dir, "genes.fpkm_tracking")
fpkm_file = os.path.join(out_dir, data['rgnames']['sample']) + ".fpkm"
tracking_file_isoform = os.path.join(out_dir, "isoforms.fpkm_tracking")
fpkm_file_isoform = os.path.join(out_dir, data['rgnames']['sample']) + ".isoform.fpkm"
if not file_exists(fpkm_file):
with file_transaction(data, out_dir) as tmp_out_dir:
safe_makedir(tmp_out_dir)
cmd.extend(["--output-dir", tmp_out_dir])
cmd.extend([align_file])
cmd = map(str, cmd)
do.run(cmd, "Cufflinks on %s." % (align_file))
fpkm_file = gene_tracking_to_fpkm(tracking_file, fpkm_file)
fpkm_file_isoform = gene_tracking_to_fpkm(tracking_file_isoform, fpkm_file_isoform)
return out_dir, fpkm_file, fpkm_file_isoform
def gene_tracking_to_fpkm(tracking_file, out_file):
"""
    Take a gene-level tracking file from Cufflinks and output a two-column
    table with the first column as IDs and the second column as FPKM for the
    sample. Combines FPKM values from the same gene into a single value to work
    around this bug: http://seqanswers.com/forums/showthread.php?t=5224&page=2
"""
if file_exists(out_file):
return out_file
df = pd.io.parsers.read_table(tracking_file, sep="\t", header=0)
df = df[['tracking_id', 'FPKM']]
df = df.groupby(['tracking_id']).sum()
df.to_csv(out_file, sep="\t", header=False, index_label=False)
return out_file
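# Illustrative sketch (not part of the original module): duplicate tracking_id
# rows collapse into a single summed FPKM. The file paths are assumptions.
def _demo_gene_tracking_to_fpkm(tmpdir="/tmp"):
    tracking = os.path.join(tmpdir, "genes.fpkm_tracking")
    with open(tracking, "w") as out_handle:
        out_handle.write("tracking_id\tFPKM\n")
        out_handle.write("GENE1\t1.5\n")
        out_handle.write("GENE1\t2.5\n")  # same gene reported twice
        out_handle.write("GENE2\t3.0\n")
    # the resulting file maps GENE1 -> 4.0 and GENE2 -> 3.0
    return gene_tracking_to_fpkm(tracking, os.path.join(tmpdir, "demo.fpkm"))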
def _get_general_options(align_file, config):
options = []
cufflinks = config_utils.get_program("cufflinks", config)
options.extend([cufflinks])
options.extend(["--num-threads", config["algorithm"].get("num_cores", 1)])
options.extend(["--quiet"])
options.extend(["--no-update-check"])
options.extend(["--max-bundle-frags", 2000000])
options.extend(_get_stranded_flag(config))
return options
def _get_no_assembly_options(ref_file, data):
options = []
options.extend(["--frag-bias-correct", ref_file])
options.extend(["--multi-read-correct"])
options.extend(["--upper-quartile-norm"])
gtf_file = data["genome_resources"]["rnaseq"].get("transcripts", "")
if gtf_file:
options.extend(["--GTF", gtf_file])
mask_file = data["genome_resources"]["rnaseq"].get("transcripts_mask", "")
if mask_file:
options.extend(["--mask-file", mask_file])
return options
def _get_stranded_flag(config):
strand_flag = {"unstranded": "fr-unstranded",
"firststrand": "fr-firststrand",
"secondstrand": "fr-secondstrand"}
stranded = get_in(config, ("algorithm", "strandedness"), "unstranded").lower()
assert stranded in strand_flag, ("%s is not a valid strandedness value. "
"Valid values are 'firststrand', "
"'secondstrand' and 'unstranded" % (stranded))
flag = strand_flag[stranded]
return ["--library-type", flag]
def _get_output_dir(align_file, data, sample_dir=True):
config = data["config"]
name = data["rgnames"]["sample"] if sample_dir else ""
return os.path.join(get_in(data, ("dirs", "work")), "cufflinks", name)
def assemble(bam_file, ref_file, num_cores, out_dir, data):
out_dir = os.path.join(out_dir, data["rgnames"]["sample"])
safe_makedir(out_dir)
out_file = os.path.join(out_dir, data["rgnames"]["sample"], "transcripts.gtf")
if file_exists(out_file):
return out_file
with file_transaction(data, out_dir) as tmp_out_dir:
cmd = ("cufflinks --output-dir {tmp_out_dir} --num-threads {num_cores} "
"--frag-bias-correct {ref_file} "
"--multi-read-correct --upper-quartile-norm {bam_file}")
cmd = cmd.format(**locals())
do.run(cmd, "Assembling transcripts with Cufflinks using %s." % bam_file)
return out_file
def clean_as | sembly(gtf_file, clean=None, dirty=None):
"""
    Clean the likely garbage transcripts from the GTF file, including:
    1. any novel single-exon transcripts
    2. any features with an unknown strand
"""
base, ext = os.path.splitext(gtf_file)
db = gtf.get_gtf_db(gtf_file, in_memory=True)
clean = clean if clean else base + ".clean" + ext
dirty = dirty if dirty else | base + ".dirty" + ext
if file_exists(clean):
return clean, dirty
with open(clean, "w") as clean_handle, open(dirty, "w") as dirty_handle:
for gene in db.features_of_type('gene'):
for transcript in db.children(gene, level=1):
if is_likely_noise(db, transcript):
write_transcript(db, dirty_handle, transcript)
else:
write_transcript(db, clean_handle, transcript)
return clean, dirty
def write_transcript(db, handle, transcript):
for feature in db.children(transcript):
handle.write(str(feature) + "\n")
def is_likely_noise(db, transcript):
if is_novel_single_exon(db, transcript):
return True
if strand_unknown(db, transcript):
return True
def strand_unknown(db, transcript):
"""
    For unstranded data with novel transcripts, single-exon genes
    will have no strand information. Single-exon novel genes are also
    a source of noise in the Cufflinks assembly, so this removes them.
"""
features = list(db.children(transcript))
strand = features[0].strand
if strand == ".":
return True
else:
return False
def is_novel_single_exon(db, transcript):
features = list(db.children(transcript))
exons = [x for x in features if x.featuretype == "exon"]
class_code = features[0].attributes.get("class_code", None)[0]
if len(exons) == 1 and class_code == "u":
return True
return False
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
"""
replace the cufflinks gene_id and transcript_id with the
gene_id and transcript_id from ref_gtf, where available
"""
base, ext = os.path.splitext(merged_gtf)
fixed = out_file if out_file else base + ".clean.fixed" + ext
if file_exists(fixed):
return fixed
ref_db = gtf.get_gtf_db(ref_gtf)
merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)
ref_tid_to_gid = {}
for gene in ref_db.features_of_type('gene'):
for transcript in ref_db.children(gene, level=1):
ref_tid_to_gid[transcript.id] = gene.id
ctid_to_cgid = {}
ctid_to_oid = {}
for gene in merged_db.features_of_type('gene'):
for transcript in merged_db.children(gene, level=1):
ctid_to_cgid[transcript.id] = gene.id
feature = list(merged_db.children(transcript))[0]
oid = feature.attributes.get("oId", [None])[0]
if oid:
ctid_to_oid[transcript.id] = oid
cgid_to_gid = {}
for ctid, oid in ctid_to_oid.items():
cgid = ctid_to_cgid.get(ctid, None)
oid = ctid_to_oid.get(ctid, None)
gid = ref_tid_to_gid.get(oid, None) if oid else None
if cgid and gid:
cgid_to_gid[cgid] = gid
with file_transaction(data, fixed) as tmp_fixed_file:
with open(tmp_fixed_file, "w") as out_handle:
for gene in merged_db.features_of_type('gene'):
for transcript in merged_db.children(gene, level=1):
for feature in merged_db.children(transcript):
cgid = feature.attributes.get("gene_id", [None])[0]
gid = cgid_to_gid.get(cgid, |
wrouesnel/ansible | test/runner/lib/sanity/pslint.py | Python | gpl-3.0 | 5,220 | 0.001533 | """Sanity test using PSScriptAnalyzer."""
from __future__ import absolute_import, print_function
import collections
import json
import os
import re
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
find_executable,
)
from lib.config import (
SanityConfig,
)
from lib.test import (
calculate_confidence,
calculate_best_confidence,
)
PSLINT_SKIP_PATH = 'test/sanity/pslint/skip.txt'
PSLINT_IGNORE_PATH = 'test/sanity/pslint/ignore.txt'
class PslintTest(SanitySingleVersion):
"""Sanity test using PSScriptAnalyzer."""
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: SanityResult
"""
with open(PSLINT_SKIP_PATH, 'r') as skip_fd:
skip_paths = skip_fd.read().splitlines()
invalid_ignores = []
with open(PSLINT_IGNORE_PATH, 'r') as ignore_fd:
ignore_entries = ignore_fd.read().splitlines()
ignore = collections.defaultdict(dict)
line = 0
for ignore_entry in ignore_entries:
line += 1
if ' ' not in ignore_entry:
invalid_ignores.append((line, 'Invalid syntax'))
continue
| path, code = ignore_entry.split(' ', 1)
| if not os.path.exists(path):
invalid_ignores.append((line, 'Remove "%s" since it does not exist' % path))
continue
ignore[path][code] = line
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] in ('.ps1', '.psm1', '.psd1') and i.path not in skip_paths)
if not paths:
return SanitySkipped(self.name)
if not find_executable('pwsh', required='warning'):
return SanitySkipped(self.name)
cmd = ['test/sanity/pslint/pslint.ps1'] + paths
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
severity = [
'Information',
'Warning',
'Error',
]
cwd = os.getcwd() + '/'
# replace unicode smart quotes with ascii versions
stdout = re.sub(u'[\u2018\u2019]', "'", stdout)
stdout = re.sub(u'[\u201c\u201d]', '"', stdout)
messages = json.loads(stdout)
errors = [SanityMessage(
code=m['RuleName'],
message=m['Message'],
path=m['ScriptPath'].replace(cwd, ''),
line=m['Line'] or 0,
column=m['Column'] or 0,
level=severity[m['Severity']],
) for m in messages]
line = 0
filtered = []
for error in errors:
if error.code in ignore[error.path]:
ignore[error.path][error.code] = None # error ignored, clear line number of ignore entry to track usage
else:
filtered.append(error) # error not ignored
errors = filtered
for invalid_ignore in invalid_ignores:
errors.append(SanityMessage(
code='A201',
message=invalid_ignore[1],
path=PSLINT_IGNORE_PATH,
line=invalid_ignore[0],
column=1,
confidence=calculate_confidence(PSLINT_IGNORE_PATH, line, args.metadata) if args.metadata.changes else None,
))
for path in skip_paths:
line += 1
if not os.path.exists(path):
# Keep files out of the list which no longer exist in the repo.
errors.append(SanityMessage(
code='A101',
message='Remove "%s" since it does not exist' % path,
path=PSLINT_SKIP_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((PSLINT_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
for path in paths:
if path not in ignore:
continue
for code in ignore[path]:
line = ignore[path][code]
if not line:
continue
errors.append(SanityMessage(
code='A102',
message='Remove since "%s" passes "%s" test' % (path, code),
path=PSLINT_IGNORE_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((PSLINT_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
|
sozlukus/sozlukus.com | sozlukus/registration/tests/default_backend.py | Python | mit | 8,430 | 0 | import datetime
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from registration.forms import RegistrationForm
from registration.backends.default.views import RegistrationView
from registration.models import RegistrationProfile
from registration.users import UserModel
class DefaultBackendViewTests(TestCase):
"""
Test the default registration backend.
Running these tests successfully will require two templates to be
created for the sending of activation emails; details on these
templates and their contexts may be found in the documentation for
the default backend.
"""
urls = 'test_app.urls_default'
def setUp(self):
"""
Create an instance of the default backend for use in testing,
and set ``ACCOUNT_ACTIVATION_DAYS`` if it's not set already.
"""
self.old_activation = getattr(settings,
'ACCOUNT_ACTIVATION_DAYS', None)
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = 7 # pragma: no cover
def tearDown(self):
"""
Yank ``ACCOUNT_ACTIVATION_DAYS`` back out if it wasn't
originally set.
"""
        if self.old_activation is None:
            settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation  # pragma: no cover
def test_allow(self):
"""
The setting ``REGISTRATION_OPEN`` appropriately controls
whether registration is permitted.
"""
old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
settings.REGISTRATION_OPEN = True
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
settings.REGISTRATION_OPEN = False
# Now all attempts to hit the register view should redirect to
# the 'registration is closed' message.
resp = self.client.get(reverse('registration_register'))
self.assertRedirects(resp, reverse('registration_disallowed'))
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertRedirects(resp, reverse('registration_disallowed'))
settings.REGISTRATION_OPEN = old_allowed
def test_registration_get(self):
"""
HTTP ``GET`` to the registration view uses the appropriate
template and populates a registration form into the context.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp,
| 'registration/registration_form.html')
self.failUnless(isinstance(resp.context['form'],
RegistrationForm))
def test_registration(self):
"""
Registration creates a new inactive account and a new profile
with activation key, populates the correct account data and
sends an activation email.
"""
resp = self.cli | ent.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertRedirects(resp, reverse('registration_complete'))
new_user = UserModel().objects.get(username='bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must not be active.
self.failIf(new_user.is_active)
# A registration profile was created, and an activation email
# was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_no_email(self):
"""
        Overridden Registration view does not send an activation email if the
associated class variable is set to ``False``
"""
class RegistrationNoEmailView(RegistrationView):
SEND_ACTIVATION_EMAIL = False
request_factory = RequestFactory()
view = RegistrationNoEmailView.as_view()
view(request_factory.post('/', data={
'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'}))
UserModel().objects.get(username='bob')
# A registration profile was created, and no activation email was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 0)
@override_settings(
INSTALLED_APPS=('django.contrib.auth', 'registration',)
)
def test_registration_no_sites(self):
"""
Registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertEqual(302, resp.status_code)
new_user = UserModel().objects.get(username='bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
self.failIf(new_user.is_active)
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_failure(self):
"""
Registering with invalid data fails.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'notsecret'})
self.assertEqual(200, resp.status_code)
self.failIf(resp.context['form'].is_valid())
self.assertEqual(0, len(mail.outbox))
def test_activation(self):
"""
Activation of an account functions properly.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
profile = RegistrationProfile.objects.get(user__username='bob')
resp = self.client.get(
reverse('registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}))
self.assertRedirects(resp, reverse('registration_activation_complete'))
def test_activation_expired(self):
"""
An expired account can't be activated.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
profile = RegistrationProfile.objects.get(user__username='bob')
user = profile.user
user.date_joined -= datetime.timedelta(
days=settings.ACCOUNT_ACTIVATION_DAYS)
user.save()
resp = self.client.get(
reverse('registration_activate',
args=(),
kwargs={'activa |
CareerVillage/slack-moderation | src/accounts/models.py | Python | mit | 446 | 0.002242 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class AuthToken(models.Model):
"""
Store | auth tokens returned by the moderation backend service (Slack)
"""
service_name = models.TextField()
service_entity_auth_name = models.TextField()
service_entity_auth_i | d = models.TextField()
service_auth_token = models.TextField()
username = models.CharField(max_length=50)
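# Illustrative sketch (not part of the original module): persisting a token
# after a Slack OAuth hand-off. All field values below are placeholders.
def _demo_store_token():
    return AuthToken.objects.create(
        service_name='slack',
        service_entity_auth_name='moderation-team',
        service_entity_auth_id='T0000000',
        service_auth_token='xoxp-example-token',
        username='moderator',
    )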
|
marshall/mintest | mintest.py | Python | apache-2.0 | 919 | 0.004353 | #!/usr/bin/env python
#
# mintest - a minimal C unit testing framework, inspired by minunit
#
# Copyright 2013, Marshall Culpepper
# Licensed under the Apache License, Version 2.0
#
# waf tool for configuring and building mintest
import os
this_dir = os.path.abspath(os.path.dirname(__file__))
def options(opt):
opt.add_option('--mintest-output', default='human',
help='output format [human, json]. default %default')
def configure(cfg):
out_format = 'MT_OUT_HUMAN'
if cfg.options.mintest_output.lower() == 'json':
ou | t_format = 'MT_OUT_JSON'
cfg.env.append_unique('CFLAGS', ['-DMT_OUT_FORMAT=%s' % out_format])
cfg.env.append_unique('CXXFLAGS', ['-DMT_OUT_FORMAT=%s' % out_format])
cfg.env.append_unique('INCLUDES', [os.path.join(this_dir, 'include')])
def build(bld):
bld.env.MINTEST_SOURCES = bld.root | .ant_glob(os.path.join(this_dir[1:], 'src', '**/*.c'))
|
behas/bitcoingraph | tests/rpc_mock.py | Python | mit | 2,105 | 0 | from bitcoingraph.bitcoind import BitcoinProxy, BitcoindException
from pathlib import Path
import json
TEST_DATA_PATH = "tests/data"
class BitcoinProxyMock(BitcoinProxy):
def __init__(self, host=None, port=None):
super().__init__(host, port)
self.heights = {}
self.blocks = {}
self.txs = {}
self.load_testdata()
# Load test data into local dicts
def load_testdata(self):
p = Path(TEST_DATA_PATH)
files = [x for x in p.iterdir()
if x.is_file() and x.name.endswith('json')]
for f in files:
if f.name.startswith("block"):
height = f.name[6:-5]
with f.open() as jf:
raw_block = json.load(jf)
block_hash = raw_block['hash']
self.heights[int(height)] = block_hash
self.blocks[block_hash] = raw_block
elif f.name.startswith("tx"):
tx_hash = f.name[3:-5]
with f.open() as jf:
raw_block = json.load(jf)
self.txs[tx_hash] = raw_block
# Override production proxy methods
def getblock(self, block_hash):
if block_hash not in self.blocks:
raise BitcoindException("Unknown block", block_hash)
else:
return self.blocks[block_hash]
def getblockcount(self):
return max(self.heights.keys())
def getblockhash(self, block_height):
if block_height not in self.heights:
raise BitcoindException("Unknown height", block_height)
else:
return self.heights[block_height]
def getinfo(self):
print("No info")
def getrawtransaction(self, tx_id, verbose=1):
if tx_id not in self.txs:
raise BitcoindException("Unknown transaction", tx_id)
else:
return self.txs[tx_id]
def getrawtransactions(self, tx_ids, verbose=1):
results = []
for tx_id in tx_ids:
results.append(sel | f.ge | trawtransaction(tx_id, verbose))
return results
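# Illustrative sketch (not part of the original module): the mock serves JSON
# fixtures from tests/data, so lookups work without a running bitcoind.
def _demo_mock_proxy():
    proxy = BitcoinProxyMock()
    best_height = proxy.getblockcount()
    block = proxy.getblock(proxy.getblockhash(best_height))
    return block['hash']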
|
Zorro666/renderdoc | docs/python_api/examples/renderdoc/decode_mesh.py | Python | mit | 9,433 | 0.02417 | import sys
# Import renderdoc if not already imported (e.g. in the UI)
if 'renderdoc' not in sys.modules and '_renderdoc' not in sys.modules:
import renderdoc
# Alias renderdoc for legibility
rd = renderdoc
# We'll need the struct data to read out of bytes objects
import struct
# We base our data on a MeshFormat, but we add some properties
class MeshData(rd.MeshFormat):
indexOffset = 0
name = ''
# Recursively search for the drawcall with the most vertices
def biggestDraw(prevBiggest, d):
ret = prevBiggest
if ret == None or d.numIndices > ret.numIndices:
ret = d
for c in d.children:
biggest = biggestDraw(ret, c)
if biggest.numIndices > ret.numIndices:
ret = biggest
return ret
# Unpack a tuple of the given format, from the data
def unpackData(fmt, data):
# We don't handle 'special' formats - typically bit-packed such as 10:10:10:2
if fmt.Special():
raise RuntimeError("Packed formats are not supported!")
formatChars = {}
# 012345678
formatChars[rd.CompType.UInt] = "xBHxIxxxL"
formatChars[rd.CompType.SInt] = "xbhxixxxl"
formatChars[rd.CompType.Float] = "xxexfxxxd" # only 2, 4 and 8 are valid
# These types have identical decodes, but we might post-process them
formatChars[rd.CompType.UNorm] = formatChars[rd.CompType.UInt]
formatChars[rd.CompType.UScaled] = formatChars[rd.CompType.UInt]
formatChars[rd.CompType.SNorm] = formatChars[rd.CompType.SInt]
formatChars[rd.CompType.SScaled] = formatChars[rd.CompType.SInt]
# We need to fetch compCount components
vertexFormat = str(fmt.compCount) + formatChars[fmt.compType][fmt.compByteWidth]
# Unpack the data
value = struct.unpack_from(vertexFormat, data, 0)
# If the format needs post-processing such as normalisation, do that now
if fmt.compType == rd.CompType.UNorm:
divisor = float((2 ** (fmt.compByteWidth * 8)) - 1)
value = tuple(float(i) / divisor for i in value)
elif fmt.compType == rd.CompType.SNorm:
maxNeg = -float(2 ** (fmt.compByteWidth * 8)) / 2
divisor = float(-(maxNeg-1))
value = tuple((float(i) if (i == maxNeg) else (float(i) / divisor)) for i in value)
# If the format is BGRA, swap the two components
if fmt.BGRAOrder():
value = tuple(value[i] for i in [2, 1, 0, 3])
return value
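# Illustrative sketch (not part of the original script): decoding one packed
# RGB32F attribute with unpackData(). Assumes rd.ResourceFormat exposes the
# fields set below, as used further down in this script.
def _demo_unpack_data():
    fmt = rd.ResourceFormat()
    fmt.type = rd.ResourceFormatType.Regular
    fmt.compType = rd.CompType.Float
    fmt.compByteWidth = 4
    fmt.compCount = 3
    data = struct.pack('3f', 1.0, 2.0, 3.0)
    return unpackData(fmt, data)  # -> (1.0, 2.0, 3.0)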
# Get a list of MeshData objects describing the vertex inputs at this draw
def getMeshInputs(controller, draw):
state = controller.GetPipelineState()
# Get the index & vertex buffers, and fixed vertex inputs
ib = state.GetIBuffer()
vbs = state.GetVBuffers()
attrs = state.GetVertexInputs()
meshInputs = []
for attr in attrs:
# We don't handle instance attributes
if attr.perInstance:
raise RuntimeError("Instanced properties are not supported!")
meshInput = MeshData()
meshInput.indexResourceId = ib.resourceId
meshInput.indexByteOffset = ib.byteOffset
meshInput.indexByteStride = ib.byteStride
meshInput.baseVertex = draw.baseVertex
meshInput.indexOffset = draw.indexOffset
meshInput.numIndices = draw.numIndices
# If the draw doesn't use an index buffer, don't use it even if bound
if not (draw.flags & rd.ActionFlags.Indexed):
meshInput.indexResourceId = rd.ResourceId.Null()
# The total offset is the attribute offset from the base of the vertex
meshInput.vertexByteOffset = attr.byteOffset + vbs[attr.vertexBuffer].byteOffset + draw.vertexOffset * vbs[attr.vertexBuffer].byteStride
meshInput.format = attr.format
meshInput.vertexResourceId = vbs[attr.vertexBuffer].resourceId
meshInput.vertexByteStride = vbs[attr.vertexBuffer].byteStride
meshInput.name = attr.name
meshInputs.append(meshInput)
return meshInputs
# Get a list of MeshData objects describing the vertex outputs at this draw
def getMeshOutputs(controller, postvs):
meshOutputs = []
posidx = 0
vs = controller.GetPipelineState().GetShaderReflection(rd.ShaderStage.Vertex)
# Repeat the process, but this time sourcing the data from postvs.
# Since these are outputs, we iterate over the list of outputs from the
# vertex shader's reflection data
for attr in vs.outputSignature:
# Copy most properties from the postvs struct
meshOutput = MeshData()
meshOutput.indexResourceId = postvs.indexResourceId
meshOutput.indexByteOffset = postvs.indexByteOffset
meshOutput.indexByteStride = postvs.indexByteStride
meshOutput.baseVertex = postvs.baseVertex
meshOutput.indexOffset = 0
meshOutput.numIndices = postvs.numIndices
# The total offset is the attribute offset from the base of the vertex,
# as calculated by the stride per index
meshOutput.vertexByteOffset = postvs.vertexByteOffset
meshOutput.vertexResourceId = postvs.vertexResourceId
meshOutput.vertexByteStride = postvs.vertexByteStride
# Construct a resource format for this element
meshOutput.format = rd.ResourceFormat()
meshOutput.format.compByteWidth = rd.VarTypeByteSize(attr.varType)
meshOutput.format.compCount = attr.compCount
meshOutput.format.compType = rd.VarTypeCompType(attr.varType)
meshOutput.format.type = rd.ResourceFormatType.Regular
meshOutput.name = attr.semanticIdxName if attr.varName == '' else attr.varName
if attr.systemValue == rd.ShaderBuiltin.Position:
posidx = len(meshOutputs)
meshOutputs.append(meshOutput)
# Shuffle the position element to the front
if posidx > 0:
pos = meshOutputs[posidx]
del meshOutputs[posidx]
meshOutputs.insert(0, pos)
accumOffset = 0
for i in range(0, len(meshOutputs)):
meshOutputs[i].vertexByteOffset = accumOffset
# Note that some APIs such as Vulkan will pad the size | of the attribute here
# while others will tightly pack
fmt = meshOutputs[i].format
accumOffset += (8 if fmt.compByteWidth > 4 else 4) * fmt.compCount
return meshOutputs
def getIndices(controller, mesh):
# Get the character for the | width of index
indexFormat = 'B'
if mesh.indexByteStride == 2:
indexFormat = 'H'
elif mesh.indexByteStride == 4:
indexFormat = 'I'
# Duplicate the format by the number of indices
indexFormat = str(mesh.numIndices) + indexFormat
# If we have an index buffer
if mesh.indexResourceId != rd.ResourceId.Null():
# Fetch the data
ibdata = controller.GetBufferData(mesh.indexResourceId, mesh.indexByteOffset, 0)
# Unpack all the indices, starting from the first index to fetch
offset = mesh.indexOffset * mesh.indexByteStride
indices = struct.unpack_from(indexFormat, ibdata, offset)
# Apply the baseVertex offset
return [i + mesh.baseVertex for i in indices]
else:
# With no index buffer, just generate a range
return tuple(range(mesh.numIndices))
def printMeshData(controller, meshData):
indices = getIndices(controller, meshData[0])
print("Mesh configuration:")
for attr in meshData:
print("\t%s:" % attr.name)
print("\t\t- vertex: %s / %d stride" % (attr.vertexResourceId, attr.vertexByteStride))
print("\t\t- format: %s x %s @ %d" % (attr.format.compType, attr.format.compCount, attr.vertexByteOffset))
# We'll decode the first three indices making up a triangle
for i in range(0, 3):
idx = indices[i]
print("Vertex %d is index %d:" % (i, idx))
for attr in meshData:
# This is the data we're reading from. This would be good to cache instead of
# re-fetching for every attribute for every index
offset = attr.vertexByteOffset + attr.vertexByteStride * idx
data = controller.GetBufferData(attr.vertexResourceId, offset, 0)
# Get the value from the data
value = unpackData(attr.format, data)
# We don't go into the details of semantic matching here, just print both
print("\tAttribute '%s': %s" % (attr.name, value))
def sampleCode(controller):
# Find the biggest drawcall in the whole capture
draw = None
for d in controller.GetRootActions():
draw = biggestDraw(draw, d)
# Move to that draw
controller.SetFrameEvent(draw.eventId, True)
print("Decoding mesh inputs at %d: %s\n\n" % (draw.eventId, draw.GetName(controller.GetStructuredFile())))
# Calculate the mesh input configuration
meshInputs = getMeshInputs(controller, draw)
# Fetch and print the data from the mesh inputs
printMeshData(controller, meshInputs)
print("Decoding mesh outputs\n\n")
# Fetch the postvs data
postvs |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/openstack/common/utils.py | Python | gpl-2.0 | 515 | 0 |
def mysql_read():
    """Parse the dashboard database settings out of /etc/openstack.cfg."""
mysql_info = {}
with open('/etc/openstack.cfg', 'r') as f:
for i in f.readlines():
if i.split('=', 1)[0] in ('DASHBOARD_HOST',
'DASHBOARD_PASS',
'DASHBOARD_NAME',
| 'DASHBOARD_USER',
'DASHBOARD_PORT'):
data = i.split('=', 1)
mysql_info | [data[0]] = data[1].strip()
return mysql_info
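# Illustrative sketch (not part of the original module): /etc/openstack.cfg is
# expected to contain KEY=VALUE lines; a caller might assemble a DSN like this.
# The URL scheme is an assumption for demonstration only.
def _demo_dashboard_dsn():
    info = mysql_read()
    return 'mysql://%s:%s@%s:%s/%s' % (
        info['DASHBOARD_USER'], info['DASHBOARD_PASS'],
        info['DASHBOARD_HOST'], info['DASHBOARD_PORT'],
        info['DASHBOARD_NAME'])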
|
persepolisdm/persepolis | persepolis/scripts/mac_notification.py | Python | gpl-3.0 | 1,424 | 0.000702 | # -*- coding: utf-8 -*-
# This pr | ogram is free software: you can redistribute | it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# native notification on mac! needs Xcode (latest version) installed and pyobjc
# library from pip
import Foundation
import AppKit
import objc
NSUserNotification = objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
def notifyMac(title, subtitle, info_text, delay=0):
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(info_text)
notification.setDeliveryDate_(Foundation.NSDate.dateWithTimeInterval_sinceDate_(
delay, Foundation.NSDate.date()))
NSUserNotificationCenter.defaultUserNotificationCenter(
).scheduleNotification_(notification)
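# Illustrative sketch (not part of the original module): a minimal call; the
# banner text is a placeholder and delay=2 schedules it two seconds out.
def _demo_notify():
    notifyMac('Persepolis', 'Download complete',
              'file.zip has finished downloading', delay=2)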
|
stephenrjones/geoq | geoq/mgrs/__init__.py | Python | mit | 230 | 0.008696 | # -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C- | F600, and
# is subject to the Rig | hts in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
|
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/odr/odrpack.py | Python | mit | 41,254 | 0.000654 | """
Python wrappers for Orthogonal Distance Regression (ODRPACK).
Notes
=====
* Array formats -- FORTRAN stores its arrays in memory column first, i.e. an
array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
efficiency and convenience, the input and output arrays of the fitting
function (and its Jacobians) are passed to FORTRAN without transposition.
Therefore, where the ODRPACK documentation says that the X array is of shape
(N, M), it will be passed to the Python function as an array of shape (M, N).
If M==1, the one-dimensional case, then nothing matters; if M>1, then your
Python functions will be dealing with arrays that are indexed in reverse of
the ODRPACK documentation. No real biggie, but watch out for your indexing of
the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th
observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
you can always use the transpose() function from scipy explicitly.
* Examples -- See the accompanying file test/test.py for examples of how to set
up fits of your own. Some are taken from the User's Guide; some are from
other sources.
* Models -- Some common models are instantiated in the accompanying module
models.py . Contributions are welcome.
Credits
=======
* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
Robert Kern
robert.kern@gmail.com
"""
from __future__ import division, print_function, absolute_import
import numpy
from warnings import warn
from scipy.odr import __odrpack
__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
'Data', 'RealData', 'Model', 'Output', 'ODR',
'odr_error', 'odr_stop']
odr = __odrpack.odr
class OdrWarning(UserWarning):
"""
Warning indicating that the data passed into
ODR will cause problems when passed into 'odr'
that the user should be aware of.
"""
pass
class OdrError(Exception):
"""
Exception indicating an error in fitting.
This is raised by `scipy.odr` if an error occurs during fitting.
"""
pass
class OdrStop(Exception):
"""
Exception stopping fitting.
You can raise this exception in your objective function to tell
`scipy.odr` to stop fitting.
"""
pass
# Backwards compatibility
odr_error = OdrError
odr_stop = OdrStop
__odrpack._set_exceptions(OdrError, OdrStop)
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = numpy.asarray(obj)
else:
obj = numpy.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info//10000 % 10,
info//1000 % 10,
info//100 % 10,
info//10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrec | t')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] | == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
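# Illustrative sketch (not part of the original module): decoding odr return
# codes with _report_error().
def _demo_report_error():
    assert _report_error(2) == ['Parameter convergence']
    # info >= 5 unpacks into digit flags; 40000 reports a derivative error
    assert 'Error in derivatives' in _report_error(40000)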
class Data(object):
"""
The data to fit.
Parameters
----------
x : array_like
Observed data for the independent variable of the regression
y : array_like, optional
If array-like, observed data for the dependent variable of the
regression. A scalar input implies that the model to be used on
the data is implicit.
we : array_like, optional
If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
If `we` is a rank-1 array of length q (the dimensionality of the
response variable), then this vector is the diagonal of the covariant
weighting matrix for all data points.
If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
If `we` is a rank-2 array of shape (q, q), then this is the full
covariant weighting matrix broadcast to each observation.
If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
If the fit is implicit, then only a positive scalar value is used.
wd : array_like, optional
If `wd` is a scalar, then that value is used for all data points
(and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th input variable observation
(single-dimensional only).
If `wd` is a rank-2 array of shape (m, m), then this is the full
covariant weighting matrix broadcast to each observation.
If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
fix : array_like of ints, optional
The `fix` argument is the same as ifixx in the class ODR. It is |
shahzebsiddiqui/BuildTest | buildtest/cli/__init__.py | Python | gpl-3.0 | 15,098 | 0.002318 | """
buildtest cli: include functions to build, get test configurations, and
interact with a global configuration for buildtest.
"""
import argparse
import os
from termcolor import colored
from buildtest import BUILDTEST_VERSION, BUILDTEST_COPYRIGHT
from buildtest.defaults import BUILD_REPORT
from buildtest.schemas.defaults import schema_table
def handle_kv_string(val):
"""This method is used as type field in --filter argument in ``buildtest buildspec find``.
    This method returns a dict of key/value pairs, where the input is in the
    format key1=val1,key2=val2,key3=val3
:param val: input value
:type val: str
:return: dictionary of key/value pairs
:rtype: dict
"""
kv_dict = {}
if "," in val:
args = val.split(",")
for kv in args:
if "=" not in kv:
raise argparse.ArgumentTypeError("Must specify k=v")
key, value = kv.split("=")[0], kv.split("=")[1]
kv_dict[key] = value
return kv_dict
if "=" not in val:
raise argparse.ArgumentTypeError("Must specify in key=value format")
key, value = val.split("=")[0], val.split("=")[1]
kv_dict[key] = value
return kv_dict
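# Illustrative sketch (not part of the original module): how --filter values
# parse with handle_kv_string().
def _demo_handle_kv_string():
    assert handle_kv_string('tags=daily') == {'tags': 'daily'}
    assert handle_kv_string('type=script,tags=daily') == {'type': 'script',
                                                          'tags': 'daily'}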
def positive_number(value):
"""Checks if input value is positive value and within range of 1-50. This method
is used for --rebuild option
"""
value = int(value)
if value <= 0:
raise argparse.ArgumentTypeError(
f"{value} must be a positive number between [1-50]"
)
return value
def get_parser():
epilog_str = f"""
References
_______________________________________________________________________________________
GitHub: https://github.com/buildtesters/buildtest
Documentation: https://buildtest.readthedocs.io/en/latest/index.html
Schema Documentation: https://buildtesters.github.io/buildtest/
Slack: http://hpcbuildtest.slack.com/
Please report issues at https://github.com/buildtesters/buildtest/issues
{BUILDTEST_COPYRIGHT}
"""
if os.getenv("BUILDTEST_COLOR") == "True":
epilog_str = colored(epilog_str, "blue", attrs=["bold"])
parser = argparse.ArgumentParser(
prog="buildtest",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="buildtest is a HPC testing framework for building and running tests.",
usa | ge="%(prog)s [options] [COMMANDS]",
epilog=epilog_str,
)
parser.add_argument(
"-V",
"--version",
action="version",
version=f"%(prog)s version {BUILDTEST_VERSION}",
)
parser.add_argument(
"-c", "--config_file", help="Specify alternate configuration file"
)
subparsers = parser.add_subparsers(title="COMMANDS", dest="subcommands", metavar="")
build_menu(subparsers)
history_menu(subparsers)
buildspec_menu(sub | parsers)
config_menu(subparsers)
report_menu(subparsers)
inspect_menu(subparsers)
schema_menu(subparsers)
cdash_menu(subparsers)
subparsers.add_parser("docs", help="Open buildtest docs in browser")
subparsers.add_parser("schemadocs", help="Open buildtest schema docs in browser")
return parser
def history_menu(subparsers):
"""This method builds the command line menu for ``buildtest history`` command"""
history_subcmd = subparsers.add_parser("history", help="Query build history")
history_subparser = history_subcmd.add_subparsers(
metavar="", description="Query build history file", dest="history"
)
history_subparser.add_parser("list", help="List a summary of all builds")
query = history_subparser.add_parser(
"query", help="Query information for a particular build"
)
query.add_argument("id", type=int, help="Select a build ID")
query.add_argument(
"-l",
"--log",
action="store_true",
help="Display logfile for corresponding build id",
)
def build_menu(subparsers):
"""This method implements command line menu for ``buildtest build`` command."""
parser_build = subparsers.add_parser("build", help="Build and Run test")
parser_build.add_argument(
"-b",
"--buildspec",
help="Specify a buildspec (file or directory) to build. A buildspec must end in '.yml' extension.",
action="append",
)
parser_build.add_argument(
"-x",
"--exclude",
action="append",
help="Exclude one or more buildspecs (file or directory) from processing. A buildspec must end in '.yml' extension.",
)
parser_build.add_argument(
"-t",
"--tags",
action="append",
type=str,
help="Discover buildspecs by tags found in buildspec cache",
)
parser_build.add_argument(
"-ft",
"--filter-tags",
action="append",
type=str,
help="Filter buildspecs by tags when building tests.",
)
parser_build.add_argument(
"-e",
"--executor",
action="append",
type=str,
help="Discover buildspecs by executor name found in buildspec cache",
)
parser_build.add_argument(
"-s",
"--stage",
help="control behavior of buildtest build",
choices=["parse", "build"],
)
parser_build.add_argument(
"--testdir",
help="Specify a custom test directory where to write tests. This overrides configuration file and default location.",
)
parser_build.add_argument(
"--rebuild",
type=positive_number,
help="Rebuild test X number of times. Must be a positive number between [1-50]",
)
parser_build.add_argument(
"-r",
"--report_file",
help="Specify a report file where tests will be written.",
)
parser_build.add_argument(
"--max-pend-time",
type=positive_number,
help="Specify Maximum Pending Time (sec) for job before cancelling job. This only applies for batch job submission.",
)
parser_build.add_argument(
"--poll-interval",
type=positive_number,
help="Specify Poll Interval (sec) for polling batch jobs",
)
parser_build.add_argument(
"-k",
"--keep-stage-dir",
action="store_true",
help="Keep stage directory after job completion.",
)
def buildspec_menu(subparsers):
"""This method implements ``buildtest buildspec`` command"""
parser_buildspec = subparsers.add_parser(
"buildspec", help="Options for querying buildspec cache"
)
subparsers_buildspec = parser_buildspec.add_subparsers(
description="Find buildspec from cache file",
metavar="",
)
buildspec_find = subparsers_buildspec.add_parser("find", help="find all buildspecs")
buildspec_find.add_argument(
"--root",
help="Specify root buildspecs (directory) path to load buildspecs into buildspec cache.",
type=str,
action="append",
)
buildspec_find.add_argument(
"-r",
"--rebuild",
help="Rebuild buildspec cache and find all buildspecs again",
action="store_true",
)
buildspec_find.add_argument(
"-t", "--tags", help="List all available tags", action="store_true"
)
buildspec_find.add_argument(
"-b",
"--buildspec",
help="Get all buildspec files from cache",
action="store_true",
)
buildspec_find.add_argument(
"-e",
"--executors",
help="get all unique executors from buildspecs",
action="store_true",
)
buildspec_find.add_argument(
"-p", "--paths", help="print all root buildspec paths", action="store_true"
)
buildspec_find.add_argument(
"--group-by-tags", action="store_true", help="Group tests by tag name"
)
buildspec_find.add_argument(
"--group-by-executor",
action="store_true",
help="Group tests by executor name",
)
buildspec_find.add_argument(
"-m",
"--maintainers",
help="Get all maintainers for all buildspecs",
action="store_true",
)
buildsp |
agusmakmun/dracos-markdown-editor | martor/fields.py | Python | gpl-3.0 | 535 | 0 | # -*- coding: u | tf-8 -*-
from __future__ import unicode_literals
from django import forms
from .settings import MARTOR_ENABLE_LABEL
from .widgets import MartorWidget
class MartorFormField(forms.CharField):
def __init__(self, *args, **kwargs):
# to setup the editor without label
if not MARTOR_ENABLE_LABEL:
kwargs['label'] = ''
super(M | artorFormField, self).__init__(*args, **kwargs)
if not issubclass(self.widget.__class__, MartorWidget):
self.widget = MartorWidget()
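# Illustrative sketch (not part of the original module): using the field in a
# Django form. The form and field names are assumptions.
class _DemoPostForm(forms.Form):
    body = MartorFormField(required=True)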
|
anchor/vaultaire-tools | telemetry/marquise_throughput.py | Python | bsd-3-clause | 14,550 | 0.010584 | #!/usr/bin/env python
'''use marquise_telemetry to build throughput info as visible from the client
e.g.:
$ marquise_telemetry broker | marquise_throughput.py
'''
import sys
from time import *
import os
import fcntl
class TimeAware(object):
'''simple timing aware mixin
The default implementation of on_tick_change() is to call every function
passed to the constructor in tick_handlers
'''
def __init__(self, ticklen=1, tick_handlers=[]):
self.last_tick = self.start_time = time()
self.ticklen = ticklen
self.tick_handlers = tick_handlers
self.n_ticks = 0
self.totalticktime = 0
def check_for_tick_changed(self):
'''run on_tick_change once for every ticklen seconds that has passed since last_tick
'''
tnow = time()
while tnow - self.last_tick >= self.ticklen:
self.n_ticks += 1
self.totalticktime += self.ticklen
self.last_tick += self.ticklen
self.on_tick_change()
def on_tick_change(self):
'''handler for a tick change
the timestamp marking the 'tick' being handled is in self.last_tick
The current time may however be significantly after self.last_tick if
check_for_tick_changed is not called more often than self.ticklen
'''
for f in self.tick_handlers: f()
def run_forever(self,sleep_time=None):
'''run in a loop regularly calling on_tick_change
'''
if sleep_time == None: sleep_time = self.ticklen/10.0
while True:
self.check_for_tick_changed()
sleep(sleep_time)
class TimeHistogram(TimeAware):
'''implements a rolling histogram'''
def __init__(self, nbins, seconds_per_bin=1):
TimeAware.__init__(self, seconds_per_bin)
self.nbins = nbins
self._bins = [0 for n in range(nbins)]
self.current_bin = 0
def on_tick_change(self):
self.current_bin = (self.current_bin + 1) % self.nbins
self._bins[self.current_bin] = 0
def add(self, n=1):
'''add 'n' to the current histogram bin
'''
self.check_for_tick_changed()
self._bins[self.current_bin] += n
def sum(self, k=60):
        '''return the total number of entries over the last k seconds
'''
| bins_to_check = k/self.ticklen
return sum(self.bins[-bins_to_check:])
def mean(self, k=60):
'''return the mean entries per second over the last k seconds
'''
if self.totalticktime < k:
k = self.totalticktime # Only average over t | he time we've been running
bins_to_check = k/self.ticklen
return self.sum(k) / float(bins_to_check) if bins_to_check else 0
@property
def bins(self):
'''get bins in time order, oldest to newest'''
self.check_for_tick_changed()
return self._bins[self.current_bin+1:]+self._bins[:self.current_bin+1]
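# Illustrative sketch (not part of the original module): a 60-bin rolling
# histogram at one second per bin; sum(60) totals the last minute.
def _demo_histogram():
    hist = TimeHistogram(60, seconds_per_bin=1)
    hist.add(5)
    hist.add(3)
    return hist.sum(60), hist.mean(60)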
class ThroughputCounter(object):
def __init__(self, input_stream=sys.stdin):
self.input_stream=input_stream
self.point_hist = TimeHistogram(600)
self.burst_hist = TimeHistogram(600)
self.acked_burst_hist = TimeHistogram(600)
self.latency_hist = TimeHistogram(600)
self.ack_hist = TimeHistogram(600)
self.defer_write_points_hist = TimeHistogram(600)
self.defer_read_points_hist = TimeHistogram(600)
self.timed_out_points_hist = TimeHistogram(600)
self.outstanding_points = 0
self.outstanding_bursts = {} # burstid -> start timestamp,points
self._reader_state = {}
self.using_marquised = set() # Hosts that relay through marquised
def get_outstanding(self,last_n_seconds=[600,60,1]):
total_burst_counts = map(self.point_hist.sum, last_n_seconds)
total_ack_counts = map(self.ack_hist.sum, last_n_seconds)
return [nbursts-nacks for nbursts,nacks in zip(total_burst_counts,total_ack_counts)]
def get_total_outstanding_points(self):
return sum(points for timestamp,points in self.outstanding_bursts.itervalues())
def get_points_per_seconds(self,over_seconds=[600,60,1]):
return map(self.point_hist.mean, over_seconds)
def get_total_bursts(self,over_seconds=[600,60,1]):
return map(self.burst_hist.mean, over_seconds)
def get_acks_per_second(self,over_seconds=[600,60,1]):
return map(self.ack_hist.mean, over_seconds)
def get_deferred_points_written_per_second(self,over_seconds=[600,60,1]):
return map(self.defer_write_points_hist.mean, over_seconds)
def get_timed_out_points_per_second(self,over_seconds=[600,60,1]):
return map(self.timed_out_points_hist.mean, over_seconds)
def get_deferred_points_read_per_second(self,over_seconds=[600,60,1]):
return map(self.defer_read_points_hist.mean, over_seconds)
def get_average_latencies(self,over_seconds=[600,60,1]):
burst_counts = map(self.acked_burst_hist.sum, over_seconds)
latency_sums = map(self.latency_hist.sum, over_seconds)
return [latencysum/float(nbursts) if nbursts > 0 else 0 for latencysum,nbursts in zip(latency_sums,burst_counts)]
def process_burst(self, data):
if not all(k in data for k in ('identity','message id','points')):
print >> sys.stderr, 'malformed databurst info. ignoring'
return
msgtag = data['identity']+data['message id']
points = int(data['points'])
timestamp = time()
self.outstanding_bursts[msgtag] = timestamp,points
self.outstanding_points += points
self.burst_hist.add(1)
self.point_hist.add(points)
def _msg_tag_from_data(self, data):
return (data['identity'].replace('marquised:',''))+data['message id']
def process_deferred_write(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.defer_write_points_hist.add(points)
def process_deferred_read(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.defer_read_points_hist.add(points)
def process_send_timeout(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.timed_out_points_hist.add(points)
def process_ack(self, data):
if not all(k in data for k in ('identity','message id')):
print >> sys.stderr, 'malformed ack info. ignoring'
return
if data['identity'][:10] == 'marquised:':
# ACK is coming back to marquised from the broker
host = data['identity'][10:]
self.using_marquised.add(host)
else:
host = data['identity']
if host in self.using_marquised:
# If a client is using marquised, that client will
# recieve an ack back from marquised immediately.
#
# We ignore this ack here, and wait for the one
# received by marquised
return
msgtag = host+data['message id']
burst_timestamp,points = self.outstanding_bursts.pop(msgtag,(None,None))
if burst_timestamp == None:
# Got an ACK we didn't see the burst for. Ignoring it.
return
latency = time() - burst_timestamp
self.ack_hist.add(points)
self.acked_burst_hist.add(1)
self.latency_hist.add(latency)
self.outstanding_points -= points
def process_line(self, line):
'''process a line of marquise telemetry
At the moment, only look at bursts being created by the collate_thread
and acked by the marquise poller_thread
sample:
fishhook.engineroom.anchor.net.au 1395212041732118000 8c087c0b collator_thread created_databurst frames = 1618 compressed_bytes = 16921
....
|
jonparrott/google-cloud-python | bigquery_datatransfer/google/cloud/bigquery_datatransfer_v1/proto/datatransfer_pb2_grpc.py | Python | apache-2.0 | 16,649 | 0.005346 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.bigquery_datatransfer_v1.proto import datatransfer_pb2 as google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2
from google.cloud.bigquery_datatransfer_v1.proto import transfer_pb2 as google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DataTransferServiceStub(object):
"""The Google BigQuery Data Transfer Service API enables BigQuery users to
configure the transfer of their data from other Google Products into BigQuery.
This service contains methods that are end user exposed. It backs up the
frontend.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetDataSource = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetDataSource',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetDataSourceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DataSource.FromString,
)
self.ListDataSources = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListDataSources',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesResponse.FromString,
)
self.CreateTransferConfig = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/CreateTransferConfig',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CreateTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
)
self.UpdateTransferConfig = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/UpdateTransferConfig',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.UpdateTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
)
self.DeleteTransferConfig = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferConfig',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetTransferConfig = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferConfig',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
)
self.ListTransferConfigs = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferConfigs',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsResponse.FromString,
)
self.ScheduleTransferRuns = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/ScheduleTransferRuns',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsResponse.FromString,
)
self.GetTransferRun = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferRun',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferRunRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferRun.FromString,
)
self.DeleteTransferRun = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferRun',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferRunRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListTransferRuns = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferRuns',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsResponse.FromString,
)
self.ListTransferLogs = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferLogs',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsResponse.FromString,
)
self.CheckValidCreds = channel.unary_unary(
'/google.cloud.bigquery.datatransfer.v1.DataTransferService/CheckValidCreds',
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsResponse.FromString,
)
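# Example (hypothetical endpoint; not part of the generated file): the stub
# wraps a gRPC channel, e.g.
#     channel = grpc.insecure_channel('localhost:50051')
#     stub = DataTransferServiceStub(channel)
#     response = stub.ListDataSources(
#         google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest(
#             parent='projects/my-project'))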
class DataTransferServiceServicer(object):
"""The Google BigQuery Data Transfer Service API enables BigQuery users to
configure th | e transfer of their data from other Google P | roducts into BigQuery.
This service contains methods that are end user exposed. It backs up the
frontend.
"""
def GetDataSource(self, request, context):
"""Retrieves a supported data source and returns its settings,
which can be used for UI rendering.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListDataSources(self, request, context):
"""Lists supported data sources and returns their settings,
which can be used for UI rendering.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateTransferConfig(self, request, context):
"""Creates a new data transfer configuration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateTransferConfig(self, request, context):
"""Updates a data transfer configuration.
All fields must be set, even if they are not updated.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteTransferConfig(self, request, context):
"""Deletes a data transfer configuration,
incl |
codewarrior0/Shiboken | tests/py3kcompat.py | Python | gpl-2.0 | 1,372 | 0.002915 | # -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 | USA
import sys
IS_PY3K = sys.version_info[0] == 3
if IS_PY3K:
def unicode(s):
return s
def b(s):
return bytes(s, "UTF8")
def l(n):
return n
long = int
else:
def b(s):
return s
def l(n):
return long(n)
unicode = unicode
l | ong = long
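# Illustrative usage of the shims above:
#     data = b("payload")       # bytes on py3, str on py2
#     big = l(2 ** 40)          # int on py3, long on py2
#     text = unicode("hello")   # the text type on both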
|
ninastoessinger/word-o-mat | word-o-mat.roboFontExt/lib/wordomat.py | Python | mit | 30,817 | 0.005584 | # coding=utf-8
#
"""
word-o-mat is a RoboFont extension that generates test words for type testing, sketching etc.
I assume no responsibility for inappropriate words found on those lists and rendered by this script :)
v2.2.4 / Nina Stössinger / 31.05.2015
Thanks to Just van Rossum, Frederik Berlaen, Tobias Frere-Jones, Joancarles Casasín, James Edmondson
Also to Roberto Arista, Sindre Bremnes, Mathieu Christe/David Hodgetts for help with wordlists
"""
import codecs
import re
import webbrowser
from lib.UI.noneTypeColorWell import NoneTypeColorWell
from lib.UI.spaceCenter.glyphSequenceEditText import GlyphSequenceEditText
from mojo.events import addObserver, removeObserver
from mojo.extensions import *
from mojo.roboFont import OpenWindow
from mojo.UI import OpenSpaceCenter, AccordionView
from vanilla.dialogs import getFile, message
from vanilla import *
from random import choice
import wordcheck
try:
reload
except NameError:
# in py3
from importlib import reload
reload(wordcheck)
warned = False
class WordomatWindow:
def __init__(self):
"""Initialize word-o-mat UI, open the window."""
# load stuff
self.loadPrefs()
self.loadDictionaries()
# Observers for font events
addObserver(self, lambda: self.g1.base.enable(True), "fontDidOpen")
addObserver(self, "fontClosed", "fontWillClose")
# The rest of this method is just building the window / interface
self.w = Window((250, 414), 'word-o-mat', minSize=(250,111), maxSize=(250,436))
padd, bPadd = 12, 3
groupW = 250 - 2*padd
# Panel 1 - Basic Settings
self.g1 = Group((padd, 8, groupW, 104))
topLineFields = {
"wordCount": [0, self.wordCount, 20],
"minLength": [108, self.minLength, 3],
"maxLength": [145, self.maxLength, 10],
}
topLineLabels = {
"wcText": [31, 78, 'words with', 'left'],
"lenTextTwo": [133, 10, u'–', 'center'],
"lenTextThree": [176, -0, 'letters', 'left'],
}
for label, values in topLineFields.items():
setattr(self.g1, label, EditText((values[0], 0, 28, 22), text=values[1], placeholder=str(values[2])))
for label, values in topLineLabels.items():
setattr(self.g1, label, TextBox((values[0], 3, values[1], 22), text=values[2], alignment=values[3]))
# language selection
languageOptions = list(self.languageNames)
languageOptions.extend(["OSX Dictionary", "Any language", "Custom wordlist..."])
self.g1.source = PopUpButton((0, 32, 85, 20), [], sizeStyle="small", callback=self.changeSourceCallback)
self.g1.source.setItems(languageOptions)
self.g1.source.set(int(self.source))
# case selection
caseList = [u"don’t change case", "make lowercase", "Capitalize", "ALL CAPS"]
self.g1.case = PopUpButton((87, 32, -0, 20), caseList, sizeStyle="small")
self.g1.case.set(self.case)
# character set
charsetList = ["Use any characters", "Use characters in current font", "Use only selected glyphs", "Use only glyphs with mark color:"]
self.g1.base = PopUpButton((0, 61, -0, 20), charsetList, callback=self.baseChangeCallback, sizeStyle="small")
if not CurrentFont():
self.g1.base.set(0) # Use any
self.g1.base.enable(False) # Disable selection
else:
if self.limitToCharset == False:
self.g1.base.set(0) # Use any
else:
self.g1.base.set(1) # Use current font
# mark color selection
self.g1.colorWell = NoneTypeColorWell((-22, 61, -0, 22))
self.g1.colorWell.set(None)
# populate from prefs
if self.reqMarkColor != "None": # initial pref
try:
r, g, b, a = self.reqMarkColor
savedColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, a)
self.g1.colorWell.set(savedColor)
except:
pass
if self.g1.base.get() != 3:
self.g1.colorWell.show(0)
# Panel 2 - Match letters
self.g2 = Group((0, 2, 250, 172))
# Match mode selection
matchBtnItems = [
dict(width=40, title="Text", enabled=True),
dict(width=120, title="GREP pattern match", enabled=True)
]
self.g2.matchMode = SegmentedButton((50, 5, -0, 20), matchBtnItems, callback=self.switchMatch | ModeCallback, sizeStyle="small")
| rePanelOn = 1 if self.matchMode == "grep" else 0
self.g2.matchMode.set(rePanelOn)
# Text/List match mode
self.g2.textMode = Box((padd,29,-padd,133))
labelY = [2, 42]
labelText = ["Require these letters in each word:", "Require one per group in each word:"]
for i in range(2):
setattr(self.g2.textMode, "reqLabel%s" % i, TextBox((bPadd, labelY[i], -bPadd, 22), labelText[i], sizeStyle="small"))
self.g2.textMode.mustLettersBox = EditText((bPadd+2, 18, -bPadd, 19), text=", ".join(self.requiredLetters), sizeStyle="small")
### consider using a subclass that allows copy-pasting of glyphs to glyphnames
y2 = 37
attrNameTemplate = "group%sbox"
for i in range(3):
j = i+1
y2 += 20
optionsList = ["%s: %s" % (key, ", ".join(value)) for key, value in self.groupPresets]
if len(self.requiredGroups[i]) > 0 and self.requiredGroups[i][0] != "":
optionsList.insert(0, "Recent: " + ", ".join(self.requiredGroups[i]))
attrName = attrNameTemplate % j
setattr(self.g2.textMode, attrName, ComboBox((bPadd+2, y2, -bPadd, 19), optionsList, sizeStyle="small"))
groupBoxes = [self.g2.textMode.group1box, self.g2.textMode.group2box, self.g2.textMode.group3box]
for i in range(3):
if len(self.requiredGroups[i]) > 0 and self.requiredGroups[i][0] != "":
groupBoxes[i].set(", ".join(self.requiredGroups[i]))
# RE match mode
self.g2.grepMode = Box((padd,29,-padd,133))
self.g2.grepMode.label = TextBox((bPadd, 2, -bPadd, 22), "Regular expression to match:", sizeStyle="small")
self.g2.grepMode.grepBox = EditText((bPadd+2, 18, -bPadd, 19), text=self.matchPattern, sizeStyle="small")
splainstring = u"This uses Python’s internal re parser.\nExamples:\nf[bhkl] = words with f followed by b, h, k, or l\n.+p.+ = words with p inside them\n^t.*n{2}$ = words starting with t, ending in nn"
self.g2.grepMode.explainer = TextBox((bPadd, 42, -bPadd, 80), splainstring, sizeStyle="mini")
self.g2.grepMode.refButton = Button((bPadd, 108, -bPadd, 14), "go to syntax reference", sizeStyle="mini", callback=self.loadREReference)
self.g2.grepMode.show(0)
self.toggleMatchModeFields() # switch to text or grep panel depending
# Panel 3 - Options
self.g3 = Group((padd, 5, groupW, 48))
self.g3.checkbox0 = CheckBox((bPadd, 2, 18, 18), "", sizeStyle="small", value=self.banRepetitions)
self.g3.checkLabel = TextBox((18, 4, -bPadd, 18), "No repeating characters per word", sizeStyle="small")
self.g3.listOutput = CheckBox((bPadd, 18, 18, 18), "", sizeStyle="small")
self.g3.listLabel = TextBox((18, 20, -bPadd, 18), "Output as list sorted by width", sizeStyle="small")
# Display Accordion View
accItems = [
dict(label="Basic settings", view=self.g1, size=104, collapsed=False, canResize=False),
dict(label="Specify required letters", view=self.g2, size=173, collapsed=False, canResize=False),
dict(label="Options", view=self.g3, size=48, collapsed=False, canResize=False)
]
self.w.panel1 = Group((0, 0, 250, -35))
self.w.panel1.accView = AccordionView((0, 0, -0, -0), accItems)
self.w.submit = Button((padd, -30, -padd, 22), 'make words!', callback=self.makeWords)
self.w.bind("close", self.windowClose)
self.w.setDefaultButton(self.w.submit)
self.w.open()
def l |
pch957/python-bts-v0.9 | scripts/bts_publish_market.py | Python | mit | 8,079 | 0.000124 | #!/usr/bin/env python2
# coding=utf8 sw=1 expandtab ft=python
from __future__ import print_function
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp import auth
from autobahn.twisted.util import sleep
from twisted.internet.protocol import ReconnectingClientFactory
from autobahn.websocket.protocol import parseWsUrl
from autobahn.twisted import wamp, websocket
from autobahn.wamp import types
import time
import os
import json
from bts.market import BTSMarket
from bts.api import BTS
def get_prefix(quote, base):
prefix = "%s_%s" % (quote, base)
prefix = prefix.replace('.', '-')
return prefix
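# e.g. get_prefix("USD", "BTS.X") -> "USD_BTS-X"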
class PublishMarket(object):
def __init__(self):
self.pusher = None
self.order_book = {}
self.order_book_brief = {}
self.load_config()
self.init_market()
def load_config(self):
config_file = os.getenv("HOME")+"/.python-bts/publish_market.json"
fd_config = open(config_file)
self.config = json.load(fd_config)
fd_config.close()
def init_market(self):
config_file = os.getenv("HOME") + "/.python-bts/bts_client.json"
fd_config = open(config_file)
config_bts = json.load(fd_config)[self.config["bts_client"]]
fd_config.close()
self.bts_client = BTS(config_bts["user"], config_bts["password"],
config_bts["host"], config_bts["port"])
self.market = BTSMarket(self.bts_client)
client_info = self.bts_client.get_info()
self.height = int(client_info["blockchain_head_block_num"])
def myPublish(self, topic, event):
if self.pusher:
self.pusher.publish(topic, event, c=topic)
def publish_deal_trx(self, deal_trx):
for trx in deal_trx:
if trx["type"] == "bid":
deal_type = "buy"
else:
deal_type = "sell"
prefix = get_prefix(trx["quote"], trx["base"])
format_trx = [prefix, trx["block"], trx["timestamp"],
deal_type, trx["price"], trx["volume"]]
self.myPublish(
u'bts.orderbook.%s.trx' % (format_trx[0]), format_trx[1:])
self.myPublish(u'bts.orderbook.trx', format_trx)
print(format_trx)
def publish_place_trx(self, place_trx):
trx_id = ""
for trx in place_trx:
if trx_id == trx["trx_id"]:
continue
prefix = get_prefix(trx["quote"], trx["base"])
trx_id = trx["trx_id"]
if trx["cancel"]:
trx["type"] = "cancel " + trx["type"]
format_trx = [prefix, trx["block"], trx["timestamp"],
trx["type"], trx["price"], trx["amount"]]
self.myPublish(
u'bts.orderbook.%s.order' % (format_trx[0]), format_trx[1:])
self.myPublish(u'bts.orderbook.order', format_trx)
print(format_trx)
def publish_order_book(self):
market_list = self.config["market_list"]
for quote, base in market_list:
prefix = get_prefix(quote, base)
order_book = self.market.get_order_book(
quote, base, cover=True)
order_book["bids"] = order_book["bids"][:10]
order_book["asks"] = order_book["asks"][:10]
if (prefix not in self.order_book or
self.order_book[prefix] != order_book):
self.order_book[prefix] = order_book
self.myPublish(
u'bts.orderbook.%s' % prefix, order_book)
self.publish_order_book_brief(quote, base, order_book)
def publish_order_book_brief(self, quote, base, order_book):
prefix = get_prefix(quote, base)
order_book_brief = {"quote": quote, "base": base,
"ask1": None, "bid1": None}
if order_book["bids"]:
order_book_brief["bid1"] = order_book["bids"][0][0]
if order_book["asks"]:
order_book_brief["ask1"] = order_book["asks"][0][0]
if (prefix not in self.order_book_brief or
self.order_book_brief[prefix] != order_book_brief):
self.order_book_brief[prefix] = order_book_brief
self.myPublish(
u'bts.orderbook.%s.brief' % prefix, order_book_brief)
def execute(self):
client_info = self.bts_client.get_info()
height_now = int(client_info["blockchain_head_block_num"])
if self.height < height_now:
time_stamp = client_info["blockchain_head_block_timestamp"]
self.myPublish(u'bts.blockchain.info',
{"height": height_now, "time_stamp": time_stamp})
self.publish_order_book()
while self.height < height_now:
self.height += 1
trxs = self.bts_client.get_block_transactions(
self.height)
recs = self.market.get_order_deal_rec(self.height)
self.publish_deal_trx(recs)
recs = self.market.get_order_place_rec(trxs)
self.publish_place_trx(recs)
self.market.update_order_owner(recs)
class Component(ApplicationSession):
task_token = 0
config = {}
publish_market = None
@staticmethod
def init():
Component.publish_market = PublishMarket()
config_file = os.getenv("HOME")+"/.python-bts/pusher.json"
fd_config = open(config_file)
Component.config = json.load(fd_config)
fd_config.close()
def onConnect(self):
self.join(self.config.realm, [u"wampcra"], Component.config["user"])
def onChallenge(self, challenge):
key = Component.config["password"].encode('utf8')
signature = auth.compute_wcs(
key, challenge.extra['challenge'].encode('utf8'))
return signature.decode('ascii')
@inlineCallbacks
def onJoin(self, details):
Component.task_token += 1
my_token = Component.task_token
print("session attached")
Component.publish_market.pusher = self
period = float(
Component.publish_m | arket.bts_client.chain_info["block_interval"])
while my_tok | en == Component.task_token:
try:
Component.publish_market.execute()
except Exception as e:
print(e)
now = time.time()
nexttime = int(time.time()/period + 1)*period - now
yield sleep(nexttime+1)
def onLeave(self, details):
Component.publish_market.pusher = None
#print("onLeave: {}".format(details))
print("onLeave()")
def onDisconnect(self):
print("onDisconnect()")
class MyClientFactory(websocket.WampWebSocketClientFactory,
ReconnectingClientFactory):
maxDelay = 30
def clientConnectionFailed(self, connector, reason):
# print "reason:", reason
ReconnectingClientFactory.clientConnectionFailed(
self, connector, reason)
def clientConnectionLost(self, connector, reason):
print("Connection Lost")
# print "reason:", reason
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
if __name__ == '__main__':
Component.init()
# 1) create a WAMP application session factory
component_config = types.ComponentConfig(realm=Component.config["realm"])
session_factory = wamp.ApplicationSessionFactory(config=component_config)
session_factory.session = Component
# 2) create a WAMP-over-WebSocket transport client factory
url = Component.config["url"]
transport_factory = MyClientFactory(session_factory, url=url, debug=False)
# 3) start the client from a Twisted endpoint
isSecure, host, port, resource, path, params = parseWsUrl(url)
transport_factory.host = host
transport_factory.port = port
websocket.connectWS(transport_factory)
# 4) now enter the Twisted reactor loop
reactor.run()
|
Sverchok/SverchokRedux | core/compiler.py | Python | gpl-3.0 | 376 | 0.007979 | from .execute import GraphNode
from . | import preprocess
def compile(layout_dict):
preprocess.proprocess(layout_dict)
# get nodes without any outputs
root_nodes = layout_dict["nodes"].keys() - {l[0] for l in layout_dict["links"]}
graph_dict = {}
ou | t = [GraphNode.from_layout(root_node, layout_dict, graph_dict) for root_node in root_nodes]
return out
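# Usage sketch (assumed layout format, where each link is a
# (from_node, to_node) pair):
#     layout = {"nodes": {"a": {}, "b": {}}, "links": [("a", "b")]}
#     graph = compile(layout)  # "b" never feeds another node, so it is
#                              # the root the graph is traversed from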
|
quimaguirre/diana | diana/toolbox/parse_clinical_trials.py | Python | mit | 12,367 | 0.03105 | ##############################################################################
# Clinical trials parser
#
# eg 2013-2016
##############################################################################
import cPickle, os, re
def main():
#base_dir = "../data/ct/"
base_dir = "/home/eguney/data/ct/"
file_name = base_dir + "ct.csv"
output_data(base_dir, file_name)
return
def output_data(base_dir, file_name):
drug_to_ctids = get_interventions(base_dir, include_other_names=True) #False)
print len(drug_to_ctids), drug_to_ctids.items()[:5]
ctid_to_conditions = get_ctid_to_conditions(base_dir)
print len(ctid_to_conditions), ctid_to_conditions.items()[:5]
ctid_to_values = get_ctid_to_details(base_dir)
print len(ctid_to_values), ctid_to_values.items()[:5]
f = open(file_name, 'w')
f.write("Drug\tClinical trial Id\tPhase\tStatus\tFDA regulated\tWhy stopped\tResults date\tConditions\n")
for drug, ctids in drug_to_ctids.iteritems():
for ctid in ctids:
values = [ drug, ctid ]
if ctid in ctid_to_values:
#phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
values.extend(ctid_to_values[ctid])
if ctid in ctid_to_conditions:
conditions = ctid_to_conditions[ctid]
values.append(" | ".join(conditions))
f.write("%s\n" % "\t".join(values))
f.close()
return
def get_disease_specific_drugs(drug_to_diseases, phenotype_to_mesh_id):
disease_to_drugs = {}
mesh_id_to_phenotype = {}
for phenotype, mesh_id in phenotype_to_mesh_id.items():
mesh_id_to_phenotype[mesh_id] = phenotype
for drugbank_id, diseases in drug_to_diseases.iteritems():
for phenotype, dui, val in diseases:
if val > 0:
if dui in mesh_id_to_phenotype: # In the disease data set
disease = mesh_id_to_phenotype[dui].lower()
disease_to_drugs.setdefault(disease, set()).add(drugbank_id)
return disease_to_drugs
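# Example (hypothetical ids): with drug_to_diseases =
# {'DB00001': set([('Asthma', 'D001249', 0.8)])} and phenotype_to_mesh_id =
# {'Asthma': 'D001249'}, the result is {'asthma': set(['DB00001'])}.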
def get_drug_disease_mapping(base_dir, selected_drugs, name_to_drug, synonym_to_drug, mesh_id_to_name, mesh_id_to_name_with_synonyms, dump_file):
if os.path.exists(dump_file):
drug_to_diseases = cPickle.load(open(dump_file))
return drug_to_diseases
# Get mesh name to mesh id mapping
mesh_name_to_id = {}
for mesh_id, names in mesh_id_to_name_with_synonyms.iteritems():
for name in names:
for name_mod in [ name, name.replace(",", ""), name.replace("-", " "), name.replace(",", "").replace("-", " ") ]:
mesh_name_to_id[name_mod] = mesh_id
# Get CT info
drug_to_ctids, ctid_to_conditions, ctid_to_values = get_ct_data(base_dir, include_other_names=True)
# Get CT - MeSH disease mapping
intervention_to_mesh_name = {}
interventions = reduce(lambda x,y: x|y, ctid_to_conditions.values())
for intervention in interventions:
if intervention.endswith('s'):
intervention = intervention[:-1]
idx = intervention.find("(")
if idx != -1:
intervention = intervention[:idx].rstrip()
try:
exp = re.compile(r"\b%ss{,1}\b" % re.escape(intervention))
except:
print "Problem with regular expression:", intervention
for mesh_name, dui in mesh_name_to_id.iteritems():
m = exp.search(mesh_name)
if m is None:
continue
elif len(mesh_name.split()) != len(intervention.split()): # no partial overlap
continue
phenotype = mesh_id_to_name[dui]
intervention_to_mesh_name[intervention] = phenotype
break
#print len(intervention_to_mesh_name), intervention_to_mesh_name.items()[:5]
# Get interventions
phase_to_value = { "Phase 0": 0.5, "Phase 1": 0.6, "Phase 1/Phase 2": 0.65, "Phase 2": 0.7, "Phase 2/Phase 3": 0.75, "Phase 3": 0.8, "Phase 3/Phase 4":0.85, "Phase 4": 0.9, "N/A": 0.5 }
status_to_value = { "Terminated": -0.5, "Withdrawn": -1} #,"Completed", "Recruiting", "Not yet recruiting"
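# e.g. a "Phase 3" trial scores 0.8; if its status is Terminated or
# Withdrawn, 0.1 is subtracted below, giving 0.7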
drug_to_diseases = {}
drug_to_diseases_n_study = {}
non_matching_drugs = set()
for drug, ctids in drug_to_ctids.iteritems():
drugbank_id = None
if name_to_drug is None:
drugbank_id = drug
else:
if drug in name_to_drug:
drugbank_id = name_to_drug[drug]
elif drug in synonym_to_drug:
drugbank_id = synonym_to_drug[drug]
else:
non_matching_drugs.add(drug)
continue
if selected_drugs is not None and drugbank_id not in selected_drugs:
continue
phenotype_to_count = {}
for ctid in ctids:
phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
val = 0.5
if phase not in phase_to_value:
print "Unknown phase:", phase
if status in status_to_value and phase in phase_to_value:
val = phase_to_value[phase] - 0.1
for intervention in ctid_to_conditions[ctid]:
if intervention not in intervention_to_mesh_name:
continue
phenotype = intervention_to_mesh_name[intervention]
i = phenotype_to_count.setdefault(phenotype, 0)
phenotype_to_count[phenotype] = i + 1
dui = mesh_name_to_id[phenotype]
# Phase based value assignment
drug_to_diseases.setdefault(drugbank_id, set()).add((phenotype, dui, val))
# Number of study based value assignment
for phenotype, val in phenotype_to_count.iteritems():
dui = mesh_name_to_id[phenotype]
drug_to_diseases_n_study.setdefault(drugbank_id, set()).add((phenotype, dui, val))
#drug_to_diseases = drug_to_diseases_n_study
#print "Non matching drugs:", len(non_matching_drugs)
#print len(drug_to_diseases), drug_to_diseases.items()[:5]
cPickle.dump(drug_to_diseases, open(dump_file, 'w') | )
return drug_to_diseases
def get_ct_data(base_dir, includ | e_other_names=True, dump_file=None):
if dump_file is not None and os.path.exists(dump_file):
values = cPickle.load(open(dump_file))
#drug_to_ctids, ctid_to_conditions, ctid_to_values = values
return values
drug_to_ctids = get_interventions(base_dir, include_other_names)
ctid_to_conditions = get_ctid_to_conditions(base_dir)
ctid_to_values = get_ctid_to_details(base_dir)
values = drug_to_ctids, ctid_to_conditions, ctid_to_values
if dump_file is not None:
cPickle.dump(values, open(dump_file, 'w'))
return values
def get_ctid_to_conditions(base_dir):
condition_file = base_dir + "conditions.txt"
condition_file2 = base_dir + "condition_browse.txt"
# Get conditions
ctid_to_conditions = {}
f = open(condition_file)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
return ctid_to_conditions
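# NOTE: the early return above makes the condition_browse.txt pass below
# unreachable; that file is effectively not parsed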
f = open(condition_file2)
f.readline()
for line in f:
words = line.strip().split("|")
ctid = words[1]
condition = words[2] #.lower()
ctid_to_conditions.setdefault(ctid, set()).add(condition)
f.close()
return ctid_to_conditions
def get_ctid_to_details(base_dir):
study_file = base_dir + "clinical_study.txt" # _noclob
# Get phase etc information
f = open(study_file)
line = f.readline()
words = line.strip().split("|")
header_to_idx = dict((word.lower(), i) for i, word in enumerate(words))
text = None
ctid_to_values = {}
while line:
line = f.readline()
if line.startswith("NCT"):
if text is not None:
words = text.strip().split("|")
ctid = words[0]
try:
phase = words[header_to_idx["phase"]]
status = words[header_to_idx["overall_status"]]
fda_regulated = words[header_to_idx["is_fda_regulated"]]
why_stopped = words[header_to_idx["why_stopped"]]
results_date = words[header_to_idx["firstreceived_results_date"]]
except:
print words
return
if phase.strip() != "":
ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
text = line
else:
text += line
f.close()
words = text.strip().split("|")
ctid = words[0]
phase = words[header_to_idx["phase"]]
status = words[header_to_idx["overall_status"]]
if phase.strip() != "":
ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
return ctid_to_values
def get_interventions(base_dir, include |
jmchilton/galaxy-central | galaxy/app.py | Python | mit | 1,636 | 0.017115 | from galaxy import config, tools, jobs, web
import galaxy.model.mapping
class UniverseApplication( object ):
"""Encapsulates the state of a Universe application"""
def __init__( self, **kwargs ):
# Read config file and check for errors
self.config = config.Configuration( **kwargs )
self.config.check()
config.configure_logging( self.config )
# Connect up the object model
if self.config.database_connection:
self.model = galaxy.model.mapping.init( self.config.file_path,
self.config.database_connection,
create_tables = True )
else:
self.model = galaxy.model.mapping.init( self.config.file_path,
"sqlite://%s?isolation_level=IMMEDIATE" % self.config.database,
create_tables = True )
# | Initialize the tools
self.toolbox = tools.ToolBox( self.config.tool_config, self.config.tool_path )
# Start the job queue
self.job_queue = jobs.JobQueue( self.config.job_queue_workers, self )
self.heartbeat = None
# Start the heartbeat process if configured and available
if self.config.use_heartbeat:
from galaxy import heartbeat
if heartbeat.Heartbeat:
self.heartbeat = heartbeat.Heartbeat()
| self.heartbeat.start()
def shutdown( self ):
self.job_queue.shutdown()
if self.heartbeat:
self.heartbeat.shutdown() |
okfn/jsontableschema-py | tests/types/test_yearmonth.py | Python | mit | 842 | 0 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
| from tableschema import types
from tableschema.config import ERROR
# Tests
@pytest.mark.parametrize('format, value, result', [
('default', [2000, 10], (2000, 10)),
('default', (2000, 10), (2000, 10)),
('default', '2000-10', (2000, 10)),
('default | ', (2000, 10, 20), ERROR),
('default', '2000-13-20', ERROR),
('default', '2000-13', ERROR),
('default', '2000-0', ERROR),
('default', '13', ERROR),
('default', -10, ERROR),
('default', 20, ERROR),
('default', '3.14', ERROR),
('default', '', ERROR),
])
def test_cast_yearmonth(format, value, result):
assert types.cast_yearmonth(format, value) == result
|
gabegaster/python-timer | timer/timer.py | Python | mit | 3,254 | 0.000307 | '''
# Gabe Gaster, 2013
#
# about every minute, eta will report how many iterations
# have been performed and the Expected Time to Completion (the E.T.C.)
#
#####################################################################
#
# Examples:
#
import time
import timer
for stuff in timer.show_progress(range(100)):
    # .... analysis here
    time.sleep(.5)
# prints (to standard error) :
# 0.1 min elapsed, 25.0 % done, ETC: 0.3 min
# 0.1 min elapsed, 45.0 % done, ETC: 0.3 min
# 0.2 min elapsed, 65.0 % done, ETC: 0.3 min
# 0.3 min elapsed, 85.0 % done, ETC: 0.3 min
'''
import time
import sys
def get_time_str(num_secs):
minutes = num_secs * 1. / 60
hours = minutes / 60
days = hours / 24
if num_secs < 60:
    if num_secs < .001:
        return "%5.3f microsec" % (num_secs * 10**6)
    elif num_secs < 1:
        return "%5.3f millisec" % (num_secs * 10**3)
    else:
        return "%5.1f sec" % num_secs
if minutes < 60:
return "%5.1f min" % minutes
elif hours < 24:
return "%5.1f hours" % hours
else:
return "%5.1f days" % days
def show_progress(iterable, update_time=60, length=None):
"""Wraps any iterable and passes values right through. Every
update_time's worth of seconds (defaulting to 60 seconds), print a
report on total time elapsed, percent done and estimated total
time of completion.
If length is not spe | cified, look to the length of iterable (if
it's defined in __len__). |
If there is no length, then report number (instead of percent)
done, and report time per iteration (instead of ETA).
"""
name, length = get_name_length(iterable, length)
start = last_update = time.time()
# if the length is unknown, don't estimate completion time, but
# still show periodic progress updates.
for count, thing in enumerate(iterable):
now = time.time()
if now - last_update > update_time:
last_update = now
msg = get_msg(count, length, now, start)
print >> sys.stderr, "%s : %s" % (name, msg)
yield thing
time_string = get_time_str(time.time() - start)
print >> sys.stderr, "%s : DONE in %s" % (name, time_string)
def get_name_length(iterable, length):
try:
name = iterable.__name__
except AttributeError:
name = "" # iterable.__repr__()
if not hasattr(iterable, "__iter__"):
raise TypeError("Object %s not iterable" % name)
elif length is None:
    try:
        length = len(iterable)
    except TypeError:
        # no __len__ (e.g. a generator): leave length as None so that
        # get_msg reports iteration counts instead of percentages
        length = None
return name, length
def get_msg(count, length, now, start):
time_elapsed = now - start
if length:
percent_done = 1. * count / length
if percent_done:
etc_secs = time_elapsed / percent_done
etc_time = get_time_str(etc_secs)
else:
etc_time = "NA"
msg = "%s elapsed, %5.3f%% done, ETC: %s"
msg = msg % (get_time_str(time_elapsed),
percent_done * 100,
etc_time)
else:
msg = "%s elapsed, %s done : %s per iter"
msg = msg % (get_time_str(time_elapsed),
count,
get_time_str(time_elapsed / count))
return msg
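# Example: a generator has no __len__, so pass length explicitly to get
# percent-done reporting (otherwise only counts are reported):
#     squares = (x * x for x in range(1000))
#     for v in show_progress(squares, update_time=5, length=1000):
#         pass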
|
asmaps/as_poweradmin | as_poweradmin/main/migrations/0001_initial.py | Python | mit | 4,679 | 0.007908 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CustomerProfile'
db.create_table(u'main_customerprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal(u'main', ['CustomerProfile'])
def backwards(self, orm):
# Deleting model 'CustomerProfile'
db.delete_table(u'main_customerprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True | '})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cont | enttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.customerprofile': {
'Meta': {'object_name': 'CustomerProfile'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['main'] |
Multiscale-Genomics/mg-dm-api | tests/test_meta_modification.py | Python | apache-2.0 | 1,367 | 0.000732 | """
.. See the NOTICE file distributed with this work for additio | nal information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
| http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dmp import dmp
def test_files_by_user():
"""
Test that it is possible to add and then remove a piece of meta data from
a pre-existing file within the DM API.
"""
user = "adam"
dm_handle = dmp(test=True)
results = dm_handle.get_files_by_user(user)
assert isinstance(results, type([])) is True
file_id = dm_handle.add_file_metadata(user, results[0]['_id'], 'test', 'An example string')
result = dm_handle.get_file_by_id(user, file_id)
assert 'test' in result['meta_data'].keys()
dm_handle.remove_file_metadata(user, file_id, 'test')
result = dm_handle.get_file_by_id(user, file_id)
assert 'test' not in result['meta_data'].keys()
|
KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/core/checks/utils.py | Python | gpl-3.0 | 245 | 0 | import copy
from django.conf import settings
def patch_middleware_message(error):
if settings.MIDDLEWARE is None:
error = copy.copy(error)
error.msg = error.msg.replace('MIDDLEWARE', 'MIDDLEWARE_CLASSES | ')
return error
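# Example (hypothetical message): when settings.MIDDLEWARE is None the
# project still uses the old-style setting, so an error reading
# "MIDDLEWARE should be a list" comes back as
# "MIDDLEWARE_CLASSES should be a list"; otherwise it is returned unchanged.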
| |
wendlers/mpfshell | setup.py | Python | mit | 673 | 0 | #!/usr/bin/env python
from setuptools import setup
from mp import version
setup(
name="mpfshell",
version=version.FULL,
description="A simple shell based file explorer for ESP8266 and WiPy "
"Micropython devices.",
author="Stefan Wendler",
author_email="sw@kaltpost.de",
url="https: | //github.com/wendlers/mpfshell",
download_url="https://github.com/wendlers/mpfshell/archive/0.8.1.tar.gz",
install_requires=["pyserial", "colorama", "websocket_client"],
packages=["mp"],
keywords=["micropython", "shell", "file transfer", "development"],
classifiers=[],
| entry_points={"console_scripts": ["mpfshell=mp.mpfshell:main"]},
)
|
jasonamyers/pynash-click | complex/complex/logger.py | Python | mit | 467 | 0 | import | logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
file_log_handler = logging.FileHandler('combinator-cli.log')
logger.addHandler(file_log_handler)
stderr_log_handler = logging.StreamHandler()
logger.addHandler(stderr_log_handler)
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)
file_log_handler.setFormatter(formatter)
stderr_log_handler.setFormatter(formatter)
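# Example: with the setup above, messages at ERROR and higher go to both
# combinator-cli.log and stderr in the timestamped format:
#     logger.error("failed to fetch results")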
| |
cfjhallgren/shogun | examples/undocumented/python/features_dense_io.py | Python | gpl-3.0 | 325 | 0.046154 | #!/usr/bin/env python
parameter_list=[[]]
def features_dense_io():
from shogun import RealFeatu | res, CSVFile
feats=RealFeatures()
f=CSVFile("../data/fm_train_real.dat","r")
f.set_delimiter(" ")
feats.load(f)
return feats
if __name__= | ='__main__':
print('Dense Real Features IO')
features_dense_io(*parameter_list[0])
|
Antiun/stock-logistics-workflow | stock_move_backdating/__openerp__.py | Python | agpl-3.0 | 1,703 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012+ BREMSKERL-REIBBELAGWERKE EMMERLING GmbH & Co. KG
# Author Marco Dieckhoff
# Copyright (C) 2013 Agile Business Group sagl (<h | ttp://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# |
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Stock Move Backdating",
"version": "1.0",
'author': ['Marco Dieckhoff, BREMSKERL', 'Agile Business Group'],
"category": "Stock Logistics",
'website': 'www.bremskerl.com',
'license': 'AGPL-3',
"depends": ["stock"],
"summary": "Allows back-dating of stock moves",
"description": """This module allows to register old stock moves
(with date != now).
On stock moves, user can specify the "Actual Movement Date", that will be
used as movement date""",
'data': [
"view/stock_view.xml",
"wizard/stock_partial_picking_view.xml",
],
'demo': [],
'installable': False,
}
|
psychopy/psychopy | psychopy/demos/coder/misc/encrypt_data.py | Python | gpl-3.0 | 1,545 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Demo to illustrate encryption and decryption of a data file using pyFileSec
"""
from pyfilesec import SecFile, GenRSA
import os
# Logging is available, optional:
# from psychopy import logging
# logging.console.setLevel(logging.INFO)
# pfs.logging = logging
# We need a data file to encrypt, e.g., containing "sensitive" info:
datafile = 'data.txt'
with open(datafile, 'wb') as file:
file.write("confidential data, e.g., participant's drug-use history")
# To set up for encryption, give it to a SecFile:
sf = SecFile(datafile)
msg = 'make a file:\n file name: "%s"\n contents: "%s"'
print(msg % (sf.file, sf.snippet))
print(' is encrypted: %s' % sf.is_encrypted)
# These particular RSA keys are ONLY for testing
# see pyfilesec.genrsa() to make your own keys)
# paths to new tmp files that hold the keys
pubkey, privkey, passphrase = GenRSA().demo_rsa_keys()
# To encrypt the file, use the RSA public key | :
sf.encrypt(pubkey)
msg = 'ENCRYPT it:\n file name: "%s"\n contents (base64): "%s . . ."'
print(msg % (sf.file, sf.snippet))
pr | int(' is encrypted: %s' % sf.is_encrypted)
# To decrypt the file, use the matching RSA private key (and its passphrase):
sf.decrypt(privkey, passphrase)
msg = 'DECRYPT it:\n file name: "%s"\n contents: "%s"'
print(msg % (sf.file, sf.snippet))
print(' is encrypted: %s' % sf.is_encrypted)
# clean-up the tmp files:
for file in [sf.file, pubkey, privkey, passphrase]:
os.unlink(file)
# The contents of this file are in the public domain.
|
triump0870/movie_task | src/movie/models.py | Python | mit | 719 | 0.006954 | from django.db import models
from django.conf import settings
# Create your models here.
User = settings.AUTH_USER_MODEL
class Genre(models.Mode | l):
genre = models.CharField(max_length=30)
def __unicode__(self):
return self.genre
class Movie(models.Model):
name = models.CharField(max_length=255)
director = models.CharField(max_length=255)
genre = models.ManyToManyField(Genre, null=False, blank=False)
release = models.DateField(editable=True)
imdb_score = models.FloatField()
popularity = models.IntegerField()
owner = models.ForeignKey(User, related_name='movies', null=False, blank=False | ,on_delete=models.CASCADE)
def __unicode__(self):
return self.name
|
rasbt/protein-science | scripts-and-tools/strip_h/strip_h.py | Python | gpl-3.0 | 5,260 | 0.009316 | # Sebas | tian Raschka 2014
# Python 3 strip hydrogen atoms from PDB files
#
# run
# ./stip_h.py -h
# for help
#
import os
class Pdb(object):
""" Object that allows operations with protein files in PDB format. """
def __init__( | self, file_cont=[], pdb_code=""):
self.cont = []
self.fileloc = ""
if isinstance(file_cont, list):
self.cont = file_cont[:]
elif isinstance(file_cont, str):
try:
with open(file_cont, 'r') as pdb_file:
self.cont = [row.strip() for row in pdb_file.read().split('\n') if row.strip()]
except FileNotFoundError as err:
print(err)
def strip_h(self):
""" Removes hydrogen atoms from a PDB file content list """
out = []
for row in self.cont:
if len(row) > 5:
if row.startswith(('ATOM', 'HETATM', 'TER', 'ANISOU')):
if len(row) > 13 and (row[12] == 'H' or row[13] == 'H'):
continue
out.append(row)
return out
def write(self, outfile):
""" Writes PDB to output file. """
with open(outfile, 'w') as out:
for line in self.cont[:-1]:
out.write(line + '\n')
out.write(self.cont[-1])
def printout(self):
""" Prints PDB to output file to stdout. """
for line in self.cont:
print(line)
def find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None):
"""
Function that finds files in a directory based on substring matching.
Parameters
----------
substring : `str`
Substring of the file to be matched.
path : `str`
Path where to look.
recursive: `bool`
If true, searches subdirectories recursively.
check_ext: `str`
If string (e.g., '.txt'), only returns files that
match the specified file extension.
ignore_invisible : `bool`
If `True`, ignores invisible files (i.e., files starting with a period).
ignore_substring : `str`
Ignores files that contain the specified substring.
Returns
----------
results : `list`
List of the matched files.
"""
def check_file(f, path):
if not (ignore_substring and ignore_substring in f):
if substring in f:
compl_path = os.path.join(path, f)
if os.path.isfile(compl_path):
return compl_path
return False
results = []
if recursive:
for par, nxt, fnames in os.walk(path):
for f in fnames:
fn = check_file(f, par)
if fn:
results.append(fn)
else:
for f in os.listdir(path):
if ignore_invisible and f.startswith('.'):
continue
fn = check_file(f, path)
if fn:
results.append(fn)
if check_ext:
results = [r for r in results if os.path.splitext(r)[-1] == check_ext]
return results
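# Example (hypothetical paths): collect all .pdb files under ./structures,
# skipping any file whose name contains "backup":
#     hits = find_files('', './structures', recursive=True,
#                       check_ext='.pdb', ignore_substring='backup')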
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Renumber residues in a pdb file',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-i', '--input', help='Input PDB file or directory')
parser.add_argument('-o', '--output', help='Output PDB file or directory')
parser.add_argument('-r', '--recursive', action='store_true', help='Applies strip_h recursively if --input is a directory')
parser.add_argument('-v', '--version', action='version', version='v. 1.0')
args = parser.parse_args()
if not args.input:
print('{0}\nPlease provide an input file or directory.\n{0}'.format(50* '-'))
parser.print_help()
quit()
# if input is a directory
if os.path.isdir(args.input):
in_files = find_files(substring='',
path=args.input,
recursive=args.recursive,
check_ext='.pdb',
ignore_invisible=True,
ignore_substring=None)
if not args.output:
print('{0}\nWould strip the following files:\n{0}'.format(50*'-'))
for f in in_files:
print(f)
print('{0}\nPlease provide an output directory!\n{0}'.format(50*'-'))
parser.print_help()
quit()
out_files = [os.path.join(args.output, f.split(args.input)[-1]) for f in in_files]
for i,o in zip(in_files, out_files):
pdb = Pdb(file_cont=i)
pdb.cont = pdb.strip_h()
if not os.path.isdir(os.path.dirname(o)):
os.makedirs(os.path.dirname(o))
pdb.write(o)
# if input is a file:
else:
pdb = Pdb(file_cont=args.input)
pdb.cont = pdb.strip_h()
if not args.output:
pdb.printout()
else:
pdb.write(args.output)
|
westurner/pkgsetcomp | pkgsetcomp/pyrpo.py | Python | bsd-3-clause | 33,675 | 0.000475 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
"""Search for code repositories and generate reports"""
import datetime
import errno
import logging
import os
import pprint
import re
import subprocess
import sys
from collections import deque, namedtuple
from distutils.util import convert_path
from itertools import chain, imap, izip_longest
# TODO: arrow
from dateutil.parser import parse as parse_date
try:
from collections import OrderedDict as Dict
except ImportError as e:
Dict = dict
# def parse_date(*args, **kwargs):
# print(args)
# print(kwargs)
# logging.basicConfig()
log = logging.getLogger('repos')
dtformat = lambda x: x.strftime('%Y-%m-%d %H:%M:%S %z')
def itersplit(s, sep=None):
if not s:
yield s
return
exp = re.compile(r'\s+' if sep is None else re.escape(sep))
pos = 0
while True:
m = exp.search(s, pos)
if not m:
if pos < len(s) or sep is not None:
yield s[pos:]
break
if pos < m.start() or sep is not None:
yield s[pos:m.start()]
pos = m.end()
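# Examples: unlike str.split, itersplit is lazy; sep=None collapses runs
# of whitespace, while an explicit sep preserves empty fields:
#     list(itersplit('a  b c'))         -> ['a', 'b', 'c']
#     list(itersplit('a||b', sep='|'))  -> ['a', '', 'b']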
DEFAULT_FSEP = ' ||| '
DEFAULT_LSEP = ' |..|'
# DEFAULT_FSEP=u' %s ' % unichr(0xfffd)
# DEFAULT_LSEP=unichr(0xfffc)
def itersplit_to_fields(_str,
fsep=DEFAULT_FSEP,
revtuple=None,
fields=[],
preparse=None):
if preparse:
_str = preparse(_str)
_fields = itersplit(_str, fsep)
if revtuple is not None:
try:
values = (t[1] for t in izip_longest(revtuple._fields, _fields))
return revtuple(*values)
except:
log.error(revtuple)
log.error(_fields)
raise
return tuple(izip_longest(fields, _fields, fillvalue=None))
_missing = unichr(822)
class cached_property(object):
"""Decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class must have a `__dict__` (e.g. be a subclass of object)
:copyright: BSD
see: https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/utils.py
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, _type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
# TODO: sarge
def sh(cmd, ignore_error=False, cwd=None, *args, **kwargs):
kwargs.update({
'shell': True,
'cwd': cwd,
'stderr': subprocess.STDOUT,
'stdout': subprocess.PIPE})
log.debug('cmd: %s %s' % (cmd, kwargs))
p = subprocess.Popen(cmd, **kwargs)
p_stdout = p.communicate()[0]
if p.returncode and not ignore_error:
raise Exception("Subprocess return code: %d\n%r\n%r" % (
p.returncode, cmd, p_stdout))
return p_stdout
class Repository(object):
label = None
prefix = None
preparse = None
fsep = DEFAULT_FSEP
lsep = DEFAULT_LSEP
fields = []
clone_cmd = 'clone'
def __init__(self, fpath):
self.fpath = os.path.abspath(fpath)
self.symlinks = []
def __new__(cls, name):
self = super(Repository, cls).__new__(cls, name)
self._tuple = self._name | dtuple
return self
@property
def relpath(self):
here = os.path.abspath(os.path.curdir)
relpath = os.path.relpath(self.fpath, here)
| return relpath
@cached_property
def _namedtuple(cls):
return namedtuple(
''.join((str.capitalize(cls.label), "Rev")),
(f[0] for f in cls.fields))
def unique_id(self):
"""
:returns: str
"""
pass
def status(self):
"""
:returns: str
"""
pass
def remote_url(self):
"""
:returns: str
"""
pass
def diff(self):
"""
:returns: str
"""
pass
def current_id(self):
"""
:returns: str
"""
pass
def branch(self):
"""
:returns: str
"""
pass
@cached_property
def last_commit(self):
return self.log_iter(maxentries=1).next()
def log(self, n=None, **kwargs):
"""
:returns: str
"""
pass
def itersplit_to_fields(self, _str):
if self.preparse:
_str = self.preparse(_str)
_fields = itersplit(_str, self.fsep)
try:
values = (
t[1] for t in izip_longest(self._tuple._fields, _fields))
return self._tuple(*values)
except:
log.error(self._tuple)
log.error(_fields)
raise
_parselog = itersplit_to_fields
def log_iter(self, maxentries=None, template=None, **kwargs):
# op = self.sh((
# "hg log %s --template"
# % (maxentries and ('-l%d' % maxentries) or '')),
# ignore_error=True
# )
template = repr(template or self.template)
op = self.log(n=maxentries, template=template, **kwargs)
if not op:
return
print(op)
for l in itersplit(op, self.lsep):
l = l.strip()
if not l:
continue
try:
yield self._parselog(l,)
except Exception:
log.error("%s %r" % (str(self), l))
raise
return
# def search_upwards():
# """ Implemented for Repositories that store per-directory
# metadata """
# pass
def full_report(self):
yield ''
yield "# %s" % self.origin_report().next()
yield "%s [%s]" % (self.last_commit, self)
if self.status:
for l in self.status.split('\n'):
yield l
yield ''
if hasattr(self, 'log_iter'):
for r in self.log_iter():
yield r
return
@cached_property
def eggname(self):
return os.path.basename(self.fpath)
@classmethod
def to_normal_url(cls, url):
return url
def str_report(self):
yield pprint.pformat(self.to_dict())
def sh_report(self):
output = []
if not self.remote_url:
output.append('#')
output.extend([
self.label,
self.clone_cmd,
repr(self.remote_url), # TODO: shell quote?
repr(self.relpath)
])
yield ' '.join(output)
def pip_report(self):
comment = '#' if not self.remote_url else ''
if os.path.exists(os.path.join(self.fpath, 'setup.py')):
yield u"%s-e %s+%s@%s#egg=%s" % (
comment,
self.label,
self.to_normal_url(self.remote_url),
self.current_id,
self.eggname)
return
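    # Editor's illustration (URL, hash and egg name are placeholder values):
    # for a Mercurial checkout of "proj" at revision abc123 that has a
    # setup.py, pip_report() yields a line like
    #   -e hg+https://example.com/proj@abc123#egg=proj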
def origin_report(self):
yield "%s://%s = %s" % (
self.label,
self.fpath,
self.remote_url,
# revid
)
return
def status_report(self):
yield '######'
yield self.sh_report().next()
yield self.last_commit
yield self.status
yield ""
def hgsub_report(self):
if self.relpath == '.':
return
yield "%s = [%s]%s" % (
self.fpath.lstrip('./'),
self.label,
self.remote_url)
def gitsubmodule_report(self):
fpath = self.relpath
if fpath == '.':
return
yield '[submodule "%s"]' % fpath.rep |
mfelsche/dwd_weather_data | weather/data/solar.py | Python | apache-2.0 | 1,704 | 0.001763 | # -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing perm | issions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from .base import DWDDataSourcePa | rser
class SolarRadiationParser(DWDDataSourceParser):
NAME = "solar"
    @classmethod
    def get_name(cls):
        return cls.NAME
def extract_data(self, row):
return {
"sunshine_duration": self.get_float(row[3]), # Stundensumme der Sonnenscheindauer in minutes
"diffuse_sky_radiation": self.get_float(row[4]), # Stundensumme der kurzwelligen diffusen Himmelsstrahlung in J/cm²
"global_radiation": self.get_float(row[5]), # Stundensumme der kurzwelligen Globalstrahlung in J/cm²,
"sun_zenith": self.get_float(row[7]) # Sonnenzenit
}
def expected_columns(self):
return 8
|
WenqinSHAO/rtt | localutils/benchmark.py | Python | mit | 18,510 | 0.004646 | """
benchmark.py provides functions for various evaluation tasks in this work
"""
import collections
import sys
import munkres
import numpy as np
def evaluation(fact, detection):
"""classify the detections into true positive, true negative, false positive and false negative
Args:
        fact (list of int): ground fact; should contain only 0 and 1, with 1 for events meant to be detected;
detection (list of int): results to be tested against ground fact; 1 for detected events;
Returns:
dict: {'tp':int, 'fp':int, 'fn':int, 'tn':int, 'precision':float, 'recall':float}
"""
if len(fact) != len(detection):
raise ValueError('fact and prediction are not of same length.')
if not (set(fact) == set(detection) == set([0, 1])):
raise ValueError('fact or/and prediction contain other value than 0/1.')
tp, fp, fn, tn = [0] * 4
for f, p in zip(fact, detection):
if f == p:
if f == 1:
tp += 1
else:
tn += 1
else:
if f == 1:
fn += 1
else:
fp += 1
return dict(tp=tp, fp=fp, fn=fn, tn=tn, precision=float(tp)/(tp+fp), recall=float(tp)/(tp+fn))
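# Editor's illustration (not in the original module): aligned 0/1 series give
# one of each outcome -- (1,1)=tp, (0,1)=fp, (1,0)=fn, (0,0)=tn -- so both
# precision and recall come out to 0.5.
def _demo_evaluation():
    res = evaluation([1, 0, 1, 0], [1, 1, 0, 0])
    assert (res['tp'], res['fp'], res['fn'], res['tn']) == (1, 1, 1, 1)
    assert res['precision'] == 0.5 and res['recall'] == 0.5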
def evaluation_window(fact, detection, window=0, return_match=False):
"""classify the detections with window option
We construct a bipartite graph G = (V + W, E), where V is fact and W is detection.
e = (v, w), e in G, if distance(v, w) <= window.
cost(e) = distance(v, w)
We find the minimum-cost maximum matching M of G.
tp = |M|
fp = |W| - |M|
fn = |V| - |M|
dis = C(M)/|M| average distance between fact and detection in mapping
Args:
fact (list of int): the index or timestamp of facts/events to be detected
detection (list of int): index or timestamp of detected events
window (int): maximum distance for the correlation between fact and detection
| return_match (bool): returns the matching tuple idx [(fact_idx, detection_idx),... | ] if set true
Returns:
dict: {'tp':int, 'fp':int, 'fn':int, 'precision':float, 'recall':float, 'dis':float, 'match': list of tuple}
"""
if len(fact) == 0:
summary = dict(tp=None, fp=len(detection), fn=None,
precision=None, recall=None,
dis=None, match=[])
return summary
elif len(detection) == 0:
summary = dict(tp=0, fp=0, fn=len(fact),
precision=None, recall=0,
dis=None, match=[])
return summary
cost_matrix = make_cost_matrix(fact, detection, window) # construct the cost matrix of bipartite graph
# handle the case there is actually no edges between fact and detection
if all([cost_matrix[i][j] == sys.maxint for i in range(len(fact)) for j in range(len(detection))]):
summary = dict(tp=0, fp=len(detection), fn=len(fact),
precision=0, recall=0,
dis=None, match=[])
return summary
match = munkres.Munkres().compute(cost_matrix) # calculate the matching
match = [(i, j) for i, j in match if cost_matrix[i][j] <= window] # remove dummy edges
    # i and j here are the indices of fact and detection, i.e. the i-th value in fact matches the j-th value in detection
tp = len(match)
fp = len(detection) - tp
fn = len(fact) - tp
summary = dict(tp=tp, fp=fp, fn=fn,
precision=float(tp) / (tp + fp) if len(detection) > 0 else None,
recall=float(tp) / (tp + fn) if len(fact) > 0 else None,
dis=sum([cost_matrix[i][j] for i, j in match]) / float(tp) if tp > 0 else None)
if return_match:
summary['match'] = match
return summary
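# Editor's illustration (not in the original module), assuming make_cost_matrix
# uses absolute distance capped by the window: facts 10 and 50 pair with
# detections 12 and 52 at cost 2 each, detection 300 stays unmatched (a fp)
# and fact 90 stays unmatched (a fn).
def _demo_evaluation_window():
    res = evaluation_window([10, 50, 90], [12, 52, 300], window=5)
    assert (res['tp'], res['fp'], res['fn']) == (2, 1, 1)
    assert res['dis'] == 2.0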
def evaluation_window_adp(fact, detection, window=0, return_match=False):
""" a variation of evaluation_window() which is adapted to sparse cost matrix generated from fact and detection.
    If fact or detection contains many elements, say more than one hundred, it will take a significant
    amount of time, even with the Hungarian algorithm, to compute the min-cost maximum matching.
    In our specific case, the cost matrix is sparse and can only have values at limited places,
    so it is possible to cut the initial cost matrix into several non-connecting ones. For example:
cost_matrix = [[62, 0, 0, 0, 0, 0, 0],
[11, 11, 82, 0, 0, 0, 0],
[0, 0, 81, 12, 0, 0, 0],
[0, 0, 0, 0, 12, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 0, 12]]
The given cost matrix is composed of three separate parts:
cost_matrix[0:4][0:5], cost_matrix[3:4][4:5] and cost_matrix[5:end][6:end].
Calculating the matching separately for the two sub-matrices will be faster.
Args:
fact (list of int): the index or timestamp of facts/events to be detected
detection (list of int): index or timestamp of detected events
window (int): maximum distance for the correlation between fact and detection
return_match (bool): returns the matching tuple idx [(fact_idx, detection_idx),...] if set true
Returns:
dict: {'tp':int, 'fp':int, 'fn':int, 'precision':float, 'recall':float, 'dis':float, 'match': list of tuple}
"""
if len(fact) == 0 or len(detection) == 0:
return evaluation_window(fact, detection, window, return_match)
cost_matrix = make_cost_matrix(fact, detection, window)
# handle the case there is actually no edges between fact and detection
if all([cost_matrix[i][j] == sys.maxint for i in range(len(fact)) for j in range(len(detection))]):
summary = dict(tp=0, fp=len(detection), fn=len(fact),
precision=0, recall=0,
dis=None, match=[])
return summary
cut = cut_matrix(cost_matrix, sys.maxint) # [((fact/line range), (detect/column range)),...]
match_cut = [evaluation_window(fact[i[0][0]:i[0][1]], detection[i[1][0]:i[1][1]], window, True) for i in cut]
tp = sum([i['tp'] for i in match_cut if i['tp']]) # in general is not possible to have i['tp'] is None
fp = len(detection) - tp
fn = len(fact) - tp
match = []
for i, res in enumerate(match_cut):
match.extend([(f+cut[i][0][0], d+cut[i][1][0]) for f, d in res['match']]) # adjust index according to starting
summary = dict(tp=tp, fp=fp, fn=fn,
precision=float(tp) / (tp + fp) if len(detection) > 0 else None,
recall=float(tp) / (tp + fn) if len(fact) > 0 else None,
dis=sum([abs(fact[i]-detection[j]) for i, j in match]) / float(tp) if tp > 0 else None)
if return_match:
summary['match'] = match
return summary
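# Editor's note: evaluation_window_adp is meant to return the same counts as
# evaluation_window; it only splits the window-sparse cost matrix into
# independent blocks first, so the Hungarian step runs on much smaller inputs.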
def cut_matrix(mat, no_edge=0):
""" given a cost matrix, cut it into non-connecting parts
For example:
cost_matrix = [[62, 0, 0, 0, 0, 0, 0],
[11, 11, 82, 0, 0, 0, 0],
[0, 0, 81, 12, 0, 0, 0],
[0, 0, 0, 0, 12, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 0, 12]]
expect return: [((0, 4), (0, 5)), ((3, 4), (4, 5)), ((5,7),(6,7))]
    Input like this is acceptable as well, though such a case is not possible in the usage of this project.
cost_matrix = [[62, 0, 0, 0, 0, 0, 0],
[11, 11, 82, 0, 0, 0, 0],
[0, 0, 81, 12, 0, 0, 0],
[0, 0, 12, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 12],
[0, 0, 0, 0, 0, 11, 12],
[0, 0, 0, 0, 0, 0, 12]]
    the lower-right sub-matrix doesn't have an edge at its top-left corner.
Args:
mat (list of list of equal length): the cost matrix
no_edge (int): the value in matrix meaning the the two nodes are not connected, thus no_edge
Return:
list of tuple: [((row from, to), (column from, to)), (another sub-matrix)... |
avanzosc/odoo-addons | stock_orderpoint_generation/__manifest__.py | Python | agpl-3.0 | 483 | 0 | # Copyright 2021 Daniel Campos - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.h | tml
{
"name": "Stock Orderpoint Generation",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"depends": [
"stock",
],
"author": "AvanzOSC",
"website": "http://www.avanzosc.es",
"category": "Custom",
"data": [
"security/ir.model.access.csv",
"wizards/stock_orderpoin | t_generator_view.xml",
],
"installable": True,
}
|
sunrin92/LearnPython | 0-ThinkPython/get_wordlist.py | Python | mit | 158 | 0 | def getWordlist(wordtxt):
fin = open(word | txt)
words = []
for line in fin:
word = line.strip()
words.append(word)
retur | n words
|
laborautonomo/pip | pip/vcs/mercurial.py | Python | mit | 5,087 | 0.000197 | import os
import tempfile
import re
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.log import logger
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
call_subprocess(
[self.cmd, 'archive', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
config_file = open(repo_config, 'w')
config.write(config_file)
config_file.close()
except (OSError, configparser.NoSectionError) as exc:
logger.warn(
'Could not switch Mercurial repository to %s: %s'
% (url, exc))
else:
call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
call_subprocess([self.cmd, 'pull', '-q'], cwd=dest)
call_subprocess(
[self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning hg %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '--noupdate', '-q', url, dest])
call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
if "tip" != tag:
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_branch_revs(self, location):
branches = call_subprocess(
[self.cmd, 'branches'], show_stdout=False, cwd=location)
branch_revs = []
for line in branches.splitlines():
branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if branches_match:
branch = branches_match.group(1)
rev = branches_match.group(2)
if "default" != branch:
branch_revs.append((rev.strip(), branch.strip()))
return dict(branch_revs)
def get_revision(self, location):
current_revision = call_subprocess(
[self.cmd, 'parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = call_subprocess(
[self.cmd, 'parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
current_rev_hash = self.get_revision_hash(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
| full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif current_rev in branch_revs:
# It's the tip of a branch
full_egg_name = '%s-%s' % (
egg_project_name,
branch_revs[current_rev],
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
vcs | .register(Mercurial)
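# Editor's illustration (URL, hash and tag are placeholder values): for an hg
# checkout whose current revision carries tag 1.0 and hash abc123,
# get_src_requirement() renders an editable spec of the form
#   hg+https://example.com/proj@abc123#egg=proj-1.0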
|
ptcrypto/p2pool-adaptive | extra_modules/x11_hash/test.py | Python | gpl-3.0 | 344 | 0.008721 | import x11_hash
import weakref
import binascii
import Str | ingIO
from binascii import unhexlify
teststart = '700000005d385ba114d079970b29a9418fd0549e7d68a95c7f168621a314201000000000578586d149fd07b22f3a8a347c516de7052f034d2b76ff68e0d6ecff9b77a45489e3fd511 | 732011df0731000';
testbin = unhexlify(teststart)
hash_bin = x11_hash.getPoWHash(testbin) |
PDOK/data.labs.pdok.nl | data/bag-brk/modules/FindApartment.py | Python | mit | 492 | 0.002033 | # Initialize the lookup table for apar | tments
import csv
lut = []
reader = csv.DictReader(open('data/Apprechtcomplex-met-Grondpercelen | -mei2017.csv'), fieldnames=['apartment', 'parcel'])
# Read every row into memory up front so the CSV file handle is no longer needed afterwards.
for row in reader:
lut.append(row)
def find_apartment(apartment):
parcel_matches = []
for entry in lut:
if entry['apartment'] == apartment:
parcel_matches.append(entry['parcel'])
return parcel_matches
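# Editor's usage sketch ('__missing__' is a made-up id): look up every parcel
# linked to an apartment id in the CSV; unknown ids simply yield an empty list.
def _demo_find_apartment():
    assert find_apartment('__missing__') == []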
|
dypublic/PyConChina2016 | fabfile.py | Python | mit | 690 | 0 | # -*- coding: utf-8 -*-
from os.path import dirname, realpath, join
from fabric.api import local, env
from fabric.contrib.project import rsync_project
env.user = 'imust'
env.hosts = ['119.254.110.163']
VPS_DEPLOY_PATH = '/home/imust/data/www/public/'
PROJECT_ROOT_DIR = realpath(dirname(__file__))
DIST_DIR = join(PROJECT_ROOT_DIR, 'public') | + '/'
| VENV_PYTHON = join(PROJECT_ROOT_DIR, 'venv', 'bin', 'python2')
def clean():
local(u'rm -rf {}'.format(DIST_DIR))
def build():
clean()
local(u'{python} bin/app.py -g'.format(python=VENV_PYTHON))
def deploy_vps():
build()
rsync_project(remote_dir=VPS_DEPLOY_PATH,
local_dir=DIST_DIR, delete=True)
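# Editor's note: these are Fabric 1.x tasks; run them from the project root,
# e.g. `fab build` to regenerate ./public locally, or `fab deploy_vps` to
# build and rsync the result to the configured host.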
|
Southpaw-TACTIC/TACTIC | src/pyasm/application/maya/maya_environment.py | Python | epl-1.0 | 1,612 | 0.003102 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['MayaEnvironment']
import os
from pyasm.application.common import AppEnvironment
class MayaEnvironment(AppEnvironment):
'''Sets up the maya environment. Because all of the maya code can be
run on both the server and the client, this package must be independent
    of all other Tactic software. This class allows the population of
    the necessary information for the proper functioning of these classes
| in this package'''
def set_up(info):
# set up application environment, by getting information from the info
# object. This info object, contains data retrieved from some
# external source
# get the environment | and application
env = AppEnvironment.get()
from maya_app import Maya, Maya85
# detect if this is Maya 8.5 or later
app = None
try:
import maya
app = Maya85()
except ImportError:
from pyasm.application.maya import Maya
app = Maya()
info.app = app
env.set_app(app)
env.set_info(info)
# DEPRECATED: info object shouldn't know anything about
# populate the info object with this information
info.env = env
set_up = staticmethod(set_up)
|
UManPychron/pychron | pychron/dvc/share.py | Python | apache-2.0 | 4,172 | 0.000479 | # ===============================================================================
# Copyright 2015 Jake R | oss
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the Lic | ense.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import re
import subprocess
from traits.api import HasTraits, Str, Bool, List
from traitsui.api import View, UItem, VGroup, TableEditor
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.handler import Controller
from traitsui.table_column import ObjectColumn
from pychron.core.helpers.filetools import ilist_gits
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.github import Organization
from pychron.paths import paths
class ShareableRepo(HasTraits):
name = Str
    enabled = Bool
    root = Str  # filesystem path of the repository (read as si.root below)
remote_name = Str('origin')
remote_url = Str('http://github.com')
remote_re = re.compile(r'\[remote ".+"\]')
class PushExperimentsModel(HasTraits):
shareables = List
def __init__(self, org, usr, pwd, oauth_token, root=None, *args, **kw):
self._org = org
self._usr = usr
self._pwd = pwd
self._oauth_token = oauth_token
super(PushExperimentsModel, self).__init__(*args, **kw)
ss = []
if root is None:
root = paths.repository_dataset_dir
for exp in ilist_gits(root):
cfg = os.path.join(root, exp, '.git', 'config')
with open(cfg, 'r') as rfile:
for line in rfile:
if remote_re.match(line):
break
else:
ss.append(ShareableRepo(name=exp, enabled=True,
root=os.path.join(root, exp)))
self.shareables = ss
@property
def names(self):
return [s.name for s in self.shareables]
def create_remotes(self):
cmd = lambda x: ['git', 'remote', 'add', x.remote_name, x.remote_url]
for si in self.shareables:
if si.enabled:
root = si.root
ret = subprocess.call(cmd(si), cwd=root)
# check if url exists
if subprocess.call(['git', 'ls-remote'], cwd=root):
# add repo to github
org = Organization(self._org, self._usr, self._pwd, self._oauth_token)
# org.create_repo(si.name)
class PushExperimentsView(Controller):
def closed(self, info, is_ok):
if is_ok:
self.model.create_remotes()
def traits_view(self):
cols = [CheckboxColumn(name='enabled', width=30),
ObjectColumn(name='name', editable=False),
ObjectColumn(name='remote_name', editable=False,
label='Remote', width=50),
ObjectColumn(name='remote_url', editable=False,
label='URL', width=300),
]
ev = View(UItem('name'),
UItem('enabled'),
VGroup(UItem('remote_name', label='Name'),
UItem('remote_url', label='URL')))
v = okcancel_view(UItem('shareables',
editor=TableEditor(columns=cols,
edit_view=ev)),
title='Shareable Experiments')
return v
if __name__ == '__main__':
root = '/Users/ross/Sandbox/testdir'
pm = PushExperimentsModel(root)
pv = PushExperimentsView(model=pm)
pv.configure_traits()
# ============= EOF =============================================
|
shubhamdhama/zulip | zproject/dev_settings.py | Python | apache-2.0 | 6,566 | 0.000761 | import os
import pwd
from typing import Optional, Set, Tuple
ZULIP_ADMINISTRATOR = "desdemona+admin@zulip.com"
# We want LOCAL_UPLOADS_DIR to be an absolute path so that code can
# chdir without having problems accessing it. Unfortunately, this
# means we need a duplicate definition of DEPLOY_ROOT with the one in
# settings.py.
DEPLOY_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
LOCAL_UPLOADS_DIR = os.path.join(DEPLOY_ROOT, 'var/uploads')
# We assume dev droplets are the only places where
# users use zulipdev as the user.
IS_DEV_DROPLET = pwd.getpwuid(os.getuid()).pw_name == 'zulipdev'
FORWARD_ADDRESS_CONFIG_FILE = "var/forward_address.ini"
# Check if test_settings.py set EXTERNAL_HOST.
external_host_env = os.getenv('EXTERNAL_HOST')
if external_host_env is None:
if IS_DEV_DROPLET:
# For our droplets, we use the hostname (eg github_username.zulipdev.org) by default.
EXTERNAL_HOST = os.uname()[1].lower() + ":9991"
else:
# For local development environments, we use localhost by
# default, via the "zulipdev.com" hostname.
EXTERNAL_HOST = 'zulipdev.com:9991'
# Serve the main dev realm at the literal name "localhost",
# so it works out of the box even when not on the Internet.
REALM_HOSTS = {
'zulip': 'localhost:9991',
}
else:
EXTERNAL_HOST = external_host_env
REALM_HOSTS = {
'zulip': EXTERNAL_HOST,
}
ALLOWED_HOSTS = ['*']
# Uncomment extra backends if you want to test with them. Note that
# for Google and GitHub auth you'll need to do some pre-setup.
AUTHENTICATION_BACKENDS = (
'zproject.backends.DevAuthBackend',
'zproject.backends.EmailAuthBackend',
'zproject.backends.GitHubAuthBackend',
'zproject.backends.GoogleAuthBackend',
'zproject.backends.SAMLAuthBackend',
# 'zproject.backends.AzureADAuthBackend',
'zproject.backends.GitLabAuthBackend',
'zproject.backends.AppleAuthBackend',
) # type: Tuple[str, ...]
EXTERNAL_URI_SCHEME = "http://"
EMAIL_GATEWAY_PATTERN = "%s@" + EXTERNAL_HOST.split(':')[0]
NOTIFICATION_BOT = "notification-bot@zulip.com"
ERROR_BOT = "error-bot@zulip.com"
EMAIL_GATEWAY_BOT = "emailgateway@zulip.com"
PHYSICAL_ADDRESS = "Zulip Headquarters, 123 Octo Stream, South Pacific Ocean"
EXTRA_INSTALLED_APPS = ["zilencer", "analytics", "corporate"]
# Disable Camo in development
CAMO_URI = ''
OPEN_REALM_CREATION = True
INVITES_MIN_USER_AGE_DAYS = 0
EMBEDDED_BOTS_ENABLED = True
SAVE_FRONTEND_STACKTRACES = True
EVENT_LOGS_ENABLED = True
STAGING_ERROR_NOTIFICATIONS = True
SYSTEM_ONLY_REALMS = set() # type: Set[str]
USING_PGROONGA = True
# Flush cache after migration.
POST_MIGRATION_CACHE_FLUSHING = True # type: bool
# Don't require anything about password strength in development
PASSWORD_MIN_LENGTH = 0
PASSWORD_MIN_GUESSES = 0
# SMTP settings for forwarding emails sent in development
# environment to an email account.
EMAIL_HOST = ""
EMAIL_HOST_USER = ""
# Two factor authentication: Use the fake backend for development.
TWO_FACTOR_CALL_GATEWAY = 'two_factor.gateways.fake.Fake'
TWO_FACTOR_SMS_GATEWAY = 'two_factor.gateways.fake.Fake'
# Make sendfile use django to serve files in development
SENDFILE_BACKEND = 'django_sendfile.backends.development'
# Set this True to send all hotspots in development
ALWAYS_SEND_ALL_HOTSPOTS = False # type: bool
# FAKE_LDAP_MODE supports using a fake LDAP database in the
# development environment, without needing an LDAP server!
#
# Three modes are allowed, and each will set up Zulip and the fake LDAP
# database in a way appropriate for the corresponding mode described
# in https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap-including-active-directory
# (A) If users' email addresses are in LDAP and used as username.
# (B) If LDAP only has usernames but email addresses are of the form
# username@example.com
# (C) If LDAP usernames are completely unrelated to email addresses.
#
# Fake LDAP data has e.g. ("ldapuser1", "ldapuser1@zulip.com") for username/email.
FAKE_LDAP_MODE = None # type: Optional[str]
# FAKE_LDAP_NUM_USERS = 8
if FAKE_LDAP_MODE:
import ldap
from django_auth_ldap.config import LDAPSearch
# To understand these parameters, read the docs in
# prod_settings_template.py and on ReadTheDocs.
LDAP_APPEND_DOMAIN = None
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=zulip,dc=com",
ldap.SCOPE_ONELEVEL, "(uid=%(user)s)")
AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch("ou=users,dc=zulip,dc=com",
ldap.SCOPE_ONELEVEL, "(email=%(email)s)")
if FAKE_LDAP_MODE == 'a':
AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch("ou=users,dc=zulip,dc=com",
ldap.SCOPE_ONELEVEL, "(uid=%(email)s)")
AUTH_LDAP_USERNAME_ATTR = "uid"
AUTH_LDAP_USER_ATTR_MAP | = {
"full_name": "cn",
"avatar": "thumbnailPhoto",
# This won't do much unless one changes | the fact that
# all users have LDAP_USER_ACCOUNT_CONTROL_NORMAL in
# zerver/lib/dev_ldap_directory.py
"userAccountControl": "userAccountControl",
}
elif FAKE_LDAP_MODE == 'b':
LDAP_APPEND_DOMAIN = 'zulip.com'
AUTH_LDAP_USER_ATTR_MAP = {
"full_name": "cn",
"avatar": "jpegPhoto",
"custom_profile_field__birthday": "birthDate",
"custom_profile_field__phone_number": "phoneNumber",
}
elif FAKE_LDAP_MODE == 'c':
AUTH_LDAP_USERNAME_ATTR = "uid"
LDAP_EMAIL_ATTR = 'email'
AUTH_LDAP_USER_ATTR_MAP = {
"full_name": "cn",
}
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPAuthBackend',)
THUMBOR_URL = 'http://127.0.0.1:9995'
THUMBNAIL_IMAGES = True
SEARCH_PILLS_ENABLED = bool(os.getenv('SEARCH_PILLS_ENABLED', False))
BILLING_ENABLED = True
LANDING_PAGE_NAVBAR_MESSAGE = None
# Test Custom TOS template rendering
TERMS_OF_SERVICE = 'corporate/terms.md'
# Our run-dev.py proxy uses X-Forwarded-Port to communicate to Django
# that the request is actually on port 9991, not port 9992 (the Django
# server's own port); this setting tells Django to read that HTTP
# header. Important for SAML authentication in the development
# environment.
USE_X_FORWARDED_PORT = True
# Override the default SAML entity ID
SOCIAL_AUTH_SAML_SP_ENTITY_ID = "http://localhost:9991"
MEMCACHED_USERNAME = None
|
chienlieu2017/it_management | odoo/addons/procurement/models/procurement.py | Python | gpl-3.0 | 12,497 | 0.002641 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from psycopg2 import OperationalError
from odoo import api, fields, models, registry, _
from odoo.exceptions import UserError
import odoo.addons.decimal_precision as dp
PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')]
class ProcurementGroup(models.Model):
'''
The procurement group class is used to group products together
when computing procurements. (tasks, physical products, ...)
The goal is that when you have one sale order of several products
and the products are pulled from the same or several location(s), to keep
having the moves grouped into pickings that represent the sale order.
Used in: sales order (to group delivery order lines like the so), pull/push
rules (to pack like the delivery order), on orderpoints (e.g. for wave picking
all the similar products together).
Grouping is made only if the source and the destination is the same.
Suppose you have 4 lines on a picking from Output where 2 lines will need
    to come from Input (crossdock) and 2 lines come from Stock -> Output. As
the four procurement orders will have the same group ids from the SO, the
move from input will have a stock.picking with 2 grouped lines and the move
from stock will have 2 grouped lines also.
The name is usually the name of the original document (sale order) or a
sequence computed if created manually.
'''
_name = 'procurement.group'
_description = 'Procurement Requisition'
_order = "id desc"
name = fields.Char(
'Reference',
default=lambda self: self.env['ir.sequence'].next_by_code('procurement.group') or '',
required=True)
move_type = fields.Selection([
('direct', 'Partial'),
('one', 'All at once')], string='Delivery Type', default='direct',
required=True)
procurement_ids = fields.One2many('procurement.order', 'group_id', 'Procurements')
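# Editor's sketch (assumes a standard Odoo environment; the name is a
# placeholder): procurements created with the same group_id, source and
# destination end up grouped into the same picking, e.g.
#   group = env['procurement.group'].create({'name': 'SO042', 'move_type': 'one'})
#   ...then pass group_id=group.id when creating the related procurements.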
class ProcurementRule(models.Model):
    ''' A rule describes what a procurement should do: produce, buy, move, ... '''
_name = 'procurement.rule'
_description = "Procurement Rule"
_order = "name"
name = fields.Char(
'Name', required=True, translate=True,
help="This field will fill the packing origin and the name of its moves")
active = fields.Boolean(
'Active', default=True,
help="If unchecked, it will allow you to hide the rule without removing it.")
group_propagation_option = fields.Selection([
('none', 'Leave Empty'),
('propagate', 'Propagate'),
('fixed', 'Fixed')], string="Propagation of Procurement Group", default='propagate')
group_id = fields.Many2one('procurement.group', 'Fixed Procurement Group')
action = fields.Selection(
selection='_get_action', string='Action',
required=True)
sequence = fields.Integer('Sequence', default=20)
company_id = fields.Many2one('res.company', 'Company')
@api.model
def _get_action(self):
return []
class ProcurementOrder(models.Model):
""" Procurement Orders """
_name = "procurement.order"
_description = "Procurement"
_order = 'priority desc, date_planned, id asc'
_inherit = ['mail.thread','ir.needaction_mixin']
name = fields.Text('Description', required=True)
origin = fields.Char('Source Document', help="Reference of the document that created this Procurement. This is automatically completed by Odoo.")
company_id = fields.Many2one(
'res.company', 'Company',
        default=lambda self: self.env['res.company']._company_default_get('procurement.order'),
required=True)
# These two fields are used for scheduling
priority = fields.Selection(
PROCUREMENT_PRIORITIES, string='Priority', default='1',
required=True, index=True, track_visibility='onchange')
date_planned = fields.Datetime(
'Scheduled Date', default=fields.Datetime.now,
required=True, index=True, track_visibility='onchange')
group_id = fields.Many2one('procurement.group', 'Procurement Group')
rule_id = fields.Many2one(
'procurement.rule', 'Rule',
track_visibility='onchange',
help="Chosen rule for the procurement resolution. Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior.")
product_id = fields.Many2one(
'product.product', 'Product',
readonly=True, required=True,
states={'confirmed': [('readonly', False)]})
product_qty = fields.Float(
'Quantity',
digits=dp.get_precision('Product Unit of Measure'),
readonly=True, required=True,
states={'confirmed': [('readonly', False)]})
product_uom = fields.Many2one(
'product.uom', 'Product Unit of Measure',
readonly=True, required=True,
states={'confirmed': [('readonly', False)]})
state = fields.Selection([
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
('running', 'Running'),
('done', 'Done')], string='Status', default='confirmed',
copy=False, required=True, track_visibility='onchange')
@api.model
def _needaction_domain_get(self):
return [('state', '=', 'exception')]
@api.model
def create(self, vals):
procurement = super(ProcurementOrder, self).create(vals)
if not self._context.get('procurement_autorun_defer'):
procurement.run()
return procurement
@api.multi
def unlink(self):
if any(procurement.state == 'cancel' for procurement in self):
raise UserError(_('You cannot delete procurements that are in cancel state.'))
return super(ProcurementOrder, self).unlink()
@api.multi
def do_view_procurements(self):
'''
        This function returns an action that displays the existing procurement
        orders of the same procurement group as the given ids.
'''
action = self.env.ref('procurement.do_view_procurements').read()[0]
action['domain'] = [('group_id', 'in', self.mapped('group_id').ids)]
return action
@api.onchange('product_id')
def onchange_product_id(self):
""" Finds UoM of changed product. """
if self.product_id:
self.product_uom = self.product_id.uom_id.id
@api.multi
def cancel(self):
to_cancel = self.filtered(lambda procurement: procurement.state != 'done')
if to_cancel:
return to_cancel.write({'state': 'cancel'})
@api.multi
def reset_to_confirmed(self):
return self.write({'state': 'confirmed'})
@api.multi
def run(self, autocommit=False):
# TDE FIXME: avoid browsing everything -> avoid prefetching ?
for procurement in self:
            # we intentionally browse inside the for loop to avoid caching all ids, which would be resource-greedy
            # and useless since a later refresh will invalidate the whole cache (so the next iteration
            # would fetch all the ids again anyway)
if procurement.state not in ("running", "done") | :
try:
if procurement._assign():
res = procurement._run()
| if res:
procurement.write({'state': 'running'})
else:
procurement.write({'state': 'exception'})
else:
procurement.message_post(body=_('No rule matching this procurement'))
procurement.write({'state': 'exception'})
if autocommit:
self.env.cr.commit()
except OperationalError:
if autocommit:
self.env.cr.rollback()
continue
else:
raise
return True
@api.multi
@api.returns('self', lambda procurements: [procurement.id for p |
ongair/yowsup | yowsup/registration/existsrequest.py | Python | mit | 938 | 0.009595 | from yowsup.common.http.warequest import WARequest
from yowsup.common.http.waresponseparser import JSONResponseParser
from yowsup.env import YowsupEnv
class WAExistsReque | st(WARequest):
def __init__ | (self,cc, p_in, idx):
super(WAExistsRequest,self).__init__()
self.addParam("cc", cc)
self.addParam("in", p_in)
self.addParam("id", idx)
self.addParam("lg", "en")
self.addParam("lc", "GB")
self.addParam("token", YowsupEnv.getCurrent().getToken(p_in))
self.url = "v.whatsapp.net/v2/exist"
self.pvars = ["status", "reason", "sms_length", "voice_length", "result","param", "pw", "login", "type", "expiration", "kind",
"price", "cost", "currency", "price_expiration"
]
self.setParser(JSONResponseParser())
def send(self, parser = None):
res = super(WAExistsRequest, self).send(parser)
return res
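# Editor's usage sketch (all argument values are placeholders): build the
# "exist" probe for a number and read back the parsed JSON fields listed in
# pvars.
#   req = WAExistsRequest('49', '1234567890', 'device-identity')
#   result = req.send()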
|
AloneGu/amira_image_cls | img_cls/model/img_process.py | Python | mit | 6,753 | 0.002369 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Jackling Gu
@file: img_process.py
@time: 2017-06-13 11:13
"""
from ..util import getcfg, data_load, get_abspath, get_y_labels, preprocess_img
from sklearn.preprocessing import LabelEncoder
from keras.models import load_model, Model
from keras.callbacks import CSVLogger
from keras.utils import to_categorical
from keras.applications import vgg16, vgg19, inception_v3
from scipy.misc import imread, imresize
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator
def update_app_model(tmp_model, num_class):
from keras.layers import Dense, Flatten, GlobalAveragePooling2D, Dropout
# add a global spatial average pooling layer
x = tmp_model.output
try:
x = Flatten()(x) # vgg ?
except:
pass # inception
x = Dense(256, activation='relu', name='fc1')(x)
x = Dropout(0.3)(x)
if num_class == 2:
# prediction layer
x = Dense(1, activation='sigmoid', name='final_predictions')(x)
my_model = Model(inputs=tmp_model.input, outputs=x)
my_model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
else:
# prediction layer
x = Dense(num_class, activation='softmax', name='final_predictions')(x)
my_model = Model(inputs=tmp_model.input, outputs=x)
my_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return my_model
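# Editor's usage sketch (shapes match this module's channels-first defaults):
#   base = vgg16.VGG16(input_shape=(3, 224, 224), include_top=True, weights='imagenet')
#   model = update_app_model(base, num_class=2)  # single sigmoid unit for binary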
class ImageClassification(object):
def __init__(self):
self.img_w = getcfg('IMG_WIDTH', 224)
self.img_h = getcfg('IMG_HEIGHT', 224)
self.epoch = getcfg('EPOCH', 10)
self.default_shape = (3, self.img_h, self.img_w) # channel first
self.model_name = getcfg('MODEL_NAME', 'ALEXNET')
self.data_path = getcfg('DATA_DIR', '../data/dog_vs_cat')
self.model_save_path = get_abspath('../models/{}_{}_model.h5'.format(self.model_name, self.epoch))
print('MODEL NAME', self.model_name, 'EPOCHS', self.epoch, 'DATA PATH', self.data_path)
print('MODEL SAVE PATH', self.model_save_path)
# get y information first
self.encoder = LabelEncoder()
self.encoder.fit(get_y_labels(self.data_path))
self.num_class = len(self.encoder.classes_)
if os.path.exists(self.model_save_path):
# load model , do not have to load x data
print('LOAD EXIST MODEL')
self.model = load_model(self.model_save_path)
else:
# get data
self.x, self.y = data_load(self.data_path, img_height=self.img_h, img_width=self.img_w)
print('x shape', self.x.shape)
self.label_y = self.encoder.transform(self.y)
if self.num_class == 2:
self.binary_y = self.label_y
else:
self.binary_y = to_categorical(self.label_y)
print(self.num_class, self.y[:2], self.label_y[:2], self.binary_y[:2])
# already shuffle ,split
tmp_data_cnt = len(self.x)
self.train_data_cnt = int(tmp_data_cnt * 0.65)
self.x_train = self.x[:self.train_data_cnt]
self.x_test = self.x[self.train_data_cnt:]
self.y_train = self.binary_y[:self.train_data_cnt]
self.y_test = self.binary_y[self.train_data_cnt:]
self.model = None
if self.model_name == 'ALEXNET':
from .alexnet import AlexNet
self.model = AlexNet(self.img_h, self.img_w, self.num_class).get_model()
if self.model_name == 'SIMPLENET':
from .simple_cnn import SimpleNet
self.model = SimpleNet(self.img_h, self.img_w, self.num_class).get_model()
# remove top fully connection layers and do not use imagenet weights ( hard to download )
elif self.model_name == 'VGG16':
tmp_model = vgg16.VGG16(input_shape=self.default_shape, include_top=True, weights='imagenet')
self.model = update_app_model(tmp_model, self.num_class)
elif self.model_name == 'VGG19':
tmp_model = vgg19.VGG19(input_shape=self.default_shape, include_top=True, weights='imagenet')
self.model = update_app_model(tmp_model, self.num_class)
elif self.model_name == 'INCEPTIONV3':
tmp_model = inception_v3.InceptionV3(input_shape=self.default_shape, include_top=True,
weights='imagenet')
self.model = update_app_model(tmp_model, self.num_class)
elif self.model_name == 'DENSENET':
from .densenet import DenseNet
tmp_model = DenseNet((3, self.img_h, self.img_w), depth=10, growth_rate=3,
nb_filter=4) # change to small size
self.model = update_app_model(tmp_model, self.num_class)
if self.model is not None:
self.model.summary()
self.train()
def train | (self):
| # use data augmentation
datagen = ImageDataGenerator(
shear_range=0.15,
rotation_range=0.15,
zoom_range=0.15,
vertical_flip=True,
horizontal_flip=True) # randomly flip images
# self.model.fit(self.x, self.binary_y, epochs=self.epoch, validation_split=0.2)
log_path = get_abspath('../models/{}_{}_training.log'.format(self.model_name, self.epoch))
csv_logger = CSVLogger(log_path)
bat_size = 50
steps = int(self.train_data_cnt / bat_size) + 20
self.model.fit_generator(datagen.flow(self.x_train, self.y_train, batch_size=bat_size),
steps_per_epoch=steps,
validation_data=(self.x_test, self.y_test),
epochs=self.epoch, verbose=1,
callbacks=[csv_logger])
self.model.save(self.model_save_path)
def run(self, img_file_path):
img = imread(img_file_path)
img = imresize(img, (self.img_h, self.img_w))
        img = np.transpose(img, (2, 1, 0))  # HWC -> channels-first; axes (2, 1, 0) also swap H and W, harmless while img_h == img_w
img = preprocess_img(img)
np_img = np.array([img])
if self.num_class == 2:
pred = self.model.predict(np_img)[0]
class_type = int(pred) # binary class 0 or 1
else:
pred = self.model.predict_proba(np_img)[0]
class_type = np.argmax(pred)
return_res = {'type': self.encoder.inverse_transform(class_type), 'prediction': str(pred)}
return return_res
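# Editor's usage sketch (path and label are placeholders):
#   clf = ImageClassification()     # trains, or loads the saved model
#   clf.run('/path/to/img.jpg')     # -> {'type': 'cat', 'prediction': '[ 0.12]'}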
|