| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
mapycz/python-mapnik | test/python_tests/query_tolerance_test.py | Python | lgpl-2.1 | 1,397 | 0.000716 |
import os
from nose.tools import eq_
import mapnik
from .utilities import execution_path, run_all
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'shape' in mapnik.DatasourceCache.plugin_names():
def test_query_tolerance():
srs = '+init=epsg:4326'
lyr = mapnik.Layer('test')
ds = mapnik.Shapefile(file='../data/shp/arrows.shp')
lyr.datasource = ds
lyr.srs = srs
_width = 256
_map = mapnik.Map(_width,
|
_width, srs)
_map.layers.append(lyr)
# zoom determines tolerance
_map.zoom_all()
_map_env = _map.envelope()
tol = (_map_env.maxx - _map_env.minx) / _width * 3
# 0.046875 for arrows.shp and zoom_all
eq_(tol, 0.046875)
# check point really exists
x, y = 2.0, 4.0
features = _map.query_point(0, x, y)
eq_(len(list(features)), 1)
# check inside tolerance limit
x = 2.0
|
+ tol * 0.9
features = _map.query_point(0, x, y)
eq_(len(list(features)), 1)
# check outside tolerance limit
x = 2.0 + tol * 1.1
features = _map.query_point(0, x, y)
eq_(len(list(features)), 0)
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
IljaGrebel/OpenWrt-SDK-imx6_HummingBoard | staging_dir/host/lib/scons-2.3.5/SCons/Tool/aixc++.py | Python | gpl-2.0 | 2,413 | 0.002072 |
"""SCons.Tool.aixc++
Tool-specific initialization for IBM xlC / Visual Age C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the S
|
oftware, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be inclu
|
ded
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixc++.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog"
import os.path
import SCons.Platform.aix
cplusplus = __import__('c++', globals(), locals(), [])
packages = ['vacpp.cmp.core', 'vacpp.cmp.batch', 'vacpp.cmp.C', 'ibmcxx.cmp']
def get_xlc(env):
xlc = env.get('CXX', 'xlC')
return SCons.Platform.aix.get_xlc(env, xlc, packages)
def generate(env):
"""Add Builders and construction variables for xlC / Visual Age
suite to an Environment."""
path, _cxx, version = get_xlc(env)
if path and _cxx:
_cxx = os.path.join(path, _cxx)
if 'CXX' not in env:
env['CXX'] = _cxx
cplusplus.generate(env)
if version:
env['CXXVERSION'] = version
def exists(env):
path, _cxx, version = get_xlc(env)
if path and _cxx:
xlc = os.path.join(path, _cxx)
if os.path.exists(xlc):
return xlc
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
guoqiao/django-nzpower | nzpower/migrations/0001_initial.py | Python | mit | 981 | 0.001019 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(m
|
igrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', mode
|
ls.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
('slug', models.SlugField(unique=True, blank=True)),
('site', models.URLField(null=True, blank=True)),
('rate', models.IntegerField(default=50)),
('bank', models.CharField(default=b'anz', max_length=100)),
('bank_account_name', models.CharField(max_length=100)),
('bank_account_no', models.CharField(max_length=30)),
],
options={
},
bases=(models.Model,),
),
]
|
datastax/python-driver | tests/integration/cloud/test_cloud.py | Python | apache-2.0 | 10,950 | 0.003288 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from cassandra.datastax.cloud import parse_metadata_info
from cassandra.query import SimpleStatement
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table, create_keyspace_simple
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
import unittest
import six
from ssl import SSLContext, PROTOCOL_TLS
from cassandra import DriverException, ConsistencyLevel, InvalidRequest
from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster, _execution_profile_to_string
from cassandra.connection import SniEndPoint
from cassandra.auth import PlainTextAuthProvider
from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy
from mock import patch
from tests.integration import requirescloudproxy
from tests.util import wait_until_not_raised
from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER
DISALLOWED_CONSISTENCIES = [
ConsistencyLevel.ANY,
ConsistencyLevel.ONE,
ConsistencyLevel.LOCAL_ONE
]
@requirescloudproxy
class CloudTests(CloudProxyCluster):
def hosts_up(self):
return [h for h in self.cluster.metadata.all_hosts() if h.is_up]
def test_resolve_and_connect(self):
self.connect(self.creds)
self.assertEqual(len(self.hosts_up()), 3)
for host in self.cluster.metadata.all_hosts():
self.assertTrue(host.is_up)
self.assertIsInstance(host.endpoint, SniEndPoint)
self.assertEqual(str(host.endpoint), "{}:{}:{}".format(
host.endpoint.address, host.endpoint.port, host.host_id))
self.assertIn(host.endpoint._resolved_address, ("127.0.0.1", '::1'))
def test_match_system_local(self):
self.connect(self.creds)
self.assertEqual(len(self.hosts_up()), 3)
for host in self.cluster.metadata.all_hosts():
row = self.session.execute('SELECT * FROM system.local', host=host).one()
self.assertEqual(row.host_id, host.host_id)
self.assertEqual(row.rpc_address, host.broadcast_rpc_address)
def test_set_auth_provider(self):
self.connect(self.creds)
self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider)
self.assertEqual(self.cluster.auth_provider.username, 'user1')
self.assertEqual(self.cluster.auth_provider.password, 'user1')
def test_support_leaving_the_auth_unset(self):
with self.assertRaises(NoHostAvailable):
self.connect(self.creds_no_auth)
self.assertIsNone(self.cluster.auth_provider)
|
def test_support_overriding_auth_provider(self):
try:
self.connect(self.creds, auth_provider=PlainTextAuthProvider('invalid', 'invalid'))
except:
pass # this will fail soon when sni_single_endpoint is updated
self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider)
self.assertEqual(self.cluster.aut
|
h_provider.username, 'invalid')
self.assertEqual(self.cluster.auth_provider.password, 'invalid')
def test_error_overriding_ssl_context(self):
with self.assertRaises(ValueError) as cm:
self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLS))
self.assertIn('cannot be specified with a cloud configuration', str(cm.exception))
def test_error_overriding_ssl_options(self):
with self.assertRaises(ValueError) as cm:
self.connect(self.creds, ssl_options={'check_hostname': True})
self.assertIn('cannot be specified with a cloud configuration', str(cm.exception))
def _bad_hostname_metadata(self, config, http_data):
config = parse_metadata_info(config, http_data)
config.sni_host = "127.0.0.1"
return config
def test_verify_hostname(self):
with patch('cassandra.datastax.cloud.parse_metadata_info', wraps=self._bad_hostname_metadata):
with self.assertRaises(NoHostAvailable) as e:
self.connect(self.creds)
self.assertIn("hostname", str(e.exception).lower())
def test_error_when_bundle_doesnt_exist(self):
try:
self.connect('/invalid/path/file.zip')
except Exception as e:
if six.PY2:
self.assertIsInstance(e, IOError)
else:
self.assertIsInstance(e, FileNotFoundError)
def test_load_balancing_policy_is_dcawaretokenlbp(self):
self.connect(self.creds)
self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy,
TokenAwarePolicy)
self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy._child_policy,
DCAwareRoundRobinPolicy)
def test_resolve_and_reconnect_on_node_down(self):
self.connect(self.creds,
idle_heartbeat_interval=1, idle_heartbeat_timeout=1,
reconnection_policy=ConstantReconnectionPolicy(120))
self.assertEqual(len(self.hosts_up()), 3)
CLOUD_PROXY_SERVER.stop_node(1)
wait_until_not_raised(
lambda: self.assertEqual(len(self.hosts_up()), 2),
0.02, 250)
host = [h for h in self.cluster.metadata.all_hosts() if not h.is_up][0]
with patch.object(SniEndPoint, "resolve", wraps=host.endpoint.resolve) as mocked_resolve:
CLOUD_PROXY_SERVER.start_node(1)
wait_until_not_raised(
lambda: self.assertEqual(len(self.hosts_up()), 3),
0.02, 250)
mocked_resolve.assert_called()
def test_metadata_unreachable(self):
with self.assertRaises(DriverException) as cm:
self.connect(self.creds_unreachable, connect_timeout=1)
self.assertIn('Unable to connect to the metadata service', str(cm.exception))
def test_metadata_ssl_error(self):
with self.assertRaises(DriverException) as cm:
self.connect(self.creds_invalid_ca)
self.assertIn('Unable to connect to the metadata', str(cm.exception))
def test_default_consistency(self):
self.connect(self.creds)
self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM)
# Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT,
# EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT
for ep_key in six.iterkeys(self.cluster.profile_manager.profiles):
ep = self.cluster.profile_manager.profiles[ep_key]
self.assertEqual(
ep.consistency_level,
ConsistencyLevel.LOCAL_QUORUM,
"Expecting LOCAL QUORUM for profile {}, but got {} instead".format(
_execution_profile_to_string(ep_key), ConsistencyLevel.value_to_name[ep.consistency_level]
))
def test_default_consistency_of_execution_profiles(self):
cloud_config = {'secure_connect_bundle': self.creds}
self.cluster = Cluster(cloud=cloud_config, protocol_version=4, execution_profiles={
'pre_create_default_ep': ExecutionProfile(),
'pre_create_changed_ep': ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
),
})
self.cluster.add_execution_profile('pre_connect_default_ep', ExecutionProfile())
self.cluster.add_execution_profile(
'pre_connect_changed_ep',
ExecutionProfile(
consistency_level=ConsistencyLevel.LOCAL_ONE,
)
)
session = se
|
ktonon/GameSoup | gamesoup/matches/models.py | Python | mit | 520 | 0.003846 |
from django.core.urlresolvers import reverse
from django.db import models
from gamesoup.games.model
|
s import *
class Match(models.Model):
game = models.ForeignKey(Game)
state = models.TextField(blank=True)
def __unicode__(self):
return self.game.name
class Meta:
verbose_name_plural = 'Matches'
def play_link(self):
return '<a href="%s">play<
|
/a>' % reverse('matches:play_match', args=[self.id])
play_link.short_description = 'Play'
play_link.allow_tags = True
|
domenkozar/pip | tests/test_vcs_git.py | Python | mit | 3,904 | 0.001281 |
from mock import patch
from pip.vcs.git import Git
from tests.test_pip import (reset_env, run_pip,
_create_test_package,)
from tests.git_submodule_helpers import (
_change_test_package_submodule,
_pull_in_submodule_changes_to_module,
_create_test_package_with_submodule,
)
def test_get_tag_revs_should_return_tag_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
env.run('git', 'tag', '0.2', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_tag_revs(version_pkg_path)
assert result == {'0.1': commit, '0.2': commit}, result
def test_get_branch_revs_should_return_branch_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
def test_get_branch_revs_should_ignore_no_branch():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
# current branch here is "* (nobranch)"
env.run('git', 'checkout', commit,
cwd=version_pk
|
g_path, expect_stderr=True)
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_branch_name(branches_revs_mock,
|
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('master', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_tag_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_ambiguous_commit(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456'], result
def test_check_submodule_addition():
"""
Submodules are pulled in on install and updated on upgrade.
"""
env = reset_env()
module_path, submodule_path = _create_test_package_with_submodule(env)
install_result = run_pip('install', '-e', 'git+'+module_path+'#egg=version_pkg')
assert '.virtualenv/src/version-pkg/testpkg/static/testfile' in install_result.files_created
_change_test_package_submodule(env, submodule_path)
_pull_in_submodule_changes_to_module(env, module_path)
# expect error because git may write to stderr
update_result = run_pip('install', '-e', 'git+'+module_path+'#egg=version_pkg', '--upgrade', expect_error=True)
assert env.venv/'src/version-pkg/testpkg/static/testfile2' in update_result.files_created
|
blond-admin/BLonD | __EXAMPLES/main_files/EX_07_Ions.py | Python | gpl-3.0 | 6,437 | 0.010253 |
# coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
Example input for simulation of ion dynamics
No intensity effects
:Authors: **Alexandre Lasheen**
'''
from __future__ import division, print_function
from builtins import range
from scipy.constants import physical_constants
# Atomic Mass Unit [eV]
u = physical_constants['atomic mass unit-electron volt relationship'][0]
import numpy as np
from blond.input_parameters.ring import Ring
from blond.input_parameters.rf_parameters import RFStation
from blond.trackers.tracker import RingAndRFTracker
from blond.beam.distributions import bigaussian
from blond.monitors.monitors import BunchMonitor
from blond.beam.profile import Profile, CutOptions
from blond.beam.beam import Beam, Particle
from blond.plots.plot import Plot
import os
this_directory = os.path.dirname(os.path.realpath(__file__)) + '/'
try:
os.mkdir(this_directory + '../output_files')
except:
pass
try:
os.mkdir(this_directory + '../output_files/EX_07_fig')
except:
pass
# Simulation parameters --------------------------------------------------------
# Bunch parameters
N_b = 5.0e11 # Design Intensity in SIS100
N_p = 50000 # Macro-particles
tau_0 = 100.0e-9 # Initial bunch length, 4 sigma [s]
Z = 28. # Charge state of Uranium
m_p = 238.05078826*u # Isotope mass of U-238
# Machine and RF parameters
C = 1083.6 # Machine circumference [m]
p_i = 153.37e9 # Synchronous momentum [eV/c]
p_f = 535.62e9 # Synchronous momentum, final 535.62e9
h = 10 # Harmonic number
V = 280.e3 # RF voltage [V]
dphi = np.pi # Phase modulation/offset
gamma_t = 15.59 # Transition gamma
alpha = 1./gamma_t/gamma_t # First order mom. comp. factor
# Tracking details
N_t = 45500 # Number of turns to track
dt_plt = 5000 # Time steps between plots
# Simulation setup -------------------------------------------------------------
print("Setting up the simulation...")
print("")
# Define general parameters
general_params = Ring(C, alpha, np.linspace(p_i, p_f, N_t+1),
Particle(m_p, Z), n_turns=N_t)
# Define beam and distribution
beam = Beam(general_params, N_p, N_b)
print("Particle mass is %.3e eV" %general_params.Particle.mass)
print("Particle charge is %d e" %general_params.Particle.charge)
linspace_test = np.linspace(p_i, p_f, N_t+1)
momentum_test = general_params.momentum
beta_test = general_params.beta
gamma_test = general_params.gamma
energy_test = general_params.energy
mass_test = general_params.Particle.mass # [eV]
charge_test = general_params.Particle.charge # e*Z
# Define RF station parameters and corresponding tracker
rf_params = RFStation(general_params, [h], [V], [dphi])
print("Initial bucket length is %.3e s" %(2.*np.pi/rf_params.omega_rf[0,0]))
print("Final bucket length is %.3e s" %(2.*np.pi/rf_params.omega_rf[0,N_t]))
|
phi_s_test = rf_param
|
s.phi_s #: *Synchronous phase
omega_RF_d_test = rf_params.omega_rf_d #: *Design RF frequency of the RF systems in the station [GHz]*
omega_RF_test = rf_params.omega_rf #: *Initial, actual RF frequency of the RF systems in the station [GHz]*
phi_RF_test = rf_params.omega_rf #: *Initial, actual RF phase of each harmonic system*
E_increment_test = rf_params.delta_E #Energy increment (acceleration/deceleration) between two turns,
long_tracker = RingAndRFTracker(rf_params, beam)
eta_0_test = rf_params.eta_0 #: *Slippage factor (0th order) for the given RF section*
eta_1_test = rf_params.eta_1 #: *Slippage factor (1st order) for the given RF section*
eta_2_test = rf_params.eta_2 #: *Slippage factor (2nd order) for the given RF section*
alpha_order_test = rf_params.alpha_order
bigaussian(general_params, rf_params, beam, tau_0/4,
reinsertion = 'on', seed=1)
# Need slices for the Gaussian fit
slice_beam = Profile(beam, CutOptions(n_slices=100))
# Define what to save in file
bunchmonitor = BunchMonitor(general_params, rf_params, beam,
this_directory + '../output_files/EX_07_output_data',
Profile=slice_beam)
format_options = {'dirname': this_directory + '../output_files/EX_07_fig'}
plots = Plot(general_params, rf_params, beam, dt_plt, N_t, 0, 8.e-7,
-400e6, 400e6, separatrix_plot=True, Profile=slice_beam,
h5file=this_directory + '../output_files/EX_07_output_data',
format_options=format_options)
# For testing purposes
test_string = ''
test_string += '{:<17}\t{:<17}\t{:<17}\t{:<17}\n'.format(
'mean_dE', 'std_dE', 'mean_dt', 'std_dt')
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(beam.dE), np.std(beam.dE), np.mean(beam.dt), np.std(beam.dt))
# Accelerator map
map_ = [long_tracker] + [slice_beam] + [bunchmonitor] + [plots]
print("Map set")
print("")
# Tracking ---------------------------------------------------------------------
for i in range(1, N_t+1):
# Plot has to be done before tracking (at least for cases with separatrix)
if (i % dt_plt) == 0:
print("Outputting at time step %d..." %i)
print(" Beam momentum %.6e eV" %beam.momentum)
print(" Beam gamma %3.3f" %beam.gamma)
print(" Beam beta %3.3f" %beam.beta)
print(" Beam energy %.6e eV" %beam.energy)
print(" Four-times r.m.s. bunch length %.4e s" %(4.*beam.sigma_dt))
print("")
# Track
for m in map_:
m.track()
# Define losses according to separatrix
beam.losses_separatrix(general_params, rf_params)
# For testing purposes
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(beam.dE), np.std(beam.dE), np.mean(beam.dt), np.std(beam.dt))
with open(this_directory + '../output_files/EX_07_test_data.txt', 'w') as f:
f.write(test_string)
print("Done!")
|
OSSHealth/ghdata | workers/insight_worker/setup.py | Python | mit | 1,388 | 0.003602 |
#SPDX-License-Identifier: MIT
import io
import os
import re
from setuptools import find_packages
fro
|
m setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="insight_worker",
version="1.0.0",
url="https://github.com/chaoss/augur",
license='MIT',
author="Augurlabs",
author_email="s@gogg
|
ins.com",
description="Augur Worker that discovers and stores data anomalies",
packages=find_packages(exclude=('tests',)),
install_requires=[
'Flask==1.1.4',
'Flask-Cors==3.0.10',
'Flask-Login==0.5.0',
'Flask-WTF==0.14.3',
'requests==2.22.0',
'psycopg2-binary==2.8.6',
'click==7.1.2',
'scipy==1.4.1',
'sklearn==0.0',
'numpy==1.19.5',
],
entry_points={
'console_scripts': [
'insight_worker_start=workers.insight_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
]
)
|
VirusTotal/misp-modules | misp_modules/modules/expansion/ipasn.py | Python | agpl-3.0 | 2,208 | 0.002266 |
# -*- coding: utf-8 -*-
import json
from . import check_input_attribute, standard_error_message
from pyipasnhistory import IPASNHistory
from pymisp import MISPAttribute, MISPEvent, MISPObject
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst'], 'format': 'misp_standard'}
moduleinfo = {'version': '0.2', 'author': 'Raphaël Vinot',
'description': 'Query an IP ASN history service (https://github.com/CIRCL/IP-ASN-history.git)',
'module-type': ['expansion', 'hover']}
def parse_result(attribute, values):
event = MISPEvent()
initial_attribute = MISPAttribute()
initial_attribute.from_dict(**attribute)
event.add_attribute(**initial_attribute)
mapping = {'asn': ('AS', 'asn'), 'prefix'
|
: ('ip-src', 'subnet-announced')}
print(values)
for last_seen, response in values['response'].items():
asn = MISPObject('asn')
a
|
sn.add_attribute('last-seen', **{'type': 'datetime', 'value': last_seen})
for feature, attribute_fields in mapping.items():
attribute_type, object_relation = attribute_fields
asn.add_attribute(object_relation, **{'type': attribute_type, 'value': response[feature]})
asn.add_reference(initial_attribute.uuid, 'related-to')
event.add_object(**asn)
event = json.loads(event.to_json())
return {key: event[key] for key in ('Attribute', 'Object')}
def handler(q=False):
if q is False:
return False
request = json.loads(q)
if not request.get('attribute') or not check_input_attribute(request['attribute']):
return {'error': f'{standard_error_message}, which should contain at least a type, a value and an uuid.'}
if request['attribute']['type'] not in mispattributes['input']:
return {'error': 'Unsupported attribute type.'}
toquery = request['attribute']['value']
ipasn = IPASNHistory()
values = ipasn.query(toquery)
if not values:
misperrors['error'] = 'Unable to find the history of this IP'
return misperrors
return {'results': parse_result(request['attribute'], values)}
def introspection():
return mispattributes
def version():
return moduleinfo
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/stats/plot_cluster_stats_evoked.py | Python | bsd-3-clause | 2,991 | 0 |
"""
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
|
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=2)
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
|
|
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/vm_snapshots/paths/xsky_path9.py | Python | apache-2.0 | 2,040 | 0.015196 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'volume3-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapsh
|
ot5'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup5'],
[TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.delete_vm_s
|
napshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9', 'vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1', 'vm1-backup5', 'volume1-backup5', 'volume2-backup5', 'volume3-backup5', 'vm1-image1']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-snapshot5', 'vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_backup2:['vm1-backup5', 'volume1-backup5', 'volume2-backup5', 'volume3-backup5']---vm1_volume1_volume2_volume3
vm_snap3:['vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9']---vm1volume1_volume2_volume3
vm_backup1:['vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']---vm1_volume1_volume2_volume3
'''
|
soylentdeen/CIAO-commissioning-tools | sandbox/dumpModalBasis.py | Python | mit | 275 | 0 |
import scipy
import numpy
import matplotlib.pyplot as pyplot
import pyfits
import VLTTools
ciao =
|
VLTTools.VLTConnection(simulate=False)
ciao.get_InteractionMatrices
|
()
ciao.dumpCommandMatrix(nFiltModes=10)
print "This is where we will Compute the Modal Basis from the IMs"
|
ujdhesa/unisubs | utils/tests/multiqueryset.py | Python | agpl-3.0 | 4,283 | 0.000467 |
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.test import TestCase
from videos.models import Video
from utils.multi_query_set import MultiQuerySet
class MultiQuerySetTest(TestCase):
fixtures = ['test.json']
def test_full(self):
self.assertEqual(list(Video.objects.all()),
list(MultiQuerySet(Video.objects.all())),
"Full, single MQS didn't match full QS.")
self.assertEqual(list(Video.objects.all()),
list(MultiQuerySet(Video.objects.none(),
Video.objects.all(),
Video.objects.none())),
"Full MQS with blanks didn't match full QS.")
self.assertEqual(list(Video.objects.all()) + list(Video.objects.all()),
list(MultiQuerySet(Video.objects.none(),
Video.objects.all(),
Video.objects.none(),
Video.objects.all())),
"Double MQS with blanks didn't match double full QS.")
def test_slice(self):
qs = Video.objects.all()
mqs = MultiQuerySet(Video.objects.all())
self.assertEqual(list(qs[0:1]),
list(mqs[0:1]),
"MQS[:1] failed.")
self.assertEqual(list(qs[0:2]),
list(mqs[0:2]),
"MQS[:2] failed.")
self.assertEqual(list(qs[0:3]),
list(mqs[0:3]),
"MQS[:3] (out-of-bounds endpoint) failed.")
self.assertEqual(list(qs[1:3]),
list(mqs[1:3]),
"MQS[1:3] failed.")
self.assertEqual(list(qs[2:3]),
list(mqs[2:3]),
"MQS[2:3] failed.")
self.assertEqual(list(qs[1:1]),
list(mqs[1:1]),
"MQS[1:1] (empty slice) failed.")
def test_slice_multiple(se
|
lf):
qs = list(Video.objects.all())
qs = qs + qs + qs
mqs = MultiQuerySet(Video.objects.all(),
Video.objects.all(),
Vid
|
eo.objects.all())
self.assertEqual(qs[0:3],
list(mqs[0:3]),
"MQS[:3] failed.")
self.assertEqual(qs[0:6],
list(mqs[0:6]),
"MQS[:6] (entire range) failed.")
self.assertEqual(qs[0:7],
list(mqs[0:7]),
"MQS[:7] (out-of-bounds endpoint) failed.")
self.assertEqual(qs[1:3],
list(mqs[1:3]),
"MQS[1:3] failed.")
self.assertEqual(qs[1:6],
list(mqs[1:6]),
"MQS[1:6] (entire range) failed.")
self.assertEqual(qs[1:7],
list(mqs[1:7]),
"MQS[1:7] (out-of-bounds endpoint) failed.")
self.assertEqual(qs[3:3],
list(mqs[3:3]),
"MQS[3:3] failed.")
self.assertEqual(qs[3:6],
list(mqs[3:6]),
"MQS[3:6] (entire range) failed.")
self.assertEqual(qs[3:7],
list(mqs[3:7]),
"MQS[3:7] (out-of-bounds endpoint) failed.")
|
google/ml_collections | ml_collections/config_flags/examples/define_config_dataclass_basic.py | Python | apache-2.0 | 1,322 | 0.004539 |
# Copyright 2022 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python 3
r"""Example of basic DEFINE_config_dataclass usage.
To run this example:
python define_config_dataclass_basic.py -- --my_config.field1=8 \
--my_config.nested.field=2.1 --my_config.tuple='(1, 2, (1, 2))'
"""
import dataclasses
from typing import Any, Mapping, Sequence
from absl import app
from ml_collections import config_flags
@dataclasses.dataclass
class MyConfig:
field1: int
field2: str
nested:
|
Mapping[str, Any]
tuple: Sequence[int]
config = MyConfig(
field1=1,
field2='tom',
nested={'field': 2.23},
tuple=(1, 2, 3),
)
_CONFIG = config_flags.DEFINE_config_dataclass('my_
|
config', config)
def main(_):
print(_CONFIG.value)
if __name__ == '__main__':
app.run(main)
|
Micronaet/micronaet-mx8 | mx_pick_in/__openerp__.py | Python | agpl-3.0 | 1,557 | 0.001285 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute
|
it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any
|
later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'MX DDT',
'version': '0.1',
'category': 'Accounting',
'description': '''
DDT module with mx stock move
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'account',
'stock',
'sale_stock',
'stock_account',
],
'init_xml': [],
'demo': [],
'data': [
'view/ddt_view.xml',
'wizard/ddt_create_direct_invoice_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
jobdash/semantic | semantic/units.py | Python | mit | 4,744 | 0.000211 |
import re
import quantities as pq
from numbers import NumberService
class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, input):
def handleExponents(input):
m = re.search(r'\bsquare (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquare (\w+)', r'\g<1>^2', input)
m = re.search(r'\bsquared (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquared (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) squared', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) squared', r'\g<1>^2', input)
m = re.search(r'\bsq (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsq (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) cubed', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) cubed', r'\g<1>^3', input)
m = re.search(r'\bcubic (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bcubic (\w+)', r'\g<1>^3', input)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', input)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
input = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), input)
return input
input = re.sub(r'\sper\s', r' / ', input)
input = handleExponents(input)
return input
def parseUnits(self, input)
|
:
"""Carries out a conversio
|
n (represented as a string) and returns the
result as a human-readable string.
Args:
input (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units.
"""
quantity = self.convert(input)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units
def isValidUnit(self, w):
"""Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module.
"""
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/'
def extractUnits(self, input):
"""Collects all the valid units from an input string. Works by
appending consecutive words from the string and cross-referencing
them with a set of valid units.
Args:
input (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit.
"""
input = self._preprocess(input)
units = []
description = ""
for w in input.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units
def convert(self, input):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
input (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
input = self._preprocess(input)
n = NumberService().longestNumber(input)
units = self.extractUnits(input)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
|
rsalmaso/django-babeljs | babeljs/execjs/runtime.py | Python | mit | 8,806 | 0.000908 |
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2018, Raffaele Salmaso <raffaele@salmaso.org>
# Copyright (c) 2012 Omoto Kenji
# Copyright (c) 2011 Sam Stephenson
# Copyright (c) 2011 Josh Peek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import json
import re
import os
from subprocess import Popen, PIPE, STDOUT
import tempfile
from .exceptions import RuntimeError, ProgramError, RuntimeUnavailable
from .utils import json2_source, which
def encode_unicode_cod
|
epoints(str):
r"""
>>> encode_unicode_codepoints("a") == 'a'
|
True
>>> ascii = ''.join(chr(i) for i in range(0x80))
>>> encode_unicode_codepoints(ascii) == ascii
True
>>> encode_unicode_codepoints('\u4e16\u754c') == '\\u4e16\\u754c'
True
"""
codepoint_format = '\\u{0:04x}'.format
def codepoint(m):
return codepoint_format(ord(m.group(0)))
return re.sub('[^\x00-\x7f]', codepoint, str)
class Runtime(object):
def __init__(self, name, command, runner_source, encoding='utf8'):
self._name = name
if isinstance(command, str):
command = [command]
self._command = command
self._runner_source = runner_source
self._encoding = encoding
def __str__(self):
return "{class_name}({runtime_name})".format(
class_name=type(self).__name__,
runtime_name=self._name,
)
@property
def name(self):
return self._name
def exec_(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self).exec_(source)
def eval(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self).eval(source)
def compile(self, source):
if not self.is_available():
raise RuntimeUnavailable()
return self.Context(self, source)
def is_available(self):
return self._binary() is not None
def runner_source(self):
return self._runner_source
def _binary(self):
"""protected"""
if not hasattr(self, "_binary_cache"):
self._binary_cache = which(self._command)
return self._binary_cache
def _execfile(self, filename):
"""protected"""
cmd = self._binary() + [filename]
p = None
try:
p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
stdoutdata, stderrdata = p.communicate()
ret = p.wait()
finally:
del p
if ret == 0:
return stdoutdata
else:
raise RuntimeError(stdoutdata)
class Context(object):
def __init__(self, runtime, source=''):
self._runtime = runtime
self._source = source
def eval(self, source):
if not source.strip():
data = "''"
else:
data = "'('+" + json.dumps(source, ensure_ascii=True) + "+')'"
code = 'return eval({data})'.format(data=data)
return self.exec_(code)
def exec_(self, source):
if self._source:
source = self._source + '\n' + source
(fd, filename) = tempfile.mkstemp(prefix='babeljs', suffix='.js')
os.close(fd)
try:
with io.open(filename, "w+", encoding=self._runtime._encoding) as fp:
fp.write(self._compile(source))
output = self._runtime._execfile(filename)
finally:
os.remove(filename)
output = output.decode(self._runtime._encoding)
output = output.replace("\r\n", "\n").replace("\r", "\n")
output = self._extract_result(output.split("\n")[-2])
return output
def call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
def _compile(self, source):
"""protected"""
runner_source = self._runtime.runner_source()
replacements = {
'#{source}': lambda: source,
'#{encoded_source}': lambda: json.dumps(
"(function(){ " +
encode_unicode_codepoints(source) +
" })()"
),
'#{json2_source}': json2_source,
}
pattern = "|".join(re.escape(k) for k in replacements)
runner_source = re.sub(pattern, lambda m: replacements[m.group(0)](), runner_source)
return runner_source
def _extract_result(self, output_last_line):
"""protected"""
if not output_last_line:
status = value = None
else:
ret = json.loads(output_last_line)
if len(ret) == 1:
ret = [ret[0], None]
status, value = ret
if status == "ok":
return value
elif value and value.startswith('SyntaxError:'):
raise RuntimeError(value)
else:
raise ProgramError(value)
class PyV8Runtime(object):
def __init__(self):
try:
import PyV8
except ImportError:
self._is_available = False
else:
self._is_available = True
@property
def name(self):
return "PyV8"
def exec_(self, source):
return self.Context().exec_(source)
def eval(self, source):
return self.Context().eval(source)
def compile(self, source):
return self.Context(source)
def is_available(self):
return self._is_available
class Context:
def __init__(self, source=""):
self._source = source
def exec_(self, source):
source = '''\
(function() {{
{0};
{1};
}})()'''.format(
encode_unicode_codepoints(self._source),
encode_unicode_codepoints(source)
)
source = str(source)
import PyV8
import contextlib
#backward compatibility
with contextlib.nested(PyV8.JSContext(), PyV8.JSEngine()) as (ctxt, engine):
js_errors = (PyV8.JSError, IndexError, ReferenceError, SyntaxError, TypeError)
try:
script = engine.compile(source)
except js_errors as e:
raise RuntimeError(e)
try:
value = script.run()
except js_errors as e:
raise ProgramError(e)
return self.convert(value)
def eval(self, source):
return self.exec_('return ' + encode_unicode_codepoints(source))
def call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
@classmethod
def con
|
philanthropy-u/edx-platform | common/test/acceptance/tests/lms/test_account_settings.py | Python | agpl-3.0 | 21,442 | 0.002798 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from datetime import datetime
from unittest import skip
import pytest
from bok_choy.page_object import XSS_INJECTION
from pytz import timezone, utc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage, FULL_NAME
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, AcceptanceTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
def visit_account_settings_page(self, gdpr=False):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
# TODO: LEARNER-4422 - delete when we clean up flags
if gdpr:
self.account_settings_page.browser.get(self.browser.current_url + "?course_experience.gdpr=1")
self.account_settings_page.wait_for_page()
def log_in_as_unique_user(self, email=None, full_name=None, password=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(
self.browser,
username=username,
email=email,
full_name=full_name,
password=password
).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def settings_changed_event_filter(self, event):
"""Filter out any events that are not "settings changed" events."""
return event['event_type'] == self.USER_SETTINGS_CHANGED_EVENT_NAME
def expected_settings_changed_event(self, setting, old, new, table=None):
"""A dictionary representing the expected fields in a "settings changed" event."""
return {
'username': self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': self.user_id,
'setting': setting,
'old': old,
'new': new,
'truncated': [],
'table': table or 'auth_userprofile'
}
}
def settings_change_initiated_event_filter(self, event):
"""Filter out any events that are not "settings change initiated" events."""
return event['event_type'] == self.CHANGE_INITIATED_EVENT_NAME
def expected_settings_change_initiated_event(self, setting, old, new, username=None, user_id=None):
"""A dictionary representing the expected fields in a "settings change initiated" event."""
return {
'username': username or self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': user_id or self.user_id,
'setting': setting,
'old': old,
'new': new,
}
}
def get_settings_page_url(self):
"""The absolute URL of the account settings page given the test context."""
return self.relative_path_to_absolute_uri(self.ACCOUNT_SETTINGS_REFERER)
def assert_no_setting_changed_event(self):
"""Assert no setting changed event has been emitted thus far."""
self.assert_no_matching_events_were_emitted({'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME})
class DashboardMenuTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests that the dashboard menu works correctly with the accoun
|
t settings page.
"""
shard = 8
def test_link_on_dashboard_works(self):
"""
Scenario: Verify that the "Account" link works from the dashboard.
Given that I am a registered user
And I visit my dashboard
And I click on "Account" in the top drop down
Then I should see my account sett
|
ings page
"""
self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Account', dashboard_page.username_dropdown_link_text)
dashboard_page.click_account_settings_link()
class AccountSettingsPageTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests that verify behaviour of the Account Settings page.
"""
SUCCESS_MESSAGE = 'Your changes have been saved.'
shard = 8
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsPageTest, self).setUp()
self.full_name = FULL_NAME
self.social_link = ''
self.username, self.user_id = self.log_in_as_unique_user(full_name=self.full_name)
self.visit_account_settings_page()
def test_page_view_event(self):
"""
Scenario: An event should be recorded when the "Account Settings"
page is viewed.
Given that I am a registered user
And I visit my account settings page
Then a page view analytics event should be recorded
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1)
self.assert_events_match(
[
{
'event': {
'user_id': self.user_id,
'page': 'account',
'visibility': None
}
}
],
actual_events
)
def test_all_sections_and_fields_are_present(self):
"""
Scenario: Verify that all sections and fields are present on the page.
"""
expected_sections_structure = [
{
'title': 'Basic Account Information',
'fields': [
'Username',
'Full Name',
'Email Address (Sign In)',
'Password',
'Language',
'Country or Region of Residence',
'Time Zone',
]
},
{
'title': 'Additional Information',
'fields': [
'Education Completed',
'Gender',
'Year of Birth',
'Preferred Language',
]
},
{
'title': 'Social Media Links',
'fields': [
'Twitter Link',
'Facebook Link',
'LinkedIn Link',
]
},
{
'title': 'Delete My Account',
'fields': []
},
]
self.assertEqual(self.account_settings_page.sections_structure(), expected_sections_structure)
def _test_readonly_field(self, field_id, title, value):
"""
Test behavior of a readonly field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_readonly_field(field_id), value)
def _test_text_field(
self, field_id, title, initial_value, new_invalid_value, new_valid_values, success_message=SUCCESS_MESSAGE,
assert_after_reload=True
):
"""
Test behaviour of a text field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), initial_value)
self.assertEqual(
|
itielshwartz/BackendApi | lib/pyasn1_modules/rfc1157.py | Python | apache-2.0 | 3,309 | 0.001511 |
#
# SNMPv1 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1157.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import univ, namedtype, namedval, tag
from pyasn1_modules import rfc1155
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('version-1', 0)
)
defaultValue = 0
class Community(univ.OctetString): pass
class RequestID(univ.Integer): pass
class ErrorStatus(univ.Integer):
namedValues = namedval.NamedValues(
('noError', 0),
('tooBig', 1),
('noSuchName', 2),
('badValue', 3),
('readOnly', 4),
('genErr', 5)
)
class ErrorIndex(univ.Integer): pass
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1155.ObjectName()),
namedtype.NamedType('value', rfc1155.ObjectSyntax())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
class _RequestBase(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', RequestID()),
namedtype.NamedType('error-status', ErrorStatus()),
namedtype.NamedType('error-index', ErrorIndex()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFo
|
rmatConstructed, 0)
)
class GetNextRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class GetResponsePDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagI
|
mplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class TrapPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
namedtype.NamedType('generic-trap', univ.Integer().clone(
namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
('authenticationFailure', 4), ('egpNeighborLoss', 5),
('enterpriseSpecific', 6)))),
namedtype.NamedType('specific-trap', univ.Integer()),
namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class Pdus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-response', GetResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('trap', TrapPDU())
)
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('community', Community()),
namedtype.NamedType('data', Pdus())
)
|
kwlzn/pants | tests/python/pants_test/engine/test_storage.py | Python | apache-2.0 | 4,071 | 0.012282 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from contextlib import closing
from pants.base.project_tree import Dir, File
from pants.engine.nodes import Noop, Return, Runnable, Throw, Waiting
from pants.engine.storage import Cache, InvalidKeyError, Lmdb, Storage
def _runnable(an_arg):
return an_arg
class PickleableException(Exception):
def __eq__(self, other):
return type(self) == type(other)
class StorageTest(unittest.TestCase):
TEST_KEY = b'hello'
TEST_VALUE = b'world'
TEST_PATH = File('/foo')
TEST_PATH2 = Dir('/bar')
class SomeException(Exception): pass
def setUp(self):
self.storage = Storage.create()
self.result = 'something'
self.request = Runnable(func=_runnable, args=('this is an arg',))
def test_lmdb_key_value_store(self):
lmdb = Lmdb.create()[0]
with closing(lmdb) as kvs:
# Initially key does not exist.
self.assertFalse(kvs.get(self.TEST_KEY))
      # Now write a key value pair and read back.
written = kvs.put(self.TEST_KEY, self.TEST_VALUE)
self.assertTrue(written)
self.assertEquals(self.TEST_VALUE, kvs.get(self.TEST_KEY).getvalue())
      # Writing the same key again will not overwrite.
      self.assertFalse(kvs.put(self.TEST_KEY, self.TEST_VALUE))
def test_storage(self):
with closing(self.storage) as storage:
key = storage.put(self.TEST_PATH)
self.assertEquals(self.TEST_PATH, storage.get(key))
with self.assertRaises(InvalidKeyError):
self.assertFalse(storage.get(self.TEST_KEY))
def test_storage_key_mappings(self):
with closing(self.storage) as storage:
key1 = storage.put(self.TEST_PATH)
key2 = storage.put(self.TEST_PATH2)
storage.add_mapping(key1, key2)
self.assertEquals(key2, storage.get_mapping(key1))
# key2 isn't mapped to any other key.
self.assertIsNone(storage.get_mapping(key2))
def test_state_roundtrips(self):
states = [
Return('a'),
Throw(PickleableException()),
Waiting(['a']),
Runnable(_runnable, ('an arg',)),
Noop('nada {}', ('op',))
]
with closing(self.storage) as storage:
for state in states:
key = storage.put_state(state)
actual = storage.get_state(key)
self.assertEquals(state, actual)
self.assertEquals(key, storage.put_state(actual))
class CacheTest(unittest.TestCase):
def setUp(self):
"""Setup cache as well as request and result."""
self.storage = Storage.create()
self.cache = Cache.create(storage=self.storage)
self.request = Runnable(func=_runnable, args=('this is an arg',))
self.result = 'something'
def test_cache(self):
"""Verify get and put."""
with closing(self.cache):
self.assertIsNone(self.cache.get(self.request)[1])
self._assert_hits_misses(hits=0, misses=1)
request_key = self.storage.put_state(self.request)
self.cache.put(request_key, self.result)
self.assertEquals(self.result, self.cache.get(self.request)[1])
self._assert_hits_misses(hits=1, misses=1)
def test_failure_to_update_mapping(self):
"""Verify we can access cached result only if we save both result and the key mapping."""
with closing(self.cache):
      # This places the result into the main storage without saving the key mapping.
      # It simulates an error that might happen when saving the key mapping after
      # successfully saving the result.
self.cache._storage.put(self.result)
self.assertIsNone(self.cache.get(self.request)[1])
self._assert_hits_misses(hits=0, misses=1)
def _assert_hits_misses(self, hits, misses):
self.assertEquals(hits, self.cache.get_stats().hits)
self.assertEquals(misses, self.cache.get_stats().misses)
self.assertEquals(hits+misses, self.cache.get_stats().total)
|
DoubleCiti/mongodb-migrations
|
mongodb_migrations/base.py
|
Python
|
gpl-3.0
| 967
| 0.004137
|
import pymongo
class BaseMigration(object):
    def __init__(self,
host='127.0.0.1',
port='27017',
                 database=None,
user=None,
password=None,
url=None):
if url and database and user is not None: #provide auth_database in url (mongodb://mongohostname:27017/auth_database)
client = pymongo.MongoClient(url, username=user, password=password)
self.db = client.get_database(database)
elif url:
client = pymongo.MongoClient(url)
self.db = client.get_default_database()
elif database:
client = pymongo.MongoClient(host=host, port=port)
self.db = client[database]
else:
raise Exception('no database, url or auth_database in url provided')
def upgrade(self):
raise NotImplementedError
def downgrade(self):
raise NotImplementedError
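# Hedged usage sketch (not part of the original file): a concrete migration
# would subclass BaseMigration and override upgrade()/downgrade(); the "users"
# collection and "migrated" field below are purely illustrative.
#
#   class AddMigratedFlag(BaseMigration):
#       def upgrade(self):
#           self.db.users.update_many({}, {"$set": {"migrated": True}})
#       def downgrade(self):
#           self.db.users.update_many({}, {"$unset": {"migrated": ""}})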
|
codebox/algorithms
|
test_graph_topological_ordering.py
|
Python
|
mit
| 943
| 0.020148
|
import unittest
from graph_search import Graph
from graph_topological_ordering import find_topological_order
class TestGraphTopologicalOrdering(unittest.TestCase):
def check_labels(self, graph, smaller, larger):
self.assertTrue(graph.get_node(smaller).label < graph.get_node(larger).label)
|
def test_1(self):
graph = Graph([[0,1],[0,2],[1,3],[2,3]], True)
        find_topological_order(graph)
self.check_labels(graph, 0, 1)
self.check_labels(graph, 0, 2)
self.check_labels(graph, 1, 3)
self.check_labels(graph, 2, 3)
def test_2(self):
graph = Graph([[0,1],[1,2],[1,3],[2,4],[3,5]], True)
find_topological_order(graph)
self.check_labels(graph, 0, 1)
self.check_labels(graph, 1, 2)
self.check_labels(graph, 1, 3)
self.check_labels(graph, 2, 4)
self.check_labels(graph, 3, 5)
if __name__ == '__main__':
unittest.main()
|
oscaro/django-oscar-adyen
|
tests/test_requests.py
|
Python
|
bsd-3-clause
| 3,128
| 0.001598
|
from django.conf import settings
from django.test import TestCase, override_settings
from freezegun import freeze_time
from adyen.gateway import MissingFieldException
from adyen.scaffold import Scaffold
TEST_RETURN_URL = 'https://www.example.com/checkout/return/adyen/'
EXPECTED_FIELDS_LIST = [
{'type': 'hidden', 'name': 'currencyCode', 'value': 'EUR'},
{'type': 'hidden', 'name': 'merchantAccount', 'value': settings.ADYEN_IDENTIFIER},
{'type': 'hidden', 'name': 'merchantReference', 'value': '00000000123'},
{'type': 'hidden', 'name': 'merchantReturnData', 'value': '123'},
{'type': 'hidden', 'name': 'merchantSig', 'value': 'kKvzRvx7wiPLrl8t8+owcmMuJZM='},
{'type': 'hidden', 'name': 'paymentAmount', 'value': '123'},
{'type': 'hidden', 'name': 'resURL', 'value': TEST_RETURN_URL},
{'type': 'hidden', 'name': 'sessionValidity', 'value': '2014-07-31T17:20:00Z'},
{'type': 'hidden', 'name': 'shipBeforeDate', 'value': '2014-08-30'},
{'type': 'hidden', 'name': 'shopperEmail', 'value': 'test@example.com'},
{'type': 'hidden', 'name': 'shopperLocale', 'value': 'fr'},
{'type': 'hidden', 'name': 'shopperReference', 'value': '789'},
{'type': 'hidden', 'name': 'skinCode', 'value': 'cqQJKZpg'},
{'type': 'hidden', 'name': 'countryCode', 'value': 'fr'},
{'type': 'hidden', 'name': 'brandCode', 'value': 'ideal'},
{'type': 'hidden', 'name': 'issuerId', 'value': '1211'},
]
ORDER_DATA = {
'amount': 123,
'basket_id': 456,
'client_email': 'test@example.com',
'client_id': 789,
'currency_code': 'EUR',
'country_code': 'fr',
'description': 'Order #123',
'order_id': 'ORD-123',
'order_number': '00000000123',
'return_url': TEST_RETURN_URL,
'shopper_locale': 'fr',
'brand_code': 'ideal',
'issuer_id': '1211',
}
class TestAdyenPaymentRequest(TestCase):
@override_settings(ADYEN_ACTION_URL='foo')
def test_form_action(self):
"""
Test that the form action is properly fetched from the settings.
"""
assert 'foo' == Scaffold().get_form_action(request=None)
def test_form_fields_ok(self):
"""
Test that the payment form fields list is properly built.
"""
with freeze_time('2014-07-31 17:00:00'): # Any datetime will do.
fields_list = Scaffold().get_form_fields(request=None, order_data=ORDER_DATA)
# Order doesn't matter, so normally we'd use a set. But Python doesn't do
# sets of dictionaries, so we compare individually.
assert len(fields_list) == len(EXPECTED_FIELDS_LIST)
for field in fields_list:
assert field in EXPECTED_FIELDS_LIST
def test_form_fields_with_missing_mandatory_field(self):
"""
Test that the proper exception is raised when trying
to build a fields list with a missing mandatory field.
"""
new_order_data = ORDER_DATA.copy()
del new_order_data['amount']
with self.assertRaises(MissingFieldException):
Scaffold().get_form_fields(request=None, order_data=new_order_data)
|
vint21h/django-po2xls
|
tests/management/commands/test_po-to-xls.py
|
Python
|
gpl-3.0
| 1,479
| 0.002705
|
# -*- coding: utf-8 -*-
# django-po2xls
# tests/management/commands/test_po-to-xls.py
import os
import pathlib
from typing import List
from importlib import import_module
from django.test import TestCase
# po-to-xls management command imported on the fly
# because we can't import something from the module that contains "-"
Command = import_module("po2xls.management.commands.po-to-xls").Command # type: ignore
__all__: List[str] = ["CommandTest"]
class CommandTest(TestCase):
"""po-to-xls management command tests."""
@classmethod
def tearDownClass(cls) -> None:
"""Tear down."""
os.remove("po2xls/locale/uk/LC_MESSAGES/django.xls")
os.remove("po2xls/locale/en/LC_MESSAGES/django.xls")
        super().tearDownClass()
def test_convert(self) -> None:
"""convert method must write converted data to .xls files for chosen locale.""" # noqa: D403,E501
Command().convert(locale="uk")
self.assertTrue(
expr=pathlib.Path("po2xls/locale/uk/LC_MESSAGES/django.xls").exists()
)
def test_convert__all(self) -> None:
"""convert method must write converted data to .xls files for all locales.""" # noqa: D403,E501
        Command().handle()
self.assertTrue(
expr=pathlib.Path("po2xls/locale/en/LC_MESSAGES/django.xls").exists()
)
self.assertTrue(
expr=pathlib.Path("po2xls/locale/uk/LC_MESSAGES/django.xls").exists()
)
|
maldun/EasyShells
|
MidSurface.py
|
Python
|
lgpl-2.1
| 2,382
| 0.011335
|
# EasyShells Module - API for easier Shell Model Construction in Salome
# MidSurface.py: Mid surface extraction for EasyShells module
#
# Copyright (C) 2013 Stefan Reiterer - maldun.finsterschreck@gmail.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import salome
import geompy
from numpy import array, arange
from MyGeom.Types import *
def create_parallel_midpoints(points_lower, points_upper):
"""
Help function to create the midpoints of two given parallel surfaces
"""
length_u = len(points_lower[0])
length_v = len(points_lower)
    return [[(points_lower[i][j] + points_upper[i][j])*0.5
             for j in range(length_u)]
            for i in range(length_v)]
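# Hedged illustration (not part of the original module): with plain floats the
# helper simply averages matching grid points, e.g.
#   create_parallel_midpoints([[0.0, 2.0]], [[2.0, 4.0]])  ->  [[1.0, 3.0]]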
def parallel_midsurface(lower_face, upper_face, lower_deg = 2, upper_deg = 5):
"""
Determines the midsurface of 2 parallel
surfaces. Hereby parallel means that they
share the same normal direction. It is assumed
that both normals point outwards.
"""
points_u = arange(0,1+1./upper_deg,1./upper_deg)
points_v = points_u
    lower_points = create_local_coordinates(lower_face, points_u, points_v)
    upper_points = create_local_coordinates(upper_face, points_u, points_v)
    midpoints = create_parallel_midpoints(lower_points, upper_points)
    return midpoints
def face_normal_translation(face,distance,change_orientation = False):
"""
Help function to make a translation
"""
if change_orientation:
face.changeOrientation()
normal = face.getNormal()
result = geompy.MakeTranslationVectorDistance(face.getGeomObject(),normal.getGeomObject(),
distance)
return MyFace(result)
|
Theragon/kupfer
|
kupfer/plugin/vim/__init__.py
|
Python
|
gpl-3.0
| 423
| 0.009456
|
__kupfer_name__ = _("Vim")
__kupfer_sources__ = ("RecentsSource", "ActiveVim", )
__kupfer_actions__ = ("InsertInVi
|
m", )
__description__ = _("Recently used documen
|
ts in Vim")
__version__ = "2011-04"
__author__ = "Plugin: Ulrik Sverdrup, VimCom: Ali Afshar"
def initialize_plugin(name):
global RecentsSource
global ActiveVim
global InsertInVim
from kupfer.plugin.vim.plugin import RecentsSource, ActiveVim, InsertInVim
|
crpurcell/friendlyVRI
|
arrays/array_data/ATCA/mk_ATCA_array_configs.py
|
Python
|
mit
| 3,761
| 0.003191
|
#!/usr/bin/env python
telescope = "ATCA"
latitude_deg = -30.312906
diameter_m = 22.0
import os
import sys
from util_misc import ascii_dat_read
#-----------------------------------------------------------------------------#
def main():
# Read the station lookup table
col, dummy = ascii_dat_read("ATCA_stations.txt", delim=" ",
doFloatCols=[2, 3])
statDict = {}
for station, N, W in zip(col[1], col[2], col[3]):
statDict[station] = (-W+1622.449, N)
# Read the array configuration file
col, dummy = ascii_dat_read("ATCA_configs.txt", delim=" ",
doFloatCols=[2, 3, 4, 5, 6, 7])
for confName, A1, A2, A3, A4, A5, A6 in zip(col[1], col[2], col[3], col[4],
col[5], col[6], col[7]):
if A1=='':
continue
outFileName = "ATCA_%s.config" % confName
FH = open(outFileName, "w")
FH.write("#" + "-"*78 + "#\n")
FH.write("#\n")
FH.write("# Array definition file for the %s %s configuration.\n"
% (telescope, confName))
FH.write("#\n")
FH.write("#" + "-"*78 + "#\n")
FH.write("\n")
FH.write("# Name of the telescope\n")
FH.write("telescope = %s\n" % telescope)
FH.write("\n")
FH.write("# Name of the configuration\n")
FH.write("config = %s\n" % confName)
FH.write("\n")
FH.write("# Latitude of the array centre\n")
FH.write("latitude_deg = %f\n" % latitude_deg)
FH.write("\n")
FH.write("# Antenna diameter\n")
FH.write("diameter_m
|
= %f\n" % diameter_
|
m)
FH.write("\n")
FH.write("# Antenna coordinates (offset E, offset N)\n")
FH.write("%f, %f\n" % (statDict[A1][0], statDict[A1][1]))
FH.write("%f, %f\n" % (statDict[A2][0], statDict[A2][1]))
FH.write("%f, %f\n" % (statDict[A3][0], statDict[A3][1]))
FH.write("%f, %f\n" % (statDict[A4][0], statDict[A4][1]))
FH.write("%f, %f\n" % (statDict[A5][0], statDict[A5][1]))
FH.write("%f, %f\n" % (statDict[A6][0], statDict[A6][1]))
FH.close()
for confName, A1, A2, A3, A4, A5 in zip(col[1], col[2], col[3], col[4],
col[5], col[6]):
if A1=='':
continue
confName += "_No_6"
outFileName = "ATCA_%s.config" % confName
FH = open(outFileName, "w")
FH.write("#" + "-"*78 + "#\n")
FH.write("#\n")
FH.write("# Array definition file for the %s %s configuration.\n"
% (telescope, confName))
FH.write("#\n")
FH.write("#" + "-"*78 + "#\n")
FH.write("\n")
FH.write("# Name of the telescope\n")
FH.write("telescope = %s\n" % telescope)
FH.write("\n")
FH.write("# Name of the configuration\n")
FH.write("config = %s\n" % confName)
FH.write("\n")
FH.write("# Latitude of the array centre\n")
FH.write("latitude_deg = %f\n" % latitude_deg)
FH.write("\n")
FH.write("# Antenna diameter\n")
FH.write("diameter_m = %f\n" % diameter_m)
FH.write("\n")
FH.write("# Antenna coordinates (offset E, offset N)\n")
FH.write("%f, %f\n" % (statDict[A1][0], statDict[A1][1]))
FH.write("%f, %f\n" % (statDict[A2][0], statDict[A2][1]))
FH.write("%f, %f\n" % (statDict[A3][0], statDict[A3][1]))
FH.write("%f, %f\n" % (statDict[A4][0], statDict[A4][1]))
FH.write("%f, %f\n" % (statDict[A5][0], statDict[A5][1]))
FH.close()
#-----------------------------------------------------------------------------#
main()
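# Hedged illustration (not part of the original script): each generated
# ATCA_<config>.config file follows the key/value layout written above. The
# configuration name and antenna offsets shown here are placeholders; the
# latitude and diameter come from the constants at the top of the script.
#
#   telescope = ATCA
#   config = <config name>
#   latitude_deg = -30.312906
#   diameter_m = 22.000000
#   # Antenna coordinates (offset E, offset N)
#   <offset E>, <offset N>   (one line per antenna)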
|
vaniakosmos/memes-reposter
|
apps/rss/admin.py
|
Python
|
mit
| 1,262
| 0.001585
|
from datetime import timedelta
from django.contrib import admin
from django.db.models import Case, Value, When
from django.utils import timezone
from .models import Channel, Post, RssFeed
@admin.register(Channel)
class ChannelAdmin(admin.ModelAdmin):
list_display = ('__str__', 'title', 'username', 'publish_picture', 'linked_title', 'short_link')
change_list_template = "rss/actions.html"
@admin.register(RssFeed)
class RssFeedAdmin(admin.ModelAdmin):
list_display = ('__str__', 'channel', 'link', 'active')
actions = ('activate', 'deactivate', 'toggle_active')
def activate(self, request, queryset):
        queryset.update(active=True)
def deactivate(self, request, queryset):
queryset.update(active=False)
def toggle_active(self, request, queryset):
queryset.update(active=Case(When(active=True, then=Value(False)), default=Value(True)))
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'feed', 'link', 'created', 'older_then_five_days')
ordering = ('-created',)
    def older_then_five_days(self, post: Post):
five_days_before = timezone.now() - timedelta(days=5)
return post.created < five_days_before
older_then_five_days.boolean = True
|
JohnCEarls/AUREA
|
scripts/testScripts/testTFIDF.py
|
Python
|
agpl-3.0
| 1,566
| 0.009579
|
from tfidf import *
import psycopg2
import psycopg2.extensions
import math
def cos_sim(A,B):
def dot_product(a,b):
sum = 0.0
for key in a.keys():
if key in b:
sum += a[key]*b[key]
return sum
    return dot_product(A,B)/(math.sqrt(dot_product(A,A)) * math.sqrt(dot_product(B,B)))
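# Hedged sanity check (not part of the original script): with A = {'a': 1.0,
# 'b': 1.0} and B = {'a': 1.0}, dot(A, B) = 1, |A| = sqrt(2) and |B| = 1, so
# cos_sim(A, B) == 1/sqrt(2) ~= 0.7071.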
conn = psycopg2.connect("host=localhost dbname=SOFTFile user=AUREA password=AUREA")
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
c = conn.cursor()
qry = "SELECT dataset_id, dataset_title, dataset_description \
FROM dataset"
#WHERE dataset_id < 20"
c.execute(qry)
documentList = []
documentNumber = 0
docMap = []
for id,title, description in c.fetchall():
documentList.append(title + description)
docMap.append(id)
c.close()
vectors = []
print "gotDocs"
for x in range(len(documentList)):
words = {}
for word in documentList[documentNumber].split(None):
words[word] = tfidf(word,documentList[documentNumber],documentList)
#for item in sorted(words.items(), key=itemgetter(1), reverse=True):
# print "%f <= %s" % (item[1], item[0])
vectors.append(words)
documentNumber = x+1
print "got vectors"
sim = []
for i in range(len(vectors[:-1])):
for j in range(i+1, len(vectors)):
sim = cos_sim(vectors[i], vectors[j])
db_id1 = docMap[i]
db_id2 = docMap[j]
qry = "INSERT into cosine_similarity(id1, id2, score) VALUES (%s, %s, %s)"
c = conn.cursor()
c.execute(qry, (db_id1, db_id2, sim))
c.close()
|
Tomcuzz/OctaHomeAutomation
|
Api/ciscophone.py
|
Python
|
mit
| 3,020
| 0.036755
|
from django.shortcuts import render
from SharedFunctions.models import *
from Lights.models import *
def HandlePhoneRequest(request):
area = request.GET.get('area', 'None')
if area == 'None':
return PhoneHomePage(request)
elif area == 'lights':
        if request.GET.get('room', 'None') != 'None':
if request.GET.get('light', 'None') != 'None':
if request.GET.get('command', 'None') != 'None':
return PhoneLightSetRGBPage(request)
else:
return PhoneLightPage(request)
else:
return PhoneLightsPage(request)
else:
return PhoneLightsRoomPage(request)
else:
return PhoneHomePage(request)
def PhoneHomePage(request):
items = [{'title':'Lights', 'address':'?page=ciscophone&area=lights'},
             {'title':'Alarm', 'address':'?page=ciscophone&area=alarm'},
{'title':'Temperature', 'address':'?page=ciscophone&area=temp'}]
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Service'}, content_type="text/xml")
def PhoneLightsRoomPage(request):
items = [{'title':'All Rooms', 'address':'?page=ciscophone&area=lights&room=allrooms'}]
for room in Rooms.objects.all():
items.append({'title':room.Name.replace("_", " "), 'address':'?page=ciscophone&area=lights&room=' + str(room.id)})
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Service'}, content_type="text/xml")
def PhoneLightsPage(request):
items = []
room = request.GET.get('room', 'None')
if room == 'allrooms':
lights = Lights.objects.all()
else:
theRoom = Rooms.objects.get(id=int(room))
lights = Lights.objects.filter(Room=theRoom)
for light in lights:
items.append({'title':light.LightName.replace("_", " "), 'address':'?page=ciscophone&area=lights&room=' + str(room) + '&light=' + str(light.id)})
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Light', 'softkey1':'test'}, content_type="text/xml")
def PhoneLightPage(request):
light = request.GET.get('light', 'None')
items = [{'title':'Toggle Light', 'address':'?page=ciscophone&area=lights&room=allrooms&light=' + light + '&command=toggle'},
{'title':'Set RGB Values', 'address':'?page=ciscophone&area=lights&room=allrooms&light=' + light + '&command=setrgb'},
{'title':'Select Scene', 'address':'?page=ciscophone&area=lights&room=allrooms&light=' + light + '&command=selectscene'}]
return render(request, 'OctaHomeApi/PhoneMenu.html', {'Items':items, 'Prompt':'Please Select A Service'}, content_type="text/xml")
def PhoneLightSetRGBPage(request):
items = [{'DisplayName':'Set Red Value', 'QueryStringParam':'r', 'DefaultValue':'255', 'InputFlag':'N'},
{'DisplayName':'Set Green Value', 'QueryStringParam':'g', 'DefaultValue':'255', 'InputFlag':'N'},
{'DisplayName':'Set Blue Value', 'QueryStringParam':'b', 'DefaultValue':'255', 'InputFlag':'N'}]
return render(request, 'OctaHomeApi/PhoneValueSet.html', {'Items':items, 'Prompt':'Please Select A Service', 'Url':'setrgb.xml'}, content_type="text/xml")
|
dtschan/weblate
|
weblate/accounts/tests/test_models.py
|
Python
|
gpl-3.0
| 1,853
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user models.
"""
from django.contrib.auth.models import User, Group
from django.test import TestCase
from weblate.accounts.models import AutoGroup
class AutoGroupTest(TestCase):
@staticmethod
def create_user():
return User.objects.create_user('test1', 'noreply@weblate.org', 'pass')
def test_default(self):
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
def test_none(self):
        AutoGroup.objects.all().delete()
user = self.create_user()
self.assertEqual(user.groups.count(), 0)
def test_matching(self):
AutoGroup.objects.create(
match='^.*@weblate.org',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 2)
def test_nonmatching(self):
AutoGroup.objects.create(
match='^.*@example.net',
group=Group.objects.get(name='Guests')
)
user = self.create_user()
self.assertEqual(user.groups.count(), 1)
|
burmanm/gather_agent
|
gather_agent/gather_agent.py
|
Python
|
apache-2.0
| 5,283
| 0.005111
|
import Queue
import handlers
import inspect
import threading
import pkgutil
import os
import sys
import imp
from gatherer import Gatherer
import signal
import platform
import ConfigParser
class GatherAgent(object):
"""
A simple layer between inputs (gatherers) and output (handler) using a simple
    implementation of the reactor pattern.
"""
KEY_SEPARATOR = '.'
def start(self, config_file='gather_agent.ini'):
"""
        Initialization method of the GatherAgent. Sets up required queues, parses
        the configuration, loads gatherers and the handler, and starts the dispatcher.
"""
self.q = Queue.Queue()
self.gatherers = []
# Load configuration properties
config = ConfigParser.ConfigParser()
config.read(config_file)
config.set('Gatherers', 'prefix', platform.node())
self.config = config
# Start gatherers and handlers..
self.handler = self.start_handler(config.get('General', 'handler'))
self.start_gatherers(self.load_gatherers(), self.handler)
signal.signal(signal.SIGINT, self._stop)
self.active = True
self.loop()
def start_handler(self, handler_cls):
        handler_generic_config = self.load_partial_config('Handlers')
handler_specific_config = self.load_partial_config('Handlers', handler_cls)
        handler_specific_config.update(handler_generic_config)
for o, _ in self.load_classes_list('handlers'):
if o.__name__ == handler_cls:
obj = o(handler_specific_config)
return obj
def start_gatherers(self, instances, handler):
"""
Creates new threads for each gatherer running the gatherer's run() method
"""
for instance in instances:
t = threading.Thread(target=instance.run)
t.daemon = True
t.start()
self.gatherers.append(instance)
def loop(self):
"""
Main dispatcher loop which waits for available objects in the queue. Once
        an event is received, it calls the event's handler and waits for results before
processing the next event.
"""
while self.active:
event = self.q.get()
event.handle()
def load_partial_config(self, section, keyprefix=None):
"""
Parses a partial configuration from the ini-file, filtering any key that
isn't defined by the keyprefix. If no keyprefix is given, filters all the
properties that are namespaced with dot (.)
"""
section_config = self.config.items(section)
partial_config = {}
for k, v in section_config:
d = None
if keyprefix is not None:
keyprefix = keyprefix.lower()
i = k.rfind(keyprefix + self.KEY_SEPARATOR)
if i > -1:
d = { k: v }
else:
i = k.rfind(self.KEY_SEPARATOR)
if i < 0:
d = { k: v }
if d is not None:
partial_config.update(d)
return partial_config
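    # Hedged illustration (not part of the original source): given section
    # items [('interval', '10'), ('graphite.host', 'localhost')] (hypothetical
    # key names), load_partial_config(section) keeps only {'interval': '10'},
    # while load_partial_config(section, 'graphite') keeps
    # {'graphite.host': 'localhost'}.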
def load_handlers_config(self, class_name):
handlers_config = self.load_partial_config('Handlers', class_name)
return handlers_config
def load_gatherers_config(self, class_name):
generic_gatherer_config = self.load_partial_config('Gatherers')
specific_gatherer_config = self.load_partial_config('Gatherers', class_name)
generic_gatherer_config.update(specific_gatherer_config)
return generic_gatherer_config
def load_classes_list(self, package):
"""
Loads all classes from the given package. Returns a generator with two
parameters, class_name and the module
"""
path = os.path.join(os.path.dirname(__file__), package)
modules = pkgutil.iter_modules(path=[path])
for _, module_name, _ in modules:
fp, pathname, description = imp.find_module(module_name, [path])
module = imp.load_module(module_name, fp, pathname, description)
for name in dir(module):
o = getattr(module, name)
if inspect.isclass(o):
yield o, name
def load_gatherers(self):
"""
Creates and returns a generator with one instance of each gatherers
object.
"""
for o, name in self.load_classes_list('gatherers'):
if issubclass(o, Gatherer) and o is not Gatherer:
partial_config = self.load_gatherers_config(name)
obj = o(self.handler, partial_config, self.q)
yield obj
def _stop(self, signum, frame):
"""
If a signal is received from the OS, this method is used to clean up and
stop all the gatherers and handlers.
"""
print 'Received signal ' + str(signum) + ', closing gatherers and handlers'
self.active = False
for i in self.gatherers:
i.close()
self.handler.close()
if __name__ == "__main__":
g = GatherAgent()
if len(sys.argv) > 1:
g.start(sys.argv[1])
else:
g.start()
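# Hedged example configuration (not part of the original source): the section
# names and the General/handler key are taken from the code above; the handler
# and gatherer class names and their option keys are hypothetical.
#
#   [General]
#   handler = StdoutHandler
#
#   [Gatherers]
#   interval = 10
#   cpugatherer.interval = 5
#
#   [Handlers]
#   stdouthandler.flush = true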
|
akhmadMizkat/odoo
|
addons/project_issue_sheet/__openerp__.py
|
Python
|
gpl-3.0
| 850
| 0.002353
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Timesheet on Issues',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds the Timesheet support for the Issues/Bugs Management in Project.
=================================================================================
Worklogs can be maintained to signify number of hours spent by users to handle an issue.
""",
'website': 'https://www.odoo.com/page/project-management',
'depends': [
'project_issue',
'hr_timesheet_sheet',
],
    'data': [
'project_issue_sheet_view.xml',
'security/ir.model.access.csv',
'security/portal_security.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
|
prefetchnta/questlab
|
bin/x64bin/python/37/Lib/nntplib.py
|
Python
|
lgpl-2.1
| 44,234
| 0.000701
|
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPError", "NNTPReplyError", "NNTPTemporaryError",
"NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
"""Takes a unicode string representing a munged header value
and decodes it as a (possibly non-ASCII) readable value."""
parts = []
for v, enc in _email_decode_header(header_str):
if isinstance(v, bytes):
parts.append(v.decode(enc or 'ascii'))
else:
parts.append(v)
return ''.join(parts)
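# Hedged example (not part of the original module): a MIME-encoded header such
# as '=?utf-8?q?caf=C3=A9?=' decodes to the readable string 'café', i.e.
#   decode_header('=?utf-8?q?caf=C3=A9?=')   # -> 'café'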
def _parse_overview_fmt(lines):
"""Parse a list of string representing the response to LIST OVERVIEW.FMT
and return a list of header/metadata names.
Raises NNTPDataError if the response is not compliant
(cf. RFC 3977, section 8.4)."""
fmt = []
for line in lines:
if line[0] == ':':
# Metadata name (e.g. ":bytes")
name, _, suffix = line[1:].partition(':')
name = ':' + name
else:
# Header name (e.g. "Subject:" or "Xref:full")
name, _, suffix = line.partition(':')
name = name.lower()
name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
# Should we do something with the suffix?
fmt.append(name)
defaults = _DEFAULT_OVERVIEW_FMT
if len(fmt) < len(defaults):
raise NNTPDataError("LIST OVERVIEW.FMT response too short")
if fmt[:len(defaults)] != defaults:
raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
return fmt
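# Hedged example (not part of the original module): a compliant response such
# as ['Subject:', 'From:', 'Date:', 'Message-ID:', 'References:', ':bytes',
# ':lines'] parses to ['subject', 'from', 'date', 'message-id', 'references',
# ':bytes', ':lines'], which matches _DEFAULT_OVERVIEW_FMT exactly.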
def _parse_overview(lines, fmt, data_process_func=None):
"""Parse the response to an OVER or XOVER command according to the
overview format `fmt`."""
n_defaults = len(_DEFAULT_OVERVIEW_FMT)
overview = []
for line in lines:
fields = {}
article_number, *tokens = line.split('\t')
article_number = int(article_number)
for i, token in enumerate(tokens):
if i >= len(fmt):
# XXX should we raise an error? Some servers might not
# support LIST OVERVIEW.FMT and still return additional
# headers.
continue
field_name = fmt[i]
is_metadata = field_name.startswith(':')
if i >= n_defaults and not is_metadata:
# Non-default header names are included in full in the response
# (unless the field is totally empty)
h = field_name + ": "
if token and token[:len(h)].lower() != h:
raise NNTPDataError("OVER/XOVER response doesn't include "
"names of additional headers")
token = token[len(h):] if token else None
fields[fmt[i]] = token
overview.append((article_number, fields))
return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be dat
|
hale36/SRTV
|
sickbeard/providers/strike.py
|
Python
|
gpl-3.0
| 3,919
| 0.003572
|
# Author: matigonkas
# URL: https://github.com/SiCKRAGETV/sickrage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
class STRIKEProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Strike")
self.supportsBacklog = True
self.public = True
self.url = 'https://getstrike.net/'
self.ratio = 0
self.cache = StrikeCache(self)
self.minseed, self.minleech = 2 * [None]
def isEnabled(self):
return self.enabled
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys(): #Mode = RSS, Season, Episode
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: " + search_string.strip(), logger.DEBUG)
searchURL = self.url + "api/v2/torrents/search/?category=TV&phrase=" + search_string
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
jdata = self.getURL(searchURL, json=True)
if not jdata:
logger.log("No data returned from provider", logger.DEBUG)
return []
results = []
for item in jdata['torrents']:
seeders = ('seeds' in item and item['seeds']) or 0
leechers = ('leeches' in item and item['leeches']) or 0
title = ('torrent_title' in item and item['torrent_title']) or ''
size = ('size' in item and item['size']) or 0
download_url = ('magnet_uri' in item and item['magnet_uri']) or ''
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
item = title, download_url, size, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class StrikeCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
        # set this to 0 to suppress the log line, since we aren't updating it anyway
self.minTime = 0
def _getRSSData(self):
# no rss for getstrike.net afaik, also can't search with empty string
return {'entries': {}}
provider = STRIKEProvider()
|
vritant/subscription-manager
|
test/test_lock.py
|
Python
|
gpl-2.0
| 6,575
| 0.000608
|
import os
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from subscription_manager import lock
class TestLock(unittest.TestCase):
lf_name = "lock.file"
def setUp(self):
self.tmp_dir = self._tmp_dir()
self.other_process = None
def _tmp_dir(self):
tmp_dir = tempfile.mkdtemp(suffix="lock", prefix="subman-unit-tests-")
return tmp_dir
def _lock_path(self):
tmp_dir = self._tmp_dir()
return os.path.join(tmp_dir, self.lf_name)
# For thread.Timer()
def _kill_other_process(self, other_process):
self.fail("nothing happened before we timed out.")
# die die die
other_process.terminate()
other_process.kill()
self.timer.cancel()
def _grab_lock_from_other_pid(self, lockfile_path,
other_process_timeout=None,
acquire_timeout=None):
# klugey
other_process_timeout = other_process_timeout or 3.0
acquire_timeout = acquire_timeout or 5.0
sys_path = os.path.join(os.path.dirname(__file__), "../src")
self.other_process = subprocess.Popen(["/usr/bin/python", __file__, lockfile_path],
close_fds=True,
stdin=subprocess.PIPE,
env={'PYTHONPATH': sys_path})
#lock_path = os.path.join(self.tmp_dir, 'lock.file')
# make sure other process has had time to create the lock file
while True:
lock_exists = os.path.exists(lockfile_path)
if lock_exists:
break
time.sleep(0.05)
# in another thread, wait 3 seconds, then send 'whatever' to stdin of
# other process so it closes. A timeout...
def wait_for_pid(timer):
time.sleep(other_process_timeout)
self.close_lock_holder()
timer.cancel()
timer = threading.Timer(acquire_timeout, self.timeout_fail)
op_thread = threading.Thread(target=wait_for_pid, args=[timer])
op_thread.start()
return op_thread
def close_lock_holder(self):
try:
self.other_process.communicate("whatever")
except Exception, e:
print e
# whatever, we closed it in the other thread
def timeout_fail(self):
self.close_lock_holder()
self.fail("timeoutsdfsdf")
def test_two_pids_blocking_none_blocks(self):
lock_path = self._lock_path()
# start a different proc that holds the lock, that times out after 3
self._grab_lock_from_other_pid(lock_path, 1.0, 0.2)
b = lock.Lock(lock_path)
res = b.acquire()
self.assertTrue(res is None)
def test_two_pids_blocking_none(self):
lock_path = self._lock_path()
# start a different proc that holds the lock, that times out after 3
self._grab_lock_from_other_pid(lock_path, 0.2, 1.0)
b = lock.Lock(lock_path)
res = b.acquire()
self.assertTrue(b.acquired())
self.assertTrue(res is None)
def test_two_pids_blocking_true(self):
lock_path = self._lock_path()
# start a different proc that holds the lock, that times out after 3
self._grab_lock_from_other_pid(lock_path, 0.2, 1.0)
b = lock.Lock(lock_path)
res = b.acquire(blocking=True)
self.assertTrue(b.acquired())
self.assertTrue(res)
def test_two_pids_blocking_false(self):
lock_path = self._lock_path()
self._grab_lock_from_other_pid(lock_path, 0.2, 1.0)
b = lock.Lock(lock_path)
res = b.acquire(blocking=False)
self.assertFalse(b.acquired())
self.other_process.communicate("whatever")
self.assertFalse(res)
def test_lock(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
self.assertEquals(lf.path, lock_path)
self.assertEquals(lf.depth, 0)
def test_lock_acquire(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
res = lf.acquire()
# given no args, acquire() blocks or returns None
self.assertEquals(res, None)
def test_lock_acquire_blocking_true(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=True)
# acquire(blocking=True) will block or return True
self.assertTrue(res)
def test_lock_acquire_blocking_false(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=False)
# res of False indicates lock could not be acquired without blocking
# True indicates lock was acquired
self.assertTrue(res)
def test_lock_release(self):
lock_path = self._lock_path()
lf = lock.Lock(lock_path)
lf.acquire()
lf.release()
def _stale_lock(self):
lock_path = self._lock_path()
fakepid = 123456789
f = open(lock_path, 'w')
f.write('%s\n' % fakepid)
f.close()
return lock_path
def test_lock_acquire_stale_pid(self):
lock_path = self._stale_lock()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=True)
self.assertTrue(res)
def test_lock_acquire_stale_pid_nonblocking(self):
lock_path = self._stale_lock()
lf = lock.Lock(lock_path)
res = lf.acquire(blocking=False)
self.assertTrue(res)
# always blocks, needs eventloop/threads
# def test_lock_drive_full_blocking(self):
# lock_path = "/dev/full"
# lf = lock.Lock(lock_path)
# res = lf.acquire(blocking=True)
# log.debug(res)
# FIXME: the lockfile creation fails on /dev/full
# def test_lock_drive_full_nonblocking(self):
# lock_path = "/dev/full"
# lf = lock.Lock(lock_path)
    #     res = lf.acquire(blocking=False)
# self.assertFalse(res)
# run this module's main in a subprocess to grab a lock from a different
# pid.
def main(args):
lock_file_path = args[1]
test_lock = lock.Lock(lock_file_path)
# could return a useful value, so the thread communicating with
# it could notice it couldn't get the lock
res = test_lock.acquire(blocking=False)
if res is False:
return 128
# exit on any stdin input
for line in sys.stdin.readlines():
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[:]))
|
MeanEYE/Sunflower
|
sunflower/icons.py
|
Python
|
gpl-3.0
| 3,797
| 0.025283
|
from __future__ import absolute_import
from builtins import filter
import os
import sys
import zipfile
from gi.repository import Gtk, Gio, GdkPixbuf, GLib
from sunflower.common import UserDirectory, get_user_directory, get_static_assets_directory
class IconManager:
"""Icon manager class provides easy and abstract way of dealing with icons"""
def __init__(self, parent):
self._parent = parent
self._icon_theme = Gtk.IconTheme.get_default()
self._user_directories = None
self._default_file = None
self._default_directory = None
# preload information
self._prepare_icons()
def _prepare_icons(self):
"""Load special user directories"""
# set default icons for file and directory
self._default_file = 'text-x-generic'
self._default_directory = 'folder'
# special user directories
directories = []
icon_names = {
UserDirectory.DESKTOP: 'user-desktop',
UserDirectory.DOWNLOADS: 'folder-download',
UserDirectory.TEMPLATES: 'folder-templates',
UserDirectory.PUBLIC: 'folder-publicshare',
UserDirectory.DOCUMENTS: 'folder-documents',
UserDirectory.MUSIC: 'folder-music',
UserDirectory.PICTURES: 'folder-pictures',
UserDirectory.VIDEOS: 'folder-videos'
}
# add all directories
for directory in icon_names:
full_path = get_user_directory(directory)
icon_name = icon_names[directory]
# make sure icon exists
if not self.has_icon(icon_name):
icon_name = self._default_directory
directories.append((full_path, icon_name))
# add user home directory
if self.has_icon('user-home'):
directories.append((os.path.expanduser('~'), 'user-home'))
# create a dictionary
self._user_directories = dict(directories)
def has_icon(self, icon_name):
"""Check if icon with specified name exists in theme"""
return self._icon_theme.has_icon(icon_name)
def get_icon_sizes(self, icon_name):
"""Get icon sizes for specified name"""
return self._icon_theme.get_icon_sizes(icon_name)
def get_icon_for_file(self, filename):
"""Load icon for specified file"""
result = self._default_file
mime_type = self._parent.associations_manager.get_mime_type(filename)
themed_icon = None
# get icon names
if mime_type is not None:
themed_icon = Gio.content_type_get_icon(mime_type)
# get only valid icon names
if themed_icon is not None:
icon_list = themed_icon.get_names()
icon_list = list(filter(self.has_icon, icon_list))
if len(icon_list) > 0:
result = icon_list[0]
return result
def get_icon_for_directory(self, path):
"""Get icon for specified directory"""
result = self._default_directory
if path in self._user_directories:
result = self._user_directories[path]
return result
def get_mount_icon_name(self, icons):
"""Return existing icon name from the specified list"""
        result = 'drive-harddisk'
        # create a list of icons and filter non-existing
icon_list = icons.split(' ')
icon_list = list(filter(self.has_icon, icon_list))
# if list has items, grab first
if len(icon_list) > 0:
result = icon_list[0]
return result
def set_window_icon(self, window):
"""Set window icon"""
# check system for icon
if self.has_icon('sunflower'):
window.set_icon(self._icon_theme.load_icon('sunflower', 256, 0))
# try loading from zip file
elif os.path.isfile(sys.path[0]) and sys.path[0] != '':
archive = zipfile.ZipFile(sys.path[0])
with archive.open('images/sunflower.svg') as raw_file:
buff = Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new(raw_file.read()))
icon = GdkPixbuf.Pixbuf.new_from_stream(buff, None)
window.set_icon(icon)
archive.close()
# load from local path
else:
base_path = get_static_assets_directory()
window.set_icon_from_file(os.path.join(base_path, 'images', 'sunflower.svg'))
|
r2k0/flask-apps
|
mega-tut/app/views.py
|
Python
|
mit
| 2,555
| 0.005479
|
"""
the handlers that respond to requests from browsers or clients.
Each view function is mapped to one or more request URLs.
"""
from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm
from .models import User, ROLE_USER, ROLE_ADMIN
#the two decorators create mappings from URLs / and /index to this function
@app.route('/')
@app.route('/index')
@login_required
def index():
user = {'nickname': 'Okkar'}
posts = [
{
'author': {'nickname': 'Max'},
'body': 'Golden Gate Bridge!'
},
{
'author': {'nickname': 'Pan'},
'body': 'I want bacon!'
}
]
return render_template('index.html',
title='Home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated():
        return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.before_request
def before_request():
g.user = current_user
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
|
datyayu/cemetery
|
[Python][Django] Rest framework tutorial/snippets/urls.py
|
Python
|
mit
| 314
| 0
|
from django.conf.urls import url, include
from snippets.views import SnippetViewSet, UserViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'snippets', SnippetViewSet)
router.register(r'users', UserViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
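# Hedged note (not part of the original file): a DefaultRouter expands each
# registration above into list and detail routes (for example ^snippets/$ and
# ^snippets/{pk}/$), with route names derived from the viewset's queryset
# unless a basename is passed to register() explicitly.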
|
OpenParking/Open-Parkinig---Edison
|
source/touh.py
|
Python
|
gpl-2.0
| 1,004
| 0.000996
|
import time
import pyupm_ttp223 as ttp223
import requests
import json
url = "http://requestb.in/1mj62581?inspect"
headers = {'content-type': 'application/json'}
touch1 = ttp223.TTP223(4)
touch1Pressed = False
touch2 = ttp223.TTP223(8)
touch2Pressed = False
def sendInfo(touch, tId, Pressed):
if touch.isPressed():
        if not Pressed:
print "Send Info"
Pressed = True
data = {"Id": "AI", "Espacio": tId, "Disponible": False}
data = json.dumps(data)
requests.post(url, params=data, headers=headers)
else:
if Pressed:
print "Send Info"
Pressed = False
data = {"Id": "AI", "Espacio": tId, "Disponible": True}
data = json.dumps(data)
            requests.post(url, params=data, headers=headers)
return Pressed
while True:
touch1Pressed = sendInfo(touch1, 1, touch1Pressed)
touch2Pressed = sendInfo(touch2, 2, touch2Pressed)
time.sleep(1)
del touch1
del touch2
|
abilian/abilian-core
|
src/abilian/web/tags/__init__.py
|
Python
|
lgpl-2.1
| 244
| 0
|
""""
|
""
from __future__ import annotations
from flask import Flask
from .criterion import TagCriterion
from .extension import TagsExtension
__all__ = ["TagsExtension", "TagCriterion"]
def register_plugin(app: Flask):
    TagsExtension(app)
|
mariosky/evo-drawings
|
venv/lib/python2.7/site-packages/django/db/backends/__init__.py
|
Python
|
agpl-3.0
| 47,905
| 0.001232
|
import datetime
import time
from django.db.utils import DatabaseError
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
from collections import namedtuple
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.signals import connection_created
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseErrorWrapper
from django.utils.functional import cached_property
from django.utils.importlib import import_module
from django.utils import six
from django.utils import timezone
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
        self.use_debug_cursor = None
# Savepoint management related attributes
self.savepoint_state = 0
# Transaction management related attributes
self.autocommit = False
self.transaction_state = []
# Tracks if the connection is believed to be in transaction. This is
# set somewhat aggressively, as the DBAPI doesn't make it easy to
# deduce if the connection is in transaction or not.
self._dirty = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# List of savepoints created by 'atomic'
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = thread.get_ident()
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alias)
##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
raise NotImplementedError
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
raise NotImplementedError
def init_connection_state(self):
"""Initializes the database connection settings."""
raise NotImplementedError
def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError
##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
if self.settings_dict['AUTOCOMMIT']:
self.set_autocommit(True)
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors:
return self.create_cursor()
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
self.set_clean()
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
self.set_clean()
def close(self):
"""
Closes the connection to the database.
"""
self.validate_thread_sharing()
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
self.set_clean()
##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
##### Generic savepoint management methods #####
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
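    # Illustrative sketch (not part of Django source): typical pairing of these
    # savepoint wrappers, assuming the matching savepoint_commit() wrapper and a
    # hypothetical do_risky_queries() helper.
    #
    #   sid = connection.savepoint()           # None if savepoints are unsupported
    #   try:
    #       do_risky_queries()
    #   except Exception:
    #       connection.savepoint_rollback(sid)
    #       raise
    #   else:
    #       connection.savepoint_commit(sid)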
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing i
|
xTVaser/Schoolwork-Fall-2016
|
Thesis/Parser/main.py
|
Python
|
gpl-3.0
| 526
| 0.001901
|
import easygui
from os import listdir
from os.path import isfile, join
# Import Custom Libraries
from libs.requestStripper import *
file_path = "/home/tyler/Documents/Thesis Testing"
print(file_path)
files = [f for f in listd
|
ir(file_path) if isfile(join(file_path, f))]
for i, value in enumerate(files):
files[i] = fi
|
le_path + "/" + files[i]
print(files)
lines = []
for f in files:
gatherStrings(lines, f)
newFile = open(file_path+"/"+"compiledRequests", "w")
exportFile(lines, newFile)
newFile.close()
|
Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86
|
usr/pkg/lib/python2.7/shutil.py
|
Python
|
mit
| 18,302
| 0.002186
|
"""Utility functions for copying and archiving files and directory trees.
XXX The functions
|
here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except Imp
|
ortError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError, why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error, errors
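# Illustrative usage sketch (not part of this module): copying a tree while
# excluding compiled files and temporary directories through ignore_patterns().
# The paths are hypothetical.
#
#   copytree('project/src', 'backup/src',
#            ignore=ignore_patterns('*.pyc', 'tmp*'))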
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error, err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error, err:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
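# Illustrative usage sketch (not part of this module): an onerror handler that
# clears the read-only bit and retries, following the (func, path, exc_info)
# protocol described in the docstring above. The path is hypothetical.
#
#   def remove_readonly(func, path, exc_info):
#       os.chmod(path, stat.S_IWRITE)
#       func(path)
#
#   rmtree('build/output', onerror=remove_readonly)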
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file
|
ua-snap/downscale
|
old/old_bin/downscaling_launcher.py
|
Python
|
mit
| 3,849
| 0.043128
|
#!/usr/bin/python2
import os, glob
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs'
|
)
base_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
output_base_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/downscaled'
cru_base_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts20/akcan'
for root, dirs, files in os.walk( base_dir ):
if files:
path,
|
variable = os.path.split( root )
path, model = os.path.split( path )
# this gets rid of any .xml or .txt files that may be living amongst the NetCDF's
files = [ fn for fn in files if fn.endswith( '.nc' ) ]
for fn in files:
print 'running %s' % fn
# split out the sub_dirs to have both model_name and variable folder hierarchy
# from the prepped folder directory
output_dir = os.path.join( output_base_dir, model, variable )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# anomalies calculation type and cru input path condition
if 'tas_' in os.path.basename( fn ):
anomalies_calc_type = 'absolute'
downscale_operation = 'add'
cru_path = os.path.join( cru_base_dir, 'tas' )
elif 'hur_' in os.path.basename( fn ):
anomalies_calc_type = 'proportional'
downscale_operation = 'mult'
cru_path = os.path.join( cru_base_dir, 'hur' )
plev = 1000
else:
				raise NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
# condition to determine if we need to read in the historical dataset with the modeled for
# anomalies calculation
if 'historical' in fn:
# run with only the historical file
dates = os.path.basename( fn ).strip( '.nc' ).split( '_' )
dates = dates[ len( dates )-2 ], dates[ len( dates )-1 ]
begin_time, end_time = [ '-'.join([ i[:4], i[4:] ]) for i in dates ]
if 'tas_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -hi ' + os.path.join( root, fn ) + ' -o ' + output_dir + ' -bt ' + begin_time + \
' -et ' + end_time + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
elif 'hur_' in fn:
# run with only the historical file
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -hi ' + os.path.join( root, fn ) + ' -o ' + output_dir + ' -bt ' + begin_time + \
' -et ' + end_time + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -plev ' + str(plev) + ' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
else:
					raise NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
else:
# grab the historical file from that particular folder
historical_fn = glob.glob( os.path.join( root, '*historical*.nc' ) )[0]
# run with both historical and modeled files for anomalies calc.
if 'tas_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -mi ' + os.path.join( root, fn ) + ' -hi ' + historical_fn + ' -o ' + output_dir + \
' -bt ' + '2006-01' + ' -et ' + '2100-12' + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
elif 'hur_' in fn:
os.system( 'python hur_ar5_model_data_downscaling.py' + ' -mi ' + os.path.join( root, fn ) + ' -hi ' + historical_fn + ' -o ' + output_dir + \
' -bt ' + '2006-01' + ' -et ' + '2100-12' + ' -cbt ' + '1961-01' + ' -cet ' + '1990-12' + ' -plev ' + str(plev) + \
' -cru ' + cru_path + ' -at ' + anomalies_calc_type + ' -m ' + 'mean' + ' -dso ' + downscale_operation )
else:
					raise NotImplementedError( "only 'hur' & 'tas' have been implemented here" )
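# Sketch of the assumed relationship (not stated in the original script): the two
# anomaly modes correspond to the usual delta-change downscaling,
#   absolute / 'add'       -> downscaled = CRU_climatology + (model - model_climatology)
#   proportional / 'mult'  -> downscaled = CRU_climatology * (model / model_climatology)
# so temperature ('tas') uses additive anomalies and relative humidity ('hur')
# uses multiplicative ones.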
|
50wu/gpdb
|
gpMgmt/bin/gppylib/test/unit/test_unit_compare_segment_guc.py
|
Python
|
apache-2.0
| 8,167
| 0.003061
|
from mock import *
from .gp_unittest import *
from gpconfig_modules.compare_segment_guc import MultiValueGuc
from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
from gpconfig_modules.file_segment_guc import FileSegmentGuc
class CompareSegmentGucTest(GpTestCase):
def setUp(self):
row = ['contentid', 'guc_name', 'file_value', "dbid"]
self.file_seg_guc = FileSegmentGuc(row)
row = ['contentid', 'guc_name', 'sql_value']
self.db_seg_guc = DatabaseSegmentGuc(row)
self.subject = MultiValueGuc(self.file_seg_guc, self.db_seg_guc)
def test_init_when_comparison_guc_supplied(self):
row = ['contentid', 'guc_name', 'file_value', "diff_dbid"]
file_seg_guc = FileSegmentGuc(row)
old = self.subject
self.subject = MultiValueGuc(self.subject, file_seg_guc)
self.assertEqual(self.subject.db_seg_guc, old.db_seg_guc)
self.assertEqual(self.subject.primary_file_seg_guc, old.primary_file_seg_guc)
self.assertEqual(self.subject.mirror_file_seg_guc, file_seg_guc)
def test_init_with_wrong_content_id_raises(self):
row = ['contentid', 'guc_name', 'file_value', "dbid"]
file_seg_guc = FileSegmentGuc(row)
row = ['different', 'guc_name', 'sql_value']
db_seg_guc = DatabaseSegmentGuc(row)
with self.assertRaisesRegex(Exception, "Not the same context"):
MultiValueGuc(file_seg_guc, db_seg_guc)
def test_init_handles_both_orders(self):
self.assertEqual(self.file_seg_guc, self.subject.primary_file_seg_guc)
self.assertEqual(self.db_seg_guc, self.subject.db_seg_guc)
self.assertTrue(isinstance(self.subject.primary_file_seg_guc, FileSegmentGuc))
self.assertTrue(isinstance(self.subject.db_seg_guc, DatabaseSegmentGuc))
self.subject = MultiValueGuc(self.db_seg_guc, self.file_seg_guc)
self.assertEqual(self.file_seg_guc, self.subject.primary_file_seg_guc)
self.assertEqual(self.db_seg_guc, self.subject.db_seg_guc)
self.assertTrue(isinstance(self.subject.primary_file_seg_guc, FileSegmentGuc))
self.assertTrue(isinstance(self.subject.db_seg_guc, DatabaseSegmentGuc))
def test_init_when_none_raises(self):
with self.assertRaisesRegex(Exception, "comparison requires two gucs"):
self.subject = MultiValueGuc(self.db_seg_guc, None)
with self.assertRaisesRegex(Exception, "comparison requires two gucs"):
self.subject = MultiValueGuc(None, self.db_seg_guc)
def test_report_fail_format_for_database_and_file_gucs(self):
|
self.assertEqual(self.subject.report_fail_format(),
["[context: contentid] [dbid: dbid] [name: guc_name] [value: sql_value | file: file_value]"])
def test_report_fail_format_file_segment_guc_only(self):
self.subject.db_seg_guc = None
row = ['contentid', 'guc_name', 'primary_value', "dbid1"]
self.subject.set_primary_file_segment(FileSegmentGuc(row))
row = ['cont
|
entid', 'guc_name', 'mirror_value', "dbid2"]
self.subject.set_mirror_file_segment(FileSegmentGuc(row))
self.assertEqual(self.subject.report_fail_format(),
["[context: contentid] [dbid: dbid1] [name: guc_name] [value: primary_value]",
"[context: contentid] [dbid: dbid2] [name: guc_name] [value: mirror_value]"])
def test_when_segment_report_success_format(self):
self.assertEqual(self.subject.report_success_format(),
"Segment value: sql_value | file: file_value")
def test_when_values_match_report_success_format_file_compare(self):
self.subject.db_seg_guc.value = 'value'
self.subject.primary_file_seg_guc.value = 'value'
self.assertEqual(self.subject.report_success_format(), "Segment value: value | file: value")
def test_is_internally_consistent_fails(self):
self.assertEqual(self.subject.is_internally_consistent(), False)
def test_is_internally_consistent_when_file_value_is_none_succeeds(self):
self.file_seg_guc.value = None
self.assertEqual(self.subject.is_internally_consistent(), True)
def test_is_internally_consistent_when_primary_is_same_succeeds(self):
self.subject.primary_file_seg_guc.value = "sql_value"
self.assertEqual(self.subject.is_internally_consistent(), True)
def test_is_internally_consistent_when_mirror_is_different_fails(self):
self.subject.primary_file_seg_guc.value = "sql_value"
row = ['contentid', 'guc_name', 'diffvalue', "dbid1"]
self.subject.set_mirror_file_segment(FileSegmentGuc(row))
self.assertEqual(self.subject.is_internally_consistent(), False)
def test_is_internally_consistent_with_quotes_and_escaping(self):
cases = [
{'file_value': "'value'", 'db_value': 'value'},
{'file_value': "''", 'db_value': ''},
{'file_value': "'\\n\\r\\b\\f\\t'", 'db_value': '\n\r\b\f\t'},
{'file_value': "'\\0\\1\\2\\3\\4\\5\\6\\7'", 'db_value': '\0\1\2\3\4\5\6\7'},
{'file_value': "'\\8'", 'db_value': '8'},
{'file_value': "'\\01\\001\\377\\777\\7777'", 'db_value': '\x01\x01\xFF\xFF\xFF7'},
]
for case in cases:
file_seg_guc = FileSegmentGuc(['contentid', 'guc_name', case['file_value'], "dbid"])
db_seg_guc = DatabaseSegmentGuc(['contentid', 'guc_name', case['db_value']])
subject = MultiValueGuc(file_seg_guc, db_seg_guc)
error_message = "expected file value: %r to be equal to db value: %r" % (case['file_value'], case['db_value'])
self.assertEqual(subject.is_internally_consistent(), True, error_message)
def test_is_internally_consistent_when_there_is_no_quoting(self):
cases = [
{'file_value': "value123", 'db_value': 'value123'},
{'file_value': "value-._:/", 'db_value': 'value-._:/'},
]
for case in cases:
file_seg_guc = FileSegmentGuc(['contentid', 'guc_name', case['file_value'], "dbid"])
db_seg_guc = DatabaseSegmentGuc(['contentid', 'guc_name', case['db_value']])
subject = MultiValueGuc(file_seg_guc, db_seg_guc)
error_message = "expected file value: %r to be equal to db value: %r" % (case['file_value'], case['db_value'])
self.assertEqual(subject.is_internally_consistent(), True, error_message)
def test_is_internally_consistent_when_gucs_are_different_returns_false(self):
file_seg_guc = FileSegmentGuc(['contentid', 'guc_name', "'hello", "dbid"])
db_seg_guc = DatabaseSegmentGuc(['contentid', 'guc_name', "hello"])
subject = MultiValueGuc(file_seg_guc, db_seg_guc)
self.assertFalse(subject.is_internally_consistent())
def test__unquote(self):
cases = [
('hello', 'hello'),
("''", ''),
("'hello'", 'hello'),
("'a\\b\\f\\n\\r\\tb'", 'a\b\f\n\r\tb'),
("'\\0\\1\\2\\3\\4\\5\\6\\7\\8\\9'", '\0\1\2\3\4\5\6\789'),
("'\\1\\01\\001\\0001'", '\x01\x01\x01\x001'),
("'\\1a1'", '\x01a1'),
("'\\377\\400\\776\\7777'", '\xFF\x00\xFE\xFF7'),
("''''", "'"),
]
for quoted, unquoted in cases:
self.assertEqual(MultiValueGuc._unquote(quoted), unquoted)
def test__unquote_failure_cases(self):
cases = [
"'hello",
"",
"'",
"'hello\\'",
"'hel'lo'",
"'''",
]
for quoted in cases:
with self.assertRaises(MultiValueGuc.ParseError):
MultiValueGuc._unquote(quoted)
def test_set_file_segment_succeeds(self):
row = ['contentid', 'guc_name', 'file_value', "diff_dbid"]
file_seg_guc = FileSegmentGuc(row)
self.subject.set_mirror_file_segment(file_seg_guc)
self.assertEqual(self.subject.mirror_file_seg_guc, file_seg_guc)
def test_get_value_returns_unique(self):
self.assertEqual(self.subject.get_value(), "sql_value||file_value")
|
havardgulldahl/perpetual-yearcal
|
models.py
|
Python
|
mit
| 1,783
| 0.011223
|
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 Håvard Gulldahl
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# appengine stuff
from google.appengine.ext import ndb
class Color(ndb.Model):
foreground = ndb.StringProperty()
ba
|
ckground = ndb.StringProperty()
colorId = ndb.StringProperty()
category = ndb.StringProperty() # 'calendar' or 'event'
title = ndb.StringProperty()
class CalendarPrettyTitle(ndb.Model):
cal_id = ndb.StringProperty()
pretty_title = ndb.StringProperty()
class UserSetup(ndb.Model):
user = ndb.UserProperty()
google_token = ndb.JsonProperty()
trello_token = ndb.JsonProperty() # oauth1 access token dict, where .keys() == ('oauth_token', 'oau
|
th_token_secret')
timestamp = ndb.DateTimeProperty(auto_now=True)
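# Illustrative usage sketch (assumption, not part of this module): storing and
# querying one of the entities above with the ndb API; the field values are
# hypothetical.
#
#   color = Color(colorId='7', category='event', title='Peacock',
#                 foreground='#ffffff', background='#16a765')
#   color.put()
#   event_colors = Color.query(Color.category == 'event').fetch()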
|
WesCoomber/dataFlowGraphProjecto
|
presentgetBothNodesEdges.py
|
Python
|
mit
| 22,404
| 0.010266
|
import re, sys
import functools
import graphviz as gv
from graphviz import Source
bad_words = [ 'jns', 'js', 'jnz', 'jz', 'jno', 'jo', 'jbe', 'jb', 'jle', 'jl', 'jae', 'ja', 'jne loc', 'je', 'jmp', 'jge', 'jg', 'SLICE_EXTRA', 'SLICE_ADDRESSING', '[BUG]', 'SLICE_VERIFICATION', 'syscall', '#PARAMS_LOG']
instrEdges = []
instrNodes = []
with open('smallCleanedSlice.txt') as oldfile:
for line in oldfile:
tempLine = line.split()
instrNodes.append(tempLine[1] + '-' + tempLine[2])
i=0
for x in instrNodes:
instrNodes[i] = x.replace("#", "")
i += 1
instrNodesString = ''.join(instrNodes)
print('Done! Instruction Nodes List Size is : ') #+ instrNodesString
#print(instrNodes)
print(len(instrNodes))
#print(instrNodes[len(instrNodes)-1])
pattern = '\s+(\S+)\s'
with open('smallCleanedSlice.txt') as oldfile:
for line in oldfile:
prepline = line.replace("#\S*", " r1 ")
prepline = prepline.replace("[SLICE_INFO]", " r2 ")
prepline = prepline.replace("[SLICE_INFO]", " r2 ")
prepline = prepline.replace("[SLICE]", " r3 ")
prepline = prepline.replace("\t", " \t ")
prepline = prepline.rstrip("\t")
prepline = re.sub(r'(\s)#\w+', r'\1', prepline)
prepline = re.sub(r'.*SLICE', '', prepline)
prepline = re.sub(r'(\s)SLICE\s+', r'\1', prepline)
splitList = re.split("r1 | r2 | \t | r3 ", prepline)
if (len(splitList) >=2):
tempEdge = splitList[1]
tempEdge = tempEdge.lstrip()
#print tempEdges
#print len(splitList)
else :
tempEdge = splitList[0]
#print ('hello: '+tempEdge)
instrEdges.append(tempEdge)
#str1 = ''.join(tempLine)
#for line in str1:
dict1 ={}
j = 0
#give unique id number for each instruction based on its line number (starting at 0)
'''for x in instrNodes:
instrNodes[j] = str(j)+ '-' +instrNodes[j]
j+=1
'''
instrNodesString = ''.join(instrEdges)
print('Done! Instruction Edges List size is : ') #+ instrNodesString
#print(instrEdges)
#print(instrNodes)
print(len(instrEdges))
new_dict = {k: v for k, v in zip(instrNodes, instrEdges)}
#print(dict1)
#example dictionary entry is dict1['0-cmp': 'eax, 0xfffff001']
print('Done! Dict (LineNumber-Instruction: Edges) is : ')
#print((new_dict).keys())
#print((new_dict))
print("first node(instr): and its edges(operands): " + 'b7ff5c05-cmp: '+str(new_dict['b7ff5c05-cmp']))
#PRINT OUT THE TWO LISTS INTO TWO SEPERATE FILES
#y = ",".join(map(str, instrNodes))
#z = ",,".join(map(str, instrEdges))
#outputFile= open('nodesOut.txt', 'w')
#outputFile.write(y)
#outputFile2 = open('edgesOut.txt', 'w')
#outputFile2.write(z)
flagEnterKeys = 1
while (flagEnterKeys == 1):
input_var = raw_input('Enter a key (b7ff5c05-cmp for the 1st instruction cmp in the slice): TYPE EXIT TO End.\n')
if (input_var in new_dict):
print("Operands for " + input_var + " are: " + str(new_dict[input_var]) + ".\n")
break
if ((input_var == "exit") or (input_var == ",exit,")):
flagEnterKeys = 0;
break
else :
print("ERROR! Please enter in a valid key for the instrNodes, instrEdges dictionary.")
##New Graphviz-dot code here
graph = functools.partial(gv.Graph, format='svg')
digraph = functools.partial(gv.Digraph, format='svg')
datG = digrap
|
h()
nodes = instrNodes
edges = instrEdges
#nodes = testNodes
#edges = testEdges
print(nodes)
print(edges)
def add_nodes(graph):
for n in nodes:
graph.node(n, label = str(n) + '(' + str(new_dict[n]) + ')')
return graph
def add_edges(graph):
for e in edges:
graph.edge(*e)
return graph
cmpFlags = []
newestOF = ''
newestSF = ''
newestZF = ''
newestAF = ''
newestCF = ''
newestPF = ''
# default values 'R' means edge from root node in the 32-bit 4word register
|
s
#Accumulator Counter Data Base Stack Pointer Stack Base Pointer Source Destination
EAX = ['R','R','R','R']
ECX = ['R','R','R','R']
EDI = ['R','R','R','R']
EDX = ['R','R','R','R']
EBX = ['R','R','R','R']
ESP = ['R','R','R','R']
EBP = ['R','R','R','R']
ESI = ['R','R','R','R']
EDI = ['R','R','R','R']
#modify Eax register and its 16 and 8 bit versions
def modifyEAX(firstWord, secondWord, thirdWord, fourthWord):
EAX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyAX(thirdWord, fourthWord):
EAX[2:4] = [thirdWord, fourthWord]
def modifyAH(thirdWord):
EAX[2:3] = [thirdWord]
def modifyAL(fourthWord):
EAX[3:4] = [fourthWord]
#modify ecx register and its 16 and 8 bit versions
def modifyECX(firstWord, secondWord, thirdWord, fourthWord):
ECX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyCX(thirdWord, fourthWord):
ECX[2:4] = [thirdWord, fourthWord]
def modifyCH(thirdWord):
ECX[2:3] = [thirdWord]
def modifyCL(fourthWord):
ECX[3:4] = [fourthWord]
#modify edx register and its 16 and 8 bit versions
def modifyEDX(firstWord, secondWord, thirdWord, fourthWord):
EDX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyDX(thirdWord, fourthWord):
EDX[2:4] = [thirdWord, fourthWord]
def modifyDH(thirdWord):
EDX[2:3] = [thirdWord]
def modifyDL(fourthWord):
EDX[3:4] = [fourthWord]
#modify ebx register and its 16 and 8 bit versions
def modifyEBX(firstWord, secondWord, thirdWord, fourthWord):
EBX[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyBX(thirdWord, fourthWord):
EBX[2:4] = [thirdWord, fourthWord]
def modifyBH(thirdWord):
EBX[2:3] = [thirdWord]
def modifyBL(fourthWord):
EBX[3:4] = [fourthWord]
#modify esp register and its 16bit versions
def modifyESP(firstWord, secondWord, thirdWord, fourthWord):
ESP[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifySP(thirdWord, fourthWord):
ESP[2:4] = [thirdWord, fourthWord]
#modify ebp register and its 16bit versions
def modifyEBP(firstWord, secondWord, thirdWord, fourthWord):
EBP[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyBP(thirdWord, fourthWord):
EBP[2:4] = [thirdWord, fourthWord]
#modify esi register and its 16bit versions
def modifyESI(firstWord, secondWord, thirdWord, fourthWord):
ESI[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifySI(thirdWord, fourthWord):
ESI[2:4] = [thirdWord, fourthWord]
#modify edi register and its 16bit versions
def modifyEDI(firstWord, secondWord, thirdWord, fourthWord):
EDI[0:4] = [firstWord, secondWord, thirdWord, fourthWord]
def modifyDI(thirdWord, fourthWord):
EDI[2:4] = [thirdWord, fourthWord]
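# Illustrative sketch (derived from the helpers above; the instruction ids are
# hypothetical): each register list records which instruction last wrote each of
# its four 8-bit slots (AX = slots 2-3, AH = slot 2, AL = slot 3).
#
#   modifyEAX('i1', 'i1', 'i1', 'i1')   # 32-bit write touches all four slots
#   modifyAX('i2', 'i2')                # 16-bit write touches the low two slots
#   modifyAL('i3')                      # 8-bit write touches the lowest slot
#   # EAX is now ['i1', 'i1', 'i2', 'i3']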
ax = EAX[2:4]
print(EAX)
print(ax)
ax = ['changedax1', 'changedax2']
print(EAX)
print(ax)
datG.node('R', 'Root')
#datG.edge('R', '0-cmp', label='eax')
#datG.edge('R', '0-cmp', label='0xfffff001' )
datG.node('Out', 'Output')
pattern = re.compile("^\s+|\s*,\s*|\s+$")
for idx, c in enumerate(instrEdges):
splitStr = [a for a in pattern.split(c) if a]
for idz, b in enumerate(splitStr):
tempNodeStr = instrNodes[(idx)]
if (idz == 0 and 'mov' not in tempNodeStr):
# if dest reg is eax
if b == "eax":
modifyEAX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "ax":
modifyAX(nodes[idx],nodes[idx])
if b == "ah":
modifyAH(nodes[idx])
if b == "al":
modifyAL(nodes[idx])
#
# if dest reg is ecx
if b == "ecx":
modifyECX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "cx":
modifyCX(nodes[idx],nodes[idx])
if b == "ch":
modifyCH(nodes[idx])
if b == "cl":
modifyCL(nodes[idx])
#
# if dest reg is edx
if b == "edx":
modifyEDX(nodes[idx],nodes[idx],nodes[idx],nodes[idx])
if b == "dx":
modifyDX(nodes[idx],nodes[idx])
if b == "dh":
modifyDH(nodes[idx])
if b == "dl":
modifyDL(nodes[idx])
#
|
fanout/django-eventstream
|
django_eventstream/urls.py
|
Python
|
mit
| 96
| 0
|
from django.urls import path
fr
|
om . import views
urlpatterns = [
|
path('', views.events),
]
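# Illustrative sketch (assumption, not part of this module): these urlpatterns
# are normally mounted from a project's urls.py, for example:
#
#   from django.urls import include, path
#   import django_eventstream
#
#   urlpatterns = [
#       path('events/', include(django_eventstream.urls)),
#   ]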
|
City-of-Bloomington/green-rental
|
scripts/helpers.py
|
Python
|
agpl-3.0
| 22,169
| 0.011728
|
"""
*2014.09.10 16:10:05
DEPRECATED!!!!
please use building.models.search_building and building.models.make_building
instead of the make_unit and make_building functions found here...
out of date.
"""
import sys, os, json, codecs, re
sys.path.append(os.path.dirname(os.getcwd()))
from geopy import geocoders, distance
# MapQuest no longer available in present api. Work around
# detailed here: http://stackoverflow.com/questions/30132636/geocoding-error-with-geopandas-and-geopy
geocoders.MapQuest = geocoders.OpenMapQuest
#http://stackoverflow.com/questions/8047204/django-script-to-access-model-objects-without-using-manage-py-shell
#from rentrocket import settings
#from django.core.management import setup_environ
#setup_environ(settings)
#pre django 1.4 approach:
#from rentrocket import settings as rrsettings
#from django.core.management import setup_environ
#setup_environ(settings)
#from django.c
|
onf import settings
#settings.configure(rrsettings)
os.environ.setdefault("DJANGO
|
_SETTINGS_MODULE", "rentrocket.settings")
from building.models import Building, Parcel, BuildingPerson, Unit
from person.models import Person
def parse_person(text):
"""
take a string representing all details of a person
and try to parse out the different details for that person...
usually it's a comma separated string,
but sometimes names have commas in them
instead, look for the start of the address,
either a number or a PO variation
"""
name = ''
address = ''
phone = ''
remainder = ''
print "Parsing: %s" % text
phone = re.compile("(\d{3})\W*(\d{3})\W*(\d{4})\W*(\w*)")
m = phone.search(text)
if m:
#print dir(m)
#print len(m.groups())
phone1 = m.group(1)
parts = text.split(phone1)
#update text so it only contains part without phone number:
text = parts[0]
full_phone = phone1+parts[1]
print "Phone found: %s" % full_phone
filler='.*?' # Non-greedy match on filler
po_box='( P\\.O\\. | P O | PO )'
rg = re.compile(po_box,re.IGNORECASE|re.DOTALL)
m = rg.search(text)
if m:
csv1=m.group(1)
print "PO BOX MATCH: ("+csv1+")"+"\n"
print text
parts = text.split(csv1)
#name = m.group(0)
name = parts[0]
#IndexError: no such group
#address = m.group(1) + m.group(2)
address = m.group(1) + parts[1]
else:
re2='(\\d+)' # Integer Number 1
rg = re.compile(re2,re.IGNORECASE|re.DOTALL)
m = rg.search(text)
if m:
int1 = m.group(1)
print "NUMBER MATCH: (" + int1 + ")"
parts = text.split(int1)
#name = m.group(0)
name = parts[0]
#IndexError: no such group
#address = m.group(1) + m.group(2)
address = m.group(1) + parts[1]
address = address.strip()
name = name.strip()
print "name: %s" % name
print "address: %s" % address
print ""
if name[-1] == ',':
name = name[:-1]
if address[-1] == ',':
address = address[:-1]
return (name, address, phone, remainder)
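# Illustrative sketch (assumption, not from the original script): the kind of
# comma-separated owner string this parser expects; the record below is made up.
#
#   parse_person("Smith Properties LLC, 123 N Main St, Bloomington IN 47401")
#   # name    -> "Smith Properties LLC"
#   # address -> "123 N Main St, Bloomington IN 47401"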
def make_building(location, bldg_id, city, feed_source, parcel_id=None, bldg_type=None, no_units=None, sqft=None):
"""
add the building to the database
#*2015.03.07 14:04:37
#see search_building(bldgform.cleaned_data.get("address"), unit=unit, make=True)
"""
full_city = '%s, IN, USA' % city.name
match = False
#find an address to use
for geo_source in location.sources:
if not match:
source_list = location.get_source(geo_source)
if len(source_list) and source_list[0]['place'] and source_list[0]['place'] != full_city:
print "using: %s to check: %s" % (geo_source, source_list[0]['place'])
match = True
#TODO: process this a bit more...
#probably don't want city and zip here:
#keeping city and zip minimizes chance for overlap
#especially since this is used as a key
#can always take it out on display, if necessary
#*2014.09.10 14:51:28
#this has changed... should only use street now...
#see building/models.py -> make_building
#cur_address = source_list[0]['place']
#cur_address = source_list[0]['place']
if parcel_id == None:
cid = "%s-%s" % (city.tag, bldg_id)
else:
cid = parcel_id
print "Checking parcel id: %s" % (cid)
parcels = Parcel.objects.filter(custom_id=cid)
if parcels.exists():
parcel = parcels[0]
print "Already had parcel: %s" % parcel.custom_id
else:
parcel = Parcel()
parcel.custom_id = cid
parcel.save()
print "Created new parcel: %s" % parcel.custom_id
buildings = Building.objects.filter(city=city).filter(address=cur_address)
bldg = None
#check if a previous building object in the db exists
if buildings.exists():
bldg = buildings[0]
print "Already had: %s" % bldg.address
else:
#if not,
#CREATE A NEW BUILDING OBJECT HERE
#cur_building = Building()
bldg = Building()
#bldg.address = source_list[0]['place']
bldg.address = source_list[0]['street']
bldg.latitude = float(source_list[0]['lat'])
bldg.longitude = float(source_list[0]['lng'])
bldg.parcel = parcel
bldg.geocoder = geo_source
bldg.source = feed_source
bldg.city = city
bldg.state = city.state
if bldg_type:
bldg.type = bldg_type
if no_units:
bldg.number_of_units = no_units
if sqft:
bldg.sqft = sqft
bldg.save()
print "Created new building: %s" % bldg.address
return bldg
else:
print "Skipping: %s with value: %s" % (geo_source, source_list[0]['place'])
def make_unit(apt_num, building):
#check for existing:
units = Unit.objects.filter(building=building).filter(number=apt_num)
unit = None
#check if a previous building object in the db exists
if units.exists():
unit = units[0]
print "Already had Unit: %s" % unit.address
else:
#if not,
#CREATE A NEW UNIT OBJECT HERE
unit = Unit()
unit.building = building
unit.number = apt_num
# don't want to set this unless it's different:
#unit.address = building.address + ", " + apt_num
## bedrooms
## bathrooms
## sqft
## max_occupants
unit.save()
print "Created new unit: %s" % unit.number
return unit
def make_person(name, building, relation, address=None, city=None, website=None, phone=None):
#now associate applicant with building:
#first find/make person
people = Person.objects.filter(city=city).filter(name=name)
person = None
#check if a previous building object in the db exists
if people.exists():
person = people[0]
print "Already had Person: %s" % person.name
else:
#if not,
#CREATE A NEW PERSON OBJECT HERE
person = Person()
person.name = name
if city:
person.city = city
if address:
person.address = address
if website:
person.website = website
if phone:
person.phone = phone
person.save()
#then find/make association:
bpeople = BuildingPerson.objects.filter(build
|
danielru/pySDC
|
playgrounds/deprecated/acoustic_1d_imex/ploterrorconstants.py
|
Python
|
bsd-2-clause
| 3,326
| 0.02285
|
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from matplotlib.ticker import ScalarFormatter
from subprocess import call
fs = 8
order = np.array([])
nsteps = np.array([])
error = np.array([])
# load SDC data
file = open('conv-data.txt', 'r')
while True:
line = file.readline()
if not line: break
items = str.split(line, " ", 3)
order = np.append(order, int(items[0]))
nsteps = np.append(nsteps, int(float(items[1])))
error = np.append(error, float(items[2]))
file.close()
assert np.size(order)==np.size(nsteps), 'Found different number of entries in order and nsteps'
assert np.size(nsteps)==np.size(error), 'Found different number of entries in nsteps and error'
N = np.size(nsteps)/3
assert isinstance(N, int), 'Number of entries not a multiple of three'
# load Runge-Kutta data
order_rk = np.array([])
nsteps_rk = np.array([])
error_rk = np.array([])
file = open('conv-data-rk.txt', 'r')
while True:
line = file.readline()
if not line: break
items = str.split(line, " ", 3)
order_rk = np.append(order_rk, int(items[0]))
nsteps_rk = np.append(nsteps_rk, int(float(items[1])))
error_rk = np.append(error_rk, float(items[2]))
file.close()
assert np.size(order_rk)==np.size(nsteps_rk), 'Found different number of entries in order and nsteps'
assert np.size(nsteps_rk)==np.size(error_rk), 'Found different number of entries in nsteps and error'
N = np.size(nsteps_rk)/3
assert isinstance(N, int), 'Number of entries not a multiple of three'
### Compute and plot error constant ###
errconst_sdc = np.zeros((3,N))
errconst_rk = np.zeros((3,N))
nsteps_plot_sdc = np.zeros((3,N))
nsteps_plot_rk = np.zeros((3,N))
order_plot = np.zeros(3)
for ii in range(0,3):
order_plot[ii] = order[N*ii]
for jj in range(0,N):
p_sdc = order[N*ii+jj]
err_sdc = error[N*ii+jj]
nsteps_plot_sdc[ii,jj] = nsteps[N*ii+jj]
dt_sdc = 1.0/float(nsteps_plot_sdc[ii,jj])
errconst_sdc[ii,jj] = err_sdc/dt_sdc**float(p_sdc)
p_rk = order_rk[N*ii+jj]
err_rk = error_rk[N*ii+jj]
nsteps_plot_rk[ii,jj] = nsteps_rk[N*ii+jj]
dt_rk = 1.0/float(nsteps_plot_rk[ii,jj])
errconst_rk[ii,jj] = err_rk/dt_rk**float(p_rk)
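# The estimate above assumes the usual ansatz err ~= C * dt**p for a method of
# order p, so the error constant is recovered as C ~= err / dt**p for both the
# SDC and the IMEX Runge-Kutta runs.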
color = [ 'r', 'b', 'g' ]
shape_sdc = ['<', '^', '>']
shape_rk = ['o', 'd', 's']
rcParams['figure.figsize'] = 2.5, 2.
|
5
fig = plt.figure()
for ii in range(0,3):
plt.semilogy(nsteps_plot_sdc[ii,:], errconst_sdc[ii,:], shape_sdc[ii], markersize=fs, color=color[ii], label='SDC('+str(int(order_plot[ii]))+')')
plt.semilogy(nsteps_plot_rk[ii,:], errconst_rk[ii,:], shape_rk[ii], markersize=fs-2, color=color[ii], label='IMEX('+str(int(order_plot[ii]))+')')
plt.legend(loc='lower left', fontsize=fs, prop={'size':fs-1}, ncol=2)
plt.xlabel('Number o
|
f time steps', fontsize=fs)
plt.ylabel('Estimated error constant', fontsize=fs, labelpad=2)
plt.xlim([0.9*np.min(nsteps_plot_sdc), 1.1*np.max(nsteps_plot_sdc)])
plt.ylim([1e1, 1e6])
plt.yticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6],fontsize=fs)
plt.xticks([20, 30, 40, 60, 80, 100], fontsize=fs)
plt.gca().get_xaxis().get_major_formatter().labelOnlyBase = False
plt.gca().get_xaxis().set_major_formatter(ScalarFormatter())
#plt.show()
filename = 'error_constants.pdf'
fig.savefig(filename,bbox_inches='tight')
call(["pdfcrop", filename, filename])
|
eldarion/django-trending
|
trending/managers.py
|
Python
|
bsd-3-clause
| 1,033
| 0.000968
|
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Sum
from django.contrib.contenttypes.models impo
|
rt ContentType
class Tr
|
endingManager(models.Manager):
def trending(self, model, days=30, kind=""):
views = self.filter(
viewed_content_type=ContentType.objects.get_for_model(model),
views_on__gte=datetime.date.today() - datetime.timedelta(days=days),
kind=kind
).values(
"viewed_content_type",
"viewed_object_id",
"kind"
).annotate(
num_views=Sum("count")
).order_by("-num_views")
for d in views:
try:
d["object"] = ContentType.objects.get_for_id(
d["viewed_content_type"]
).get_object_for_this_type(
pk=d["viewed_object_id"]
)
except ObjectDoesNotExist:
d["object"] = None
return views
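# Illustrative usage sketch (assumption, not part of this app): a view-count
# model would expose this manager and query trending objects like so; the model
# and field names are hypothetical.
#
#   # class DailyViewCount(models.Model):
#   #     ...
#   #     objects = TrendingManager()
#
#   top_posts = DailyViewCount.objects.trending(BlogPost, days=7)
#   for entry in top_posts:
#       print(entry["object"], entry["num_views"])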
|
KODeKarnage/script.sub.missing
|
resources/lib/thetvdbapi.py
|
Python
|
gpl-3.0
| 10,006
| 0.006696
|
"""
thetvdb.com Python API
(c) 2009 James Smith (http://loopj.com)
(c) 2014 Wayne Davison <wayne@opencoder.net>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You sho
|
uld have received a copy of the GNU General Public License
along with this pro
|
gram. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import datetime
import random
import re
import copy
import xml.parsers.expat as expat
from cStringIO import StringIO
from zipfile import ZipFile
class TheTVDB(object):
def __init__(self, api_key='2B8557E0CBF7D720', language = 'en', want_raw = False):
#http://thetvdb.com/api/<apikey>/<request>
self.api_key = api_key
self.mirror_url = "http://thetvdb.com"
self.base_url = self.mirror_url + "/api"
self.base_key_url = "%s/%s" % (self.base_url, self.api_key)
self.language = language
self.want_raw = want_raw
# Mirror selection got deprecated a while back, so tell it to skip the actual fetch.
self.select_mirrors(False)
def select_mirrors(self, do_the_fetch = True):
#http://thetvdb.com/api/<apikey>/mirrors.xml
url = "%s/mirrors.xml" % self.base_key_url
self.xml_mirrors = []
self.zip_mirrors = []
try:
filt_func = lambda name, attrs: attrs if name == 'Mirror' else None
xml = self._get_xml_data(url, filt_func) if do_the_fetch else {}
for mirror in xml.get("Mirror", []):
mirrorpath = mirror.get("mirrorpath", None)
typemask = mirror.get("typemask", None)
if not mirrorpath or not typemask:
continue
typemask = int(typemask)
if typemask & 1:
self.xml_mirrors.append(mirrorpath)
if typemask & 4:
self.zip_mirrors.append(mirrorpath)
except:
pass
if not self.xml_mirrors:
self.xml_mirrors = [ self.mirror_url ]
if not self.zip_mirrors:
self.zip_mirrors = [ self.mirror_url ]
self.xml_mirror_url = random.choice(self.xml_mirrors)
self.zip_mirror_url = random.choice(self.zip_mirrors)
self.base_xml_url = "%s/api/%s" % (self.xml_mirror_url, self.api_key)
self.base_zip_url = "%s/api/%s" % (self.zip_mirror_url, self.api_key)
def _2show(self, attrs):
return attrs
def _2episode(self, attrs):
return attrs
@staticmethod
def convert_time(time_string):
"""Convert a thetvdb time string into a datetime.time object."""
time_res = [re.compile(r"\D*(?P<hour>\d{1,2})(?::(?P<minute>\d{2}))?.*(?P<ampm>a|p)m.*", re.IGNORECASE), # 12 hour
re.compile(r"\D*(?P<hour>\d{1,2}):?(?P<minute>\d{2}).*")] # 24 hour
for r in time_res:
m = r.match(time_string)
if m:
gd = m.groupdict()
if "hour" in gd and "minute" in gd and gd["minute"] and "ampm" in gd:
hour = int(gd["hour"])
if hour == 12:
hour = 0
if gd["ampm"].lower() == "p":
hour += 12
return datetime.time(hour, int(gd["minute"]))
elif "hour" in gd and "ampm" in gd:
hour = int(gd["hour"])
if hour == 12:
hour = 0
if gd["ampm"].lower() == "p":
hour += 12
return datetime.time(hour, 0)
elif "hour" in gd and "minute" in gd:
return datetime.time(int(gd["hour"]), int(gd["minute"]))
return None
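    # Illustrative examples (not part of the API): inputs the two patterns above
    # are meant to cover.
    #
    #   TheTVDB.convert_time("8:30 PM")  -> datetime.time(20, 30)
    #   TheTVDB.convert_time("9 AM")     -> datetime.time(9, 0)
    #   TheTVDB.convert_time("21:45")    -> datetime.time(21, 45)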
@staticmethod
def convert_date(date_string):
"""Convert a thetvdb date string into a datetime.date object."""
first_aired = None
try:
first_aired = datetime.date(*map(int, date_string.split("-")))
except ValueError:
pass
return first_aired
# language can be "all", "en", "fr", etc.
def get_matching_shows(self, show_name, language=None, want_raw=False):
"""Get a list of shows matching show_name."""
if type(show_name) == type(u''):
show_name = show_name.encode('utf-8')
get_args = {"seriesname": show_name}
if language is not None:
get_args['language'] = language
get_args = urllib.urlencode(get_args, doseq=True)
url = "%s/GetSeries.php?%s" % (self.base_url, get_args)
if want_raw:
filt_func = lambda name, attrs: attrs if name == "Series" else None
else:
filt_func = lambda name, attrs: (attrs.get("seriesid", ""), attrs.get("SeriesName", ""), attrs.get("IMDB_ID", "")) if name == "Series" else None
xml = self._get_xml_data(url, filt_func)
return xml.get('Series', [])
def get_show(self, show_id):
"""Get the show object matching this show_id."""
url = "%s/series/%s/%s.xml" % (self.base_xml_url, show_id, self.language)
return self._get_show_by_url(url)
def _get_show_by_url(self, url):
filt_func = lambda name, attrs: self._2show(attrs) if name == "Series" else None
xml = self._get_xml_data(url, filt_func)
return xml['Series'][0] if 'Series' in xml else None
def get_episode(self, episode_id):
"""Get the episode object matching this episode_id."""
url = "%s/episodes/%s" % (self.base_xml_url, episode_id)
return self._get_episode_by_url(url)
def _get_episode_by_url(self, url):
filt_func = lambda name, attrs: self._2episode(attrs) if name == "Episode" else None
xml = self._get_xml_data(url, filt_func)
return xml['Episode'][0] if 'Episode' in xml else None
def get_show_and_episodes(self, show_id):
"""Get the show object and all matching episode objects for this show_id."""
url = "%s/series/%s/all/%s.zip" % (self.base_zip_url, show_id, self.language)
zip_name = '%s.xml' % self.language
filt_func = lambda name, attrs: self._2episode(attrs) if name == "Episode" else self._2show(attrs) if name == "Series" else None
xml = self._get_xml_data(url, filt_func, zip_name=zip_name)
if 'Series' not in xml:
return None
return (xml['Series'][0], xml.get('Episode', []))
def get_updates(self, callback, period = "day"):
"""Return all series, episode, and banner updates w/o having to have it
all in memory at once. Also returns the Data timestamp. The callback
routine should be defined as: my_callback(name, attrs) where name will
be "Data", "Series", "Episode", or "Banner", and attrs will be a dict
of the values (e.g. id, time, etc)."""
self._get_update_info(period, callback=callback)
def _get_update_info(self, period, filter_func = None, callback = None):
url = "%s/updates/updates_%s.zip" % (self.base_zip_url, period)
zip_name = 'updates_%s.xml' % period
return self._get_xml_data(url, filter_func, zip_name, callback)
def _get_xml_data(self, url, filter_func = None, zip_name = None, callback = None):
data = urllib.urlopen(url)
if zip_name:
zipfile = ZipFile(StringIO(data.read()))
data = zipfile.open(zip_name)
if not data:
raise Exception("Failed to get any data")
e = ExpatParseXml(callback, filter_func)
e.parse(data)
return e.xml
class ExpatParseXml(object):
def __init__(self, callback, filter_func):
self.el_container = None
self.el_name = None
self.el_attr_name =
|
pandel/Marlin
|
buildroot/share/scripts/createTemperatureLookupMarlin.py
|
Python
|
gpl-3.0
| 6,204
| 0.009349
|
#!/usr/bin/python
"""Thermistor Value Lookup Table Generator
Generates lookup to temperature values for use in a microcontroller in C format based on:
http://en.wikipedia.org/wiki/Steinhart-Hart_equation
The main use is for Arduino programs that read data from the circuit board described here:
http://reprap.org/wiki/Temperature_Sensor_v2.0
Usage: python createTemperatureLookupMarlin.py [options]
Options:
-h, --help show this help
--rp=... pull-up resistor
--t1=ttt:rrr low temperature temperature:resistance point (around 25 degC)
--t2=ttt:rrr middle temperature temperature:resistance point (around 150 degC)
--t3=ttt:rrr high temperature temperature:resistance point (around 250 degC)
--num-temps=... the number of temperature points to calculate (default: 36)
"""
from math import *
import sys
import getopt
"Constants"
ZERO = 273.15 # zero point of Kelvin scale
VADC = 5 # ADC voltage
VCC = 5 # supply voltage
ARES = pow(2,10) # 10 Bit ADC resolution
VSTEP = VADC / ARES # ADC voltage resolution
TMIN = 0 # lowest temperature in table
TMAX = 350 # highest temperature in table
class Thermistor:
"Class to do the thermistor maths"
def __init__(self, rp, t1, r1, t2, r2, t3, r3):
l1 = log(r1)
l2 = log(r2)
l3 = log(r3)
y1 = 1.0 / (t1 + ZERO) # adjust scale
y2 = 1.0 / (t2 + ZERO)
y3 = 1.0 / (t3 + ZERO)
x = (y2 - y1) / (l2 - l1)
y = (y3 - y1) / (l3 - l1)
c = (y - x) / ((l3 - l2) * (l1 + l2 + l3))
b = x - c * (l1**2 + l2**2 + l1*l2)
a = y1 - (b + l1**2 *c)*l1
if c < 0:
|
print "//////////////////////////////////////////////////////////////////////////////////////"
print "// WARNING: negative coefficient 'c'! Something may be wrong with the measurements! //"
print "///////////////////////////////////////////////
|
///////////////////////////////////////"
c = -c
self.c1 = a # Steinhart-Hart coefficients
self.c2 = b
self.c3 = c
self.rp = rp # pull-up resistance
def resol(self, adc):
"Convert ADC reading into a resolution"
res = self.temp(adc)-self.temp(adc+1)
return res
def voltage(self, adc):
"Convert ADC reading into a Voltage"
return adc * VSTEP # convert the 10 bit ADC value to a voltage
def resist(self, adc):
"Convert ADC reading into a resistance in Ohms"
r = self.rp * self.voltage(adc) / (VCC - self.voltage(adc)) # resistance of thermistor
return r
def temp(self, adc):
"Convert ADC reading into a temperature in Celcius"
l = log(self.resist(adc))
Tinv = self.c1 + self.c2*l + self.c3* l**3 # inverse temperature
return (1/Tinv) - ZERO # temperature
def adc(self, temp):
"Convert temperature into a ADC reading"
x = (self.c1 - (1.0 / (temp+ZERO))) / (2*self.c3)
y = sqrt((self.c2 / (3*self.c3))**3 + x**2)
r = exp((y-x)**(1.0/3) - (y+x)**(1.0/3))
return (r / (self.rp + r)) * ARES
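# Illustrative sketch (not part of the generator): exercising the class with the
# default calibration points that main() uses below.
#
#   t = Thermistor(4700, 25, 100000, 150, 1641.9, 250, 226.15)
#   count = t.adc(100)       # ADC count corresponding to 100 degC
#   temp = t.temp(count)     # converts that count back to roughly 100 degC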
def main(argv):
"Default values"
t1 = 25 # low temperature in Kelvin (25 degC)
r1 = 100000 # resistance at low temperature (10 kOhm)
t2 = 150 # middle temperature in Kelvin (150 degC)
r2 = 1641.9 # resistance at middle temperature (1.6 KOhm)
t3 = 250 # high temperature in Kelvin (250 degC)
r3 = 226.15 # resistance at high temperature (226.15 Ohm)
rp = 4700; # pull-up resistor (4.7 kOhm)
num_temps = 36; # number of entries for look-up table
try:
opts, args = getopt.getopt(argv, "h", ["help", "rp=", "t1=", "t2=", "t3=", "num-temps="])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt == "--rp":
rp = int(arg)
elif opt == "--t1":
arg = arg.split(':')
t1 = float(arg[0])
r1 = float(arg[1])
elif opt == "--t2":
arg = arg.split(':')
t2 = float(arg[0])
r2 = float(arg[1])
elif opt == "--t3":
arg = arg.split(':')
t3 = float(arg[0])
r3 = float(arg[1])
elif opt == "--num-temps":
num_temps = int(arg)
t = Thermistor(rp, t1, r1, t2, r2, t3, r3)
increment = int((ARES-1)/(num_temps-1));
step = (TMIN-TMAX) / (num_temps-1)
low_bound = t.temp(ARES-1);
up_bound = t.temp(1);
min_temp = int(TMIN if TMIN > low_bound else low_bound)
max_temp = int(TMAX if TMAX < up_bound else up_bound)
temps = range(max_temp, TMIN+step, step);
print "// Thermistor lookup table for Marlin"
print "// ./createTemperatureLookupMarlin.py --rp=%s --t1=%s:%s --t2=%s:%s --t3=%s:%s --num-temps=%s" % (rp, t1, r1, t2, r2, t3, r3, num_temps)
print "// Steinhart-Hart Coefficients: a=%.15g, b=%.15g, c=%.15g " % (t.c1, t.c2, t.c3)
print "// Theoretical limits of thermistor: %.2f to %.2f degC" % (low_bound, up_bound)
print
print "const short temptable[][2] PROGMEM = {"
for temp in temps:
adc = t.adc(temp)
print " { (short) (%7.2f * OVERSAMPLENR ), %4s }%s // v=%.3f\tr=%.3f\tres=%.3f degC/count" % (adc , temp, \
',' if temp != temps[-1] else ' ', \
t.voltage(adc), \
t.resist( adc), \
t.resol( adc) \
)
print "};"
def usage():
print __doc__
if __name__ == "__main__":
main(sys.argv[1:])
|
deepmind/acme
|
acme/utils/counting.py
|
Python
|
apache-2.0
| 4,636
| 0.003883
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, hierarchical distributed counter."""
import threading
import time
from typing import Dict, Mapping, Optional, Union
from acme import core
Number = Union[int, float]
class Counter(core.Saveable):
"""A simple counter object that can periodically sync with a parent."""
def __init__(self,
parent: Optional['Counter'] = None,
prefix: str = '',
time_delta: float = 1.0,
return_only_prefixed: bool = False):
"""Initialize the counter.
Args:
parent: a Counter object to cache locally (or None for no caching).
prefix: string prefix to use for all local counts.
time_delta: time difference in seconds between syncing with the parent
counter.
return_only_prefixed: if True, and if `prefix` isn't empty, return counts
restricted to the given `prefix` on each call to `increment` and
`get_counts`. The `prefix` is stripped from returned count names.
"""
self._parent = parent
self._prefix = prefix
self._time_delta = time_delta
# Hold local counts and we'll lock around that.
# These are counts to be synced to the parent and the cache.
self._counts = {}
self._lock = threading.Lock()
# We'll sync periodically (when the last sync was more than self._time_delta
# seconds ago.)
self._cache = {}
self._last_sync_time = 0.0
self._return_only_prefixed = return_only_prefixed
def increment(self, **counts: Number) -> Dict[str, Number]:
"""Increment a set of counters.
Args:
**counts: keyword arguments specifying count increments.
Returns:
The [name, value] mapping of all counters stored, i.e. this will also
include counts that were not updated by this call to increment.
"""
with self._lock:
for key, value in counts.items():
self._counts.setdefault(key, 0)
self._counts[key] += value
return self.get_counts()
def get_counts(self) -> Dict[str, Number]:
"""Return all counts tracked by this counter."""
now = time.time()
# TODO(b/144421838): use futures instead of blocking.
if self._parent and (now - self._last_sync_time) > self._time_delta:
with self._lock:
counts = _prefix_keys(self._counts, self._prefix)
# Reset the local counts, as they will be merged into the parent and the
# cache.
self._counts = {}
self._cache = self._parent.increment(**counts)
self._last_sync_time = now
# Potentially prefix the keys in the counts dictionary.
counts = _prefix_keys(self._counts, self._prefix)
# If there's no prefix make a copy of the dictionary so we don't modify the
# internal self._counts.
if not self._prefix:
counts = dict(counts)
# Combine local counts with any parent counts.
for key, value in self._cache.items():
counts[key] = counts.get(key, 0) + value
if self._prefix and self._return_only_prefixed:
counts = dict([(key[len(self._prefix) + 1:], value)
for key, value in counts.items()
i
|
f key.startswith(f'{self._prefix}_')])
return counts
def save(self) -> Mapping[str, Mapping[str, Number]]:
return {'counts': self._counts, 'cache': self._cache}
def restore(self, state: Mapping[str, Mapping[str, Number]]):
# Force a sync, if necessary, on the next get_counts call.
self._last_sync_time = 0.
self._counts = state['counts']
self._cache = state['cache']
def _prefix_keys(dictionary: Dict[str, Number], prefix: str):
"""Return a dictionary w
|
ith prefixed keys.
Args:
dictionary: dictionary to return a copy of.
prefix: string to use as the prefix.
Returns:
Return a copy of the given dictionary whose keys are replaced by
"{prefix}_{key}". If the prefix is the empty string it returns the given
dictionary unchanged.
"""
if prefix:
dictionary = {f'{prefix}_{k}': v for k, v in dictionary.items()}
return dictionary
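# Illustrative usage sketch (not part of this module): a child counter that
# prefixes its counts and periodically folds them into a parent.
#
#   parent = Counter()
#   actor_counter = Counter(parent, prefix='actor')
#   actor_counter.increment(steps=1, episodes=1)
#   actor_counter.get_counts()   # e.g. {'actor_steps': 1, 'actor_episodes': 1}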
|
jlaurelli/movie_organizer
|
movie_organizer/settings.py
|
Python
|
mit
| 2,681
| 0
|
"""
Django settings for movie_organizer project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ak7d+2obkx$-@!3jd@l!e*#95*4vfwfb2p01_nsek^#2ke)y3@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'movies'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'movie_organizer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_proces
|
sors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'movie_organizer.wsgi.a
|
pplication'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
sensidev/serpisori
|
app/__init__.py
|
Python
|
mit
| 26
| 0
|
__author__ = 'lucifurtun'
|
silberman/Deep-OSM
|
src/download_labels.py
|
Python
|
mit
| 4,752
| 0.016204
|
'''
Extract Ways from OSM PBF files
'''
import osmium as o
import json, os, requests, sys, time
import shapely.wkb as wkblib
# http://docs.osmcode.org/pyosmium/latest/intro.html
# A global factory that creates WKB from a osmium geometry
wkbfab = o.geom.WKBFactory()
# set in Dockerfile as env variable
GEO_DATA_DIR = os.environ.get("GEO_DATA_DIR")
class WayMap():
def __init__(self, extract_type='highway'):
self.extracter = WayExtracter(extract_type)
def extract_files(self, file_list):
for path in file_list:
self.run_extraction(path)
def run_extraction(self, file_path):
t0 = time.time()
self.extracter.apply_file(file_path, locations=True)
t1 = time.time()
elapsed = "{0:.1f}".format(t1-t0)
print "EXTRACTED WAYS with locations from pbf file {}, took {}s".format(file_path, elapsed)
class WayExtracter(o.SimpleHandler):
def __init__(self, extract_type='highway'):
'''
extract_type can so far be in: highway, tennis
'''
o.SimpleHandler.__init__(self)
self.ways = []
self.way_dict = {}
self.types = []
self.extract_type = extract_type
def way(self, w):
if self.extract_type == 'tennis':
self.extract_if_tennis_court(w)
elif self.extract_type == 'highway':
self.extract_if_highway(w)
else:
print "ERROR unknown type to extract from PBF file"
def extract_if_tennis_court(self, w):
name = ''
is_tennis = False
for tag in w.tags:
if tag.k == 'sport' and 'tennis' == tag.v:
is_tennis = True
if tag.k == 'name':
name = tag.v
if not is_tennis:
return
way_dict = {
'uid': w.uid,
'ends_have_same_id': w.ends_have_same_id(),
'id': w.id,
'tags':[]}
for tag in w.tags:
way_dict['tags'].append((tag.k, tag.v))
self.add_linestring(w, way_dict)
    def extract_if_highway(self, w):
is_highway = False
is_big = False
name = ''
highway_type = None
for tag in w.tags:
if tag.k == 'name':
name = tag.v
# and tag.v in ['primary', 'secondary', 'tertiary', 'trunk']
if tag.k == 'highway':
highway_type = tag.v
is_highway = True
#try:
# if tag.k == 'lanes' and int(tag.v[len(tag.v)-1]) >= 2:
# is_big = True
# # #for t in w.tags:
      # #   print "tag {} {}".format(t.k, t.v)
#except:
# print("exception, weird lanes designation {}".format(tag.v))
# or not is_big
if not is_highway:
return
if not highway_type in self.types:
self.types.append(highway_type)
way_dict = {'visible': w.visible,
'deleted': w.deleted,
'uid': w.uid,
'highway_type': highway_type,
'ends_have_same_id': w.ends_have_same_id(),
'id': w.id,
'tags':[]}
for tag in w.tags:
way_dict['tags'].append((tag.k, tag.v))
self.add_linestring(w, way_dict)
def add_linestring(self, w, way_dict):
try:
wkb = wkbfab.create_linestring(w)
except:
# throws on single point ways
return
line = wkblib.loads(wkb, hex=True)
reverse_points = []
for point in list(line.coords):
reverse_points.append([point[1],point[0]])
way_dict['linestring'] = reverse_points
self.ways.append(way_dict)
def download_and_extract(file_urls_to_download, extract_type='highway'):
file_urls = file_urls_to_download
file_paths = download_files(file_urls)
w = WayMap(extract_type=extract_type)
w.extract_files(file_paths)
return w
def download_file(url):
local_filename = url.split('/')[-1]
full_local_filename = os.path.join(GEO_DATA_DIR, local_filename)
r = requests.get(url, stream=True)
with open(full_local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return full_local_filename
def download_files(url_list):
paths = []
print("DOWNLOADING {} PBFs...".format(len(url_list)))
t0 = time.time()
for url in url_list:
local_filename = url.split('/')[-1]
full_local_filename = os.path.join(GEO_DATA_DIR, local_filename)
if not os.path.exists(full_local_filename):
paths.append(download_file(url))
else:
paths.append(full_local_filename)
print("PBF {} already downloaded".format(full_local_filename))
if time.time()-t0 > 0.01:
print("downloads took {0:.1f}s".format(time.time()-t0))
return paths
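# Hedged usage sketch, not part of the original module: the PBF URL below is
# purely illustrative, and GEO_DATA_DIR must point at a writable directory.
#
#   way_map = download_and_extract(
#       ['http://download.geofabrik.de/example-latest.osm.pbf'],
#       extract_type='highway')
#   print len(way_map.extracter.ways)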
|
vivisect/synapse
|
synapse/lib/splice.py
|
Python
|
apache-2.0
| 2,652
| 0.000754
|
import tempfile
import synapse.common as s_common
import synapse.lib.msgpack as s_msgpack
_readsz = 10000000
def splice(act, **info):
'''
Form a splice event from a given act name and info.
Args:
act (str): The name of the action.
**info: Additional information about the event.
Example:
splice = splice('add:node', form='inet:ipv4', valu=0)
self.fire(splice)
Notes:
Splice events were reworked in v0.0.45 and now contain a sub-event of
the (act, info) under the 'mesg' key.
Returns:
(str, dict): The splice event.
'''
return (act, info)
def convertOldSplice(mesg):
'''
Converts an "old" splice event to the "new" format.
Args:
mesg ((str,dict)): An event tuple.
Examples:
Convert a splice to the new format:
newsplice = convertOldSplice(oldsplice)
Raises:
(BadSpliceMesg): The splice was unable to be converted.
Returns:
(str, dict): The splice event.
'''
if not(isinstance(mesg, tuple) and len(mesg) is 2):
raise s_common.BadSpliceMesg('invalid event mesg')
evtname = mesg[0]
if evtname != 'splice':
raise s_common.BadSpliceMesg('event mesg is not a splice')
data = mesg[1]
if data.get('mesg'):
raise s_common.BadSpliceMesg('splice has already been converted')
act = mesg[1].pop('act', None)
if not act:
raise s_common.BadSpliceMesg('splice is missing act')
return splice(act, **data)
def convertSpliceFd(fpath):
'''
Converts an "old" splice log to the new format.
Args:
fpath (str): The path to the "old" splice log file.
Example:
convertSpliceFd('/stuff/oldsplicelog.mpk')
Notes:
        This function reads an "old" splice log file, writes to a temporary
file, and then overwrites the old file with the new data. This function
only converts old splices to new splices. If any messages are invalid,
an exception will be raised and the conversion will exit early and not
overwrite any data.
Returns:
None
'''
with tempfile.SpooledTemporaryFile() as tmp:
with open(fpath, 'r+b') as fd:
for chnk in s_common.chunks(s_msgpack.iterfd(fd), 1000):
for mesg in chnk:
                    mesg = convertOldSplice(mesg)
                    tmp.write(s_msgpack.en(mesg))
tmp.seek(0)
fd.seek(0)
data = tmp.read(_readsz)
while data:
fd.write(data)
data = tmp.read(_readsz)
fd.truncate()
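# Illustrative sketch (not from the original module): converting one "old"
# splice event in memory; the form/valu values are hypothetical.
#
#   oldsplice = ('splice', {'act': 'add:node', 'form': 'inet:ipv4', 'valu': 0})
#   newsplice = convertOldSplice(oldsplice)
#   # newsplice == ('add:node', {'form': 'inet:ipv4', 'valu': 0})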
|
HackBulgaria/Odin
|
courses/south_migrations/0003_auto__chg_field_course_start_time__chg_field_course_end_time.py
|
Python
|
agpl-3.0
| 1,387
| 0.005047
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Course.start_time'
        db.alter_column(u'courses_course', 'start_time', self.gf('django.db.models.fields.DateField')())
# Changing field 'Course.end_time'
db.alter_column(u'courses_course', 'end_time', self.gf('django.db.models.fields.DateField')())
def backwards(self, orm):
# Changing field 'Course.start_time'
db.alter_column(u'courses_course', 'start_time', self.gf('django.db.models.fields.TimeField')())
# Changing field 'Course.end_time'
        db.alter_column(u'courses_course', 'end_time', self.gf('django.db.models.fields.TimeField')())
models = {
u'courses.course': {
'Meta': {'object_name': 'Course'},
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'start_time': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['courses']
|
mfraezz/osf.io
|
tests/test_registrations/test_retractions.py
|
Python
|
apache-2.0
| 43,805
| 0.003196
|
"""Tests related to retraction of public registrations"""
import datetime
from rest_framework import status as http_status
import mock
import pytest
from django.utils import timezone
from django.db import DataError
from nose.tools import * # noqa
from framework.auth import Auth
from framework.exceptions import PermissionsError
from tests.base import fake, OsfTestCase
from osf_tests.factories import (
AuthUserFactory, NodeFactory, ProjectFactory,
RegistrationFactory, UserFactory, UnconfirmedUserFactory,
UnregUserFactory, OSFGroupFactory
)
from osf.utils import tokens
from osf.exceptions import (
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
NodeStateError,
)
from osf.models import Contributor, Retraction
from osf.utils import permissions
@pytest.mark.enable_bookmark_creation
class RegistrationRetractionModelsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionModelsTestCase, self).setUp()
self.user = UserFactory()
self.registration = RegistrationFactory(creator=self.user, is_public=True)
self.valid_justification = fake.sentence()
self.invalid_justification = fake.text(max_nb_chars=3000)
def test_set_public_registration_to_private_raises_NodeStateException(self):
self.registration.save()
with assert_raises(NodeStateError):
self.registration.set_privacy('private')
self.registration.reload()
assert_true(self.registration.is_public)
def test_initiate_retraction_saves_retraction(self):
initial_count = Retraction.objects.all().count()
self.registration._initiate_retraction(self.user)
assert_equal(Retraction.objects.all().count(), initial_count + 1)
def test__initiate_retraction_does_not_create_tokens_for_unregistered_admin(self):
unconfirmed_user = UnconfirmedUserFactory()
Contributor.objects.create(node=self.registration, user=unconfirmed_user)
self.registration.add_permission(unconfirmed_user, permissions.ADMIN, save=True)
assert_equal(Contributor.objects.get(node=self.registration, user=unconfirmed_user).permission, permissions.ADMIN)
retraction = self.registration._initiate_retraction(self.user)
assert_true(self.user._id in retraction.approval_state)
assert_false(unconfirmed_user._id in retraction.approval_state)
def test__initiate_retraction_adds_admins_on_child_nodes(self):
project_admin = UserFactory()
project_non_admin = UserFactory()
child_admin = UserFactory()
child_non_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin)
project.add_contributor(project_non_admin, auth=Auth(project.creator), save=True)
child = NodeFactory(creator=child_admin, parent=project)
child.add_contributor(child_non_admin, auth=Auth(child.creator), save=True)
grandchild = NodeFactory(creator=grandchild_admin, parent=child) # noqa
registration = RegistrationFactory(project=project)
retraction = registration._initiate_retraction(registration.creator)
assert_in(project_admin._id, retraction.approval_state)
assert_in(child_admin._id, retraction.approval_state)
assert_in(grandchild_admin._id, retraction.approval_state)
assert_not_in(project_non_admin._id, retraction.approval_state)
assert_not_in(child_non_admin._id, retraction.approval_state)
# Backref tests
def test_retraction_initiator_has_backref(self):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_equal(Retraction.objects.filter(initiated_by=self.user).count(), 1)
# Node#retract_registration tests
def test_pending_retract(self):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_false(self.registration.is_retracted)
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
assert_equal(self.registration.retraction.justification, self.valid_justification)
assert_equal(self.registration.retraction.initiated_by, self.user)
assert_equal(
self.registration.retraction.initiation_date.date(),
timezone.now().date()
)
def test_retract_component_raises_NodeStateError(self):
project = ProjectFactory(is_public=True, creator=self.user)
NodeFactory(is_public=True, creator=self.user, parent=project)
registration = RegistrationFactory(is_public=True, project=project)
with assert_raises(NodeStateError):
registration._nodes.first().retract_registration(self.user, self.valid_justification)
def test_long_justification_raises_ValidationValueError(self):
with assert_raises(DataError):
self.registration.retract_registration(self.user, self.invalid_justification)
self.registration.save()
assert_is_none(self.registration.retraction)
def test_retract_private_registration_raises_NodeStateError(self):
self.registration.is_public = False
with assert_raises(NodeStateError):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_is_none(self.registration.retraction)
def test_retraction_of_registration_pending_embargo_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_retraction_of_registration_in_active_embargo_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
            (timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
        assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
retraction_approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, retraction_approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
# Retraction#approve_retraction_tests
def test_invalid_approval_token_raises_InvalidSanctionApprovalToken(self):
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
with assert_raises(InvalidSanctionApprovalToken):
self.reg
|
ejconlon/iwantaride
|
postit.py
|
Python
|
mit
| 1,204
| 0.004153
|
#!/usr/bin/env python
# ./postit.py http://localhost:5000/db/loadpost users fixtures/users.txt
# alternately, if you are running locally, visit
# http://localhost:5000/db/loadfixture/users/users.txt
# to drop the db go to
# http://localhost:5000/db/drop
# to show the db go to
# http://localhost:5000/db/show
import urllib, urllib2, httplib
def post(url, schema, key, value):
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
req = urllib2.Request(url+"/"+schema)
    connection = httplib.HTTPConnection(req.get_host())
params = urllib.urlencode({key: value})
print params
connection.request('POST', req.get_selector(),
params, headers)
response = connection.getresponse()
print response.status, response.reason
data = response.read()
connection.close()
def splitline(line):
return [x for x in (y.strip() for y in line.split(',')) if len(x)]
if __name__ == "__main__":
import sys
url = sys.argv[1]
schema = sys.argv[2]
filename = sys.argv[3]
lines = None
with open(filename, 'r') as f:
lines = f.read()
post(url, schema, 'payload', lines)
|
watchdogpolska/poradnia
|
poradnia/users/migrations/0023_auto_20220103_1354.py
|
Python
|
mit
| 618
| 0.001618
|
# Generated by Django 2.2.25 on 2022-01-03 12:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0022_auto_20191015_0510"),
]
operations = [
migrations.AlterField(
model_name="user",
name="notify_unassigned_letter",
            field=models.BooleanField(
default=False,
help_text="Whether or not to notify user about any letter in case without anybody who can reply to client",
verbose_name="Defaults to reply in cases",
),
),
]
|
systers/hyperkitty
|
hyperkitty/tests/_test_caching.py
|
Python
|
gpl-3.0
| 9,926
| 0.004332
|
# -*- coding: utf-8 -*-
# flake8: noqa
import unittest
import datetime
import uuid
from urllib.error import HTTPError
from mock import Mock
from mailman.email.message import Message
from mailman.interfaces.archiver import ArchivePolicy
#import kittystore.utils
#from kittystore import get_store
#from kittystore.caching import mailman_user
#from kittystore.test import FakeList, SettingsModule
class ListCacheTestCase(unittest.TestCase):
def setUp(self):
self.store = get_store(SettingsModule(), auto_create=True)
kittystore.utils.MM_CLIENT = Mock()
def tearDown(self):
self.store.close()
kittystore.utils.MM_CLIENT = None
def test_properties_on_new_message(self):
ml = FakeList("example-list")
ml.display_name = "name 1"
ml.subject_prefix = "[prefix 1]"
ml.description = "desc 1"
kittystore.utils.MM_CLIENT.get_list.side_effect = lambda n: ml
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
ml_db = self.store.get_lists()[0]
self.assertEqual(ml_db.display_name, "name 1")
self.assertEqual(ml_db.subject_prefix, "[prefix 1]")
ml.display_name = "name 2"
ml.subject_prefix = "[prefix 2]"
ml.description = "desc 2"
ml.archive_policy = "private"
msg.replace_header("Message-ID", "<dummy2>")
self.store.add_to_list("example-list", msg)
ml_db = self.store.get_lists()[0]
#ml_db = self.store.db.find(List).one()
self.assertEqual(ml_db.display_name, "name 2")
self.assertEqual(ml_db.subject_prefix, "[prefix 2]")
self.assertEqual(ml_db.description, "desc 2")
self.assertEqual(ml_db.archive_policy, ArchivePolicy.private)
def test_on_old_message(self):
kittystore.utils.MM_CLIENT = None
olddate = datetime.datetime.utcnow() - datetime.timedelta(days=40)
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg["Date"] = olddate.isoformat()
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
ml_db = self.store.get_lists()[0]
self.assertEqual(ml_db.recent_participants_count, 0)
self.assertEqual(ml_db.recent_threads_count, 0)
class FakeMMUser(object):
user_id = None
class UserIdCacheTestCase(unittest.TestCase):
def setUp(self):
self.store = get_store(SettingsModule(), auto_create=True)#, debug=True)
self.mm_client = Mock()
mailman_user._MAILMAN_CLIENT = self.mm_client
self.mm_client.get_user.side_effect = HTTPError(
None, 404, "dummy", {}, None)
def tearDown(self):
self.store.close()
mailman_user._MAILMAN_CLIENT = None
def test_on_new_message_userid(self):
# Check that the user_id is set on a new message
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
# setup Mailman's reply
new_user_id = FakeMMUser()
uid = uuid.uuid1()
new_user_id.user_id = uid.int
self.mm_client.get_user.side_effect = lambda addr: new_user_id
# check the User does not exist yet
self.assertEqual(0,
self.store.get_message_count_by_user_id(uid))
# do the test and check
self.store.add_to_list("example-list", msg)
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, uid)
self.assertTrue(dbmsg.sender.user is not None,
"A 'User' instance was not created")
self.assertEqual(dbmsg.sender.user.id, uid)
self.assertEqual(1,
self.store.get_message_count_by_user_id(uid))
self.assertEqual(self.store.get_users_count(), 1)
def test_on_new_message_no_reply_from_mailman(self):
# Check that the user_id is set on a new message
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, None)
def test_sync_mailman_user(self):
# Check that the user_id is set when sync_mailman_user is run
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
self.store.add_to_list("example-list", msg)
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, None)
# setup Mailman's reply
uid = uuid.uuid1()
new_user_id = FakeMMUser()
new_user_id.user_id = uid.int
self.mm_client.get_user.side_effect = lambda addr: new_user_id
# do the test and check
mailman_user.sync_mailman_user(self.store)
#dbmsg = self.store.get_message_by_id_from_list(
# "example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, uid)
self.assertTrue(dbmsg.sender.user is not None,
"A 'User' instance was not created")
self.assertEqual(dbmsg.sender.user.id, uid)
self.assertEqual(1,
self.store.get_message_count_by_user_id(uid))
def test_on_new_message_bad_reply_from_mailman(self):
# Check that errors from mailmanclient are handled gracefully
self.mm_client.get_user.side_effect = ValueError
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
try:
self.store.add_to_list("example-list", msg)
except ValueError as e:
self.fail("Errors from mailmanclient should be handled gracefully")
dbmsg = self.store.get_message_by_id_from_list(
"example-list", "dummy")
self.assertEqual(dbmsg.sender.user_id, None)
class TestNotifyStore(unittest.TestCase):
def setUp(self):
self.store = get_sa_store(SettingsModule(), auto_create=True)
self.store.db.cache.get_or_create = Mock()
self.store.db.cache.get_or_create.side_effect = lambda *a, **kw: a[1]()
self.store.db.cache.set = Mock()
# cache.delete() will be called if the cache is invalidated
self.store.db.cache.delete = Mock()
def tearDown(self):
self.store.close()
def test_on_new_message_invalidate(self):
# Check that the cache is invalidated on new message
msg = Message()
msg["From"] = "dummy@example.com"
msg["Message-ID"] = "<dummy>"
msg.set_payload("Dummy message")
today = datetime.datetime.utcnow().date() # don't use datetime.date.today(), we need UTC
self.store.add_to_list("example-list", msg)
# calls to cache.delete() -- invalidation
delete_args = [ call[0][0] for call in
self.store.db.cache.delete.call_args_list ]
#from pprint import pprint; pprint(delete_args)
self.assertEqual(set(delete_args), set([
'list:example-list:recent_participants_count',
'list:example-list:recent_threads_count',
'list:example-list:participants_count:%d:%d' % (today.year, today.month),
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:emails_count',
'list:example-list:thread:QKODQBCADMDSP5YPOPKECXQWEQAMXZL3:participants_count'
]))
# calls to cache.get_or_create() -- repopulation
goc_args = [ call[0][0] for call in
self.store.db.cache.get_or_create.call_args_list ]
#from pprint import pprint; pprint(goc_args)
self.assertEqual(set(goc_args), set([
'list:example-list:recent_participants_count',
'lis
|
kmarius/qutebrowser
|
qutebrowser/browser/webengine/webenginetab.py
|
Python
|
gpl-3.0
| 37,530
| 0.00008
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import sys
import re
import html as html_utils
import sip
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QEvent, QPoint, QPointF,
QUrl, QTimer)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.config import configdata
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(webenginesettings.inject_userscripts)
webenginesettings.inject_userscripts()
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
def show_source(self):
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError:
# Qt < 5.8
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
urlstr = self._tab.url().toString(QUrl.RemoveUserInfo)
# The original URL becomes the path of a view-source: URL
# (without a host), but query/fragment should stay.
url = QUrl('view-source:' + urlstr)
tb.tabopen(url, background=False, related=True)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page.
Attributes:
_flags: The QWebEnginePage.FindFlags of the last search.
_pending_searches: How many searches have been started but not called
back yet.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
self._pending_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
self._pending_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
self._pending_searches -= 1
if self._pending_searches > 0:
# See https://github.com/qutebrowser/qutebrowser/issues/2442
# and https://github.com/qt/qtwebengine/blob/5.10/src/core/web_contents_adapter.cpp#L924-L934
log.webview.debug("Ignoring cancelled search callback with "
"{} pending searches".format(
self._pending_searches))
return
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= Q
|
itaiag/blackjack
|
blackjack.py
|
Python
|
apache-2.0
| 17,936
| 0.004014
|
#!/usr/bin/env python
from random import Random
colors_support = True
try:
from colorama import init, Fore
init()
except:
colors_support = False
print "For colors install colorama"
hint_table = \
{('5',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('6',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('7',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('8',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('9',): {'A': 'h', '10': 'h', '3': 'd', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('10',): {'A': 'h', '10': 'h', '3': 'd', '2': 'd', '5': 'd', '4': 'd', '7': 'd', '6': 'd', '9': 'd', '8': 'd'},
('11',): {'A': 'h', '10': 'd', '3': 'd', '2': 'd', '5': 'd', '4': 'd', '7': 'd', '6': 'd', '9': 'd', '8': 'd'},
('12',): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('13',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('14',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('15',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('16',): {'A': 'h', '10': 'h', '3': 's', '2': 's', '5': 's', '4': 's', '7': 'h', '6': 's', '9': 'h', '8': 'h'},
('17',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('18',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('19',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('20',): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('2', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'h', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('3', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'h', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('4', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('5', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
 ('6', 'A'): {'A': 'h', '10': 'h', '3': 'd', '2': 'h', '5': 'd', '4': 'd', '7': 'h', '6': 'd', '9': 'h', '8': 'h'},
('7', 'A'): {'A': 'h', '10': 'h', '3': 'd', '2': 's', '5': 'd', '4': 'd', '7': 's', '6': 'd', '9': 'h', '8': 's'},
 ('8', 'A'): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('9', 'A'): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'},
('A', 'A'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('2', '2'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('3', '3'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('4', '4'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('5', '5'): {'A': 'h', '10': 'h', '3': 'd', '2': 'd', '5': 'd', '4': 'd', '7': 'd', '6': 'd', '9': 'd', '8': 'd'},
('6', '6'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('7', '7'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('8', '8'): {'A': 'h', '10': 'h', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 'h', '6': 'h', '9': 'h', '8': 'h'},
('9', '9'): {'A': 's', '10': 's', '3': 'h', '2': 'h', '5': 'h', '4': 'h', '7': 's', '6': 'h', '9': 'h', '8': 'h'},
('10', '10'): {'A': 's', '10': 's', '3': 's', '2': 's', '5': 's', '4': 's', '7': 's', '6': 's', '9': 's', '8': 's'}}
def color(color):
if colors_support:
if color is "green":
return Fore.GREEN # @UndefinedVariable
elif color is "red":
return Fore.RED # @UndefinedVariable
elif color is "white":
return Fore.WHITE # @UndefinedVariable
elif color is "yellow":
return Fore.YELLOW # @UndefinedVariable
elif color is "blue":
return Fore.BLUE # @UndefinedVariable
else:
return Fore.WHITE # @UndefinedVariable
else:
return ''
class Bookie(object):
def __init__(self, credit=1000):
self.credit = credit
self.bet = None
self.previous_bet = None
def place_bet(self, bet=None, ratio=2):
if bet is None and self.previous_bet is None:
raise Exception("No bet was specified")
if bet is None and self.previous_bet is not None:
# Using the last bet
bet = self.previous_bet
if bet > self.credit:
raise Exception("There is only {0} in credit\
, can't place bet of {1}".format(self.credit, bet))
self.ratio = ratio
self.previous_bet = bet
self.bet = bet
def report_win(self):
if self.bet is None:
raise Exception("No bet was placed")
self.credit += self.bet * self.ratio - self.bet
def report_lose(self):
if self.bet is None:
raise Exception("No bet was placed")
self.credit -= self.bet
def double_bet(self):
if self.bet is None:
raise Exception("No bet was placed")
self.bet *= 2
def half_bet(self):
if self.bet is None:
raise Exception("No bet was placed")
self.bet /= 2
def abort_bet(self):
self.bet = 0
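# Hedged example, not part of the original game loop: one winning 3:2 payout
# followed by one losing bet against the default 1000 credit.
#
#   bookie = Bookie()
#   bookie.place_bet(100, ratio=1.5)
#   bookie.report_win()    # credit: 1000 + 100 * 1.5 - 100 = 1050
#   bookie.place_bet(100)
#   bookie.report_lose()   # credit: 1050 - 100 = 950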
class Deck(object):
def __init__(self, num_of_decks):
self.cards = []
self.rand = Random()
for deck_num in range(num_of_decks * 4):
self.cards.extend(range(2, 11))
self.cards.extend(['J'] * 4 * num_of_decks)
self.cards.extend(['Q'] * 4 * num_of_decks)
self.cards.extend(['K'] * 4 * num_of_decks)
self.cards.extend(['A'] * 4 * num_of_decks)
def get_card(self):
card_num = self.rand.randint(0, len(self.cards) - 1)
card = self.cards[card_num]
del self.cards[card_num]
return card
class Player(object):
def __init__(self, deck):
self.cards = []
self.deck = deck
def draw_card_from_deck(self):
self.cards.append(self.deck.get_card())
def get_sum_of_cards(self):
sum_of_cards = 0
aces = 0
for card in self.cards:
# Each one of the faces card is 10
if card is 'J' or card is 'Q' or card is 'K':
sum_of_cards += 10
elif card is 'A':
aces += 1
elif card is 'X':
# Hidden card
continue
else:
sum_of_cards += card
# We need to see how to handle aces
if aces > 0:
temp_sum = 11 + (aces - 1) + sum_of_cards
if temp_sum <= 21:
sum_of_cards = temp_sum
else:
sum_of_cards += aces
return sum_of_cards
def get_cards(self):
return self.cards
class MachinePlayer(Player):
def __init__(self, deck):
super(MachinePlayer, self).__init__(deck)
self.hidden_card = None
def should_take_another_card(self, player):
if self.get_sum_of_cards() < 17 or\
(self.get_sum_of_cards() is 17 and
self.cards.count('A') is 1):
return True
return False
def draw_card_from_deck(self):
if len(self.cards) is 1 and self.hidden_card is None:
|
scikit-multilearn/scikit-multilearn
|
skmultilearn/cluster/networkx.py
|
Python
|
bsd-2-clause
| 6,829
| 0.003075
|
from __future__ import absolute_import
import community
import networkx as nx
from networkx.algorithms.community import asyn_lpa_communities
import numpy as np
from .base import LabelGraphClustererBase
from .helpers import _membership_to_list_of_communities
class NetworkXLabelGraphClusterer(LabelGraphClustererBase):
"""Cluster label space with NetworkX community detection
This clusterer constructs a NetworkX representation of the Label Graph generated by graph builder and detects
communities in it using methods from the NetworkX library. Detected communities are converted to
a label space clustering.
Parameters
----------
graph_builder: a GraphBuilderBase inherited transformer
the graph builder to provide the adjacency matrix and weight map for the underlying graph
method: string
the community detection method to use, this clusterer supports the following community detection methods:
+----------------------+--------------------------------------------------------------------------------+
| Method name string | Description |
+----------------------+--------------------------------------------------------------------------------+
| louvain_ | Detecting communities with largest modularity using incremental greedy search |
+----------------------+--------------------------------------------------------------------------------+
| label_propagation_ | Detecting communities from multiple async label propagation on the graph |
+----------------------+--------------------------------------------------------------------------------+
.. _louvain: https://python-louvain.readthedocs.io/en/latest/
.. _label_propagation: https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.community.label_propagation.asyn_lpa_communities.html
Attributes
----------
graph_ : networkx.Graph
the networkx Graph object containing the graph representation of graph builder's adjacency matrix and weights
    weights_ : { 'weight' : list of values in edge order of graph edges }
edge weights stored in a format recognizable by the networkx module
References
----------
    If you use this clusterer please cite the NetworkX paper and the clustering paper:
.. code :: latex
@unknown{networkx,
author = {Hagberg, Aric and Swart, Pieter and S Chult, Daniel},
year = {2008},
month = {01},
title = {Exploring Network Structure, Dynamics, and Function Using NetworkX},
booktitle = {Proceedings of the 7th Python in Science Conference}
}
@article{blondel2008fast,
title={Fast unfolding of communities in large networks},
author={Blondel, Vincent D and Guillaume, Jean-Loup and Lambiotte, Renaud and Lefebvre, Etienne},
journal={Journal of statistical mechanics: theory and experiment},
volume={2008},
number={10},
pages={P10008},
year={2008},
publisher={IOP Publishing}
}
Examples
--------
An example code for using this clusterer with a classifier looks like this:
.. code-block:: python
from sklearn.ensemble import RandomForestClassifier
from skmultilearn.problem_transform import LabelPowerset
from skmultilearn.cluster import NetworkXLabelGraphClusterer, LabelCooccurrenceGraphBuilder
from skmultilearn.ensemble import LabelSpacePartitioningClassifier
# construct base forest classifier
base_classifier = RandomForestClassifier(n_estimators=1000)
# construct a graph builder that will include
# label relations weighted by how many times they
# co-occurred in the data, without self-edges
graph_builder = LabelCooccurrenceGraphBuilder(
weighted = True,
include_self_edges = False
)
# setup problem transformation approach with sparse matrices for random forest
problem_transform_classifier = LabelPowerset(classifier=base_classifier,
require_dense=[False, False])
# setup the clusterer to use, we selected the modularity-based approach
clusterer = NetworkXLabelGraphClusterer(graph_builder=graph_builder, method='louvain')
# setup the ensemble metaclassifier
classifier = LabelSpacePartitioningClassifier(problem_transform_classifier, clusterer)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
For more use cases see `the label relations exploration guide <../labelrelations.ipynb>`_.
"""
def __init__(self, graph_builder, method):
"""Initializes the clusterer
Attributes
----------
graph_builder: a GraphBuilderBase inherited transformer
Class used to provide an underlying graph for NetworkX
"""
super(NetworkXLabelGraphClusterer, self).__init__(graph_builder)
self.method = method
def fit_predict(self, X, y):
"""Performs clustering on y and returns list of label lists
Builds a label graph using the provided graph builder's `transform` method
on `y` and then detects communities using the selected `method`.
Sets :code:`self.weights_` and :code:`self.graph_`.
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
        array of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
edge_map = self.graph_builder.transform(y)
if self.graph_builder.is_weighted:
self.weights_ = dict(weight=list(edge_map.values()))
else:
self.weights_ = dict(weight=None)
self.graph_ = nx.Graph()
for n in range(y.shape[1]):
self.graph_.add_node(n)
for e, w in edge_map.items():
self.graph_.add_edge(e[0], e[1], weight=w)
if self.method == 'louvain':
partition_dict = community.best_partition(self.graph_)
memberships = [partition_dict[i] for i in range(y.shape[1])]
return np.array(
_membership_to_list_of_communities(
memberships,
1 + max(memberships)
)
)
else:
return np.array([list(i) for i in asyn_lpa_communities(self.graph_, 'weight')])
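# Minimal direct-use sketch (shapes assumed, not taken from the docs above):
# y may be any scipy.sparse label matrix of shape (n_samples, n_labels).
#
#   from skmultilearn.cluster import LabelCooccurrenceGraphBuilder
#   builder = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False)
#   clusterer = NetworkXLabelGraphClusterer(graph_builder=builder, method='louvain')
#   communities = clusterer.fit_predict(None, y)  # array of arrays of label indexes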
|
macwis/simplehr
|
candidates/migrations/0027_auto_20171227_1432.py
|
Python
|
gpl-3.0
| 507
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-12-27 14:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0026_auto_20171227_1429'),
]
operations = [
migrations.RemoveField(
model_name='candidate',
name='location',
),
migrations.RemoveField(
            model_name='position',
name='location',
),
]
|
tiborsimko/invenio-records-restapi
|
invenio_records_rest/utils.py
|
Python
|
gpl-2.0
| 7,700
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""General utility functions module."""
from functools import partial
import six
from flask import abort, current_app, jsonify, make_response, request, url_for
from invenio_pidstore.errors import PIDDeletedError, PIDDoesNotExistError, \
PIDMissingObjectError, PIDRedirectedError, PIDUnregistered
from invenio_pidstore.resolver import Resolver
from invenio_records.api import Record
from werkzeug.routing import BaseConverter, BuildError, PathConverter
from werkzeug.utils import cached_property, import_string
from .errors import PIDDeletedRESTError, PIDDoesNotExistRESTError, \
PIDMissingObjectRESTError, PIDRedirectedRESTError, \
PIDUnregisteredRESTError
from .proxies import current_records_rest
def build_default_endpoint_prefixes(records_rest_endpoints):
"""Build the default_endpoint_prefixes map."""
pid_types = set()
guessed = set()
endpoint_prefixes = {}
for key, endpoint in records_rest_endpoints.items():
pid_type = endpoint['pid_type']
pid_types.add(pid_type)
is_guessed = key == pid_type
is_default = endpoint.get('default_endpoint_prefix', False)
if is_default:
if pid_type in endpoint_prefixes and pid_type not in guessed:
raise ValueError('More than one "{0}" defined.'.format(
pid_type
))
endpoint_prefixes[pid_type] = key
guessed -= {pid_type}
elif is_guessed and pid_type not in endpoint_prefixes:
endpoint_prefixes[pid_type] = key
guessed |= {pid_type}
not_found = pid_types - set(endpoint_prefixes.keys())
if not_found:
raise ValueError('No endpoint-prefix for {0}.'.format(
', '.join(not_found)
))
return endpoint_prefixes
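# Hedged illustration (the endpoint dicts are trimmed to the keys this
# function reads; the 'recid' endpoints are hypothetical): the endpoint
# flagged as default wins the prefix for its pid_type.
#
#   build_default_endpoint_prefixes({
#       'recid': {'pid_type': 'recid'},
#       'recid_v2': {'pid_type': 'recid', 'default_endpoint_prefix': True},
#   })
#   -> {'recid': 'recid_v2'}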
def obj_or_import_string(value, default=None):
"""Import string or return object.
:params value: Import path or class object to instantiate.
:params default: Default object to return if the import fails.
:returns: The imported object.
"""
if isinstance(value, six.string_types):
return import_string(value)
elif value:
return value
return default
def load_or_import_from_config(key, app=None, default=None):
"""Load or import value from config.
:returns: The loaded value.
"""
app = app or current_app
imp = app.config.get(key)
return obj_or_import_string(imp, default=default)
def allow_all(*args, **kwargs):
"""Return permission that always allow an access.
:returns: A object instance with a ``can()`` method.
"""
return type('Allow', (), {'can': lambda self: True})()
def deny_all(*args, **kwargs):
"""Return permission that always deny an access.
:returns: A object instance with a ``can()`` method.
"""
return type('Deny', (), {'can': lambda self: False})()
def check_elasticsearch(record, *args, **kwargs):
"""Return permission that check if the record exists in ES index.
:params record: A record object.
:returns: A object instance with a ``can()`` method.
"""
def can(self):
"""Try to search for given record."""
search = request._methodview.search_class()
search = search.get_record(str(record.id))
return search.count() == 1
return type('CheckES', (), {'can': can})()
class LazyPIDValue(object):
"""Lazy PID resolver.
The PID will not be resolved until the `data` property is accessed.
"""
def __init__(self, resolver, value):
"""Initialize with resolver object and the PID value.
:params resolver: Resolves for PID,
see :class:`invenio_pidstore.resolver.Resolver`.
:params value: PID value.
:type value: str
"""
self.resolver = resolver
self.value = value
@cached_property
def data(self):
"""Resolve PID from a value and return a tuple with PID and the record.
:returns: A tuple with the PID and the record resolved.
"""
try:
return self.resolver.resolve(self.value)
except PIDDoesNotExistError as pid_error:
raise PIDDoesNotExistRESTError(pid_error=pid_error)
except PIDUnregistered as pid_error:
raise PIDUnregisteredRESTError(pid_error=pid_error)
except PIDDeletedError as pid_error:
raise PIDDeletedRESTError(pid_error=pid_error)
except PIDMissingObjectError as pid_error:
current_app.logger.exception(
'No object assigned to {0}.'.format(pid_error.pid),
extra={'pid': pid_error.pid})
raise PIDMissingObjectRESTError(pid_error.pid, pid_error=pid_error)
except PIDRedirectedError as pid_error:
try:
location = url_for(
'.{0}_item'.format(
current_records_rest.default_endpoint_prefixes[
pid_error.destination_pid.pid_type]),
pid_value=pid_error.destination_pid.pid_value)
data = dict(
status=301,
message='Moved Permanently',
location=location,
)
response = make_response(jsonify(data), data['status'])
response.headers['Location'] = location
abort(response)
except (BuildError, KeyError):
current_app.logger.exception(
'Invalid redirect - pid_type "{0}" '
'endpoint missing.'.format(
pid_error.destination_pid.pid_type),
extra={
'pid': pid_error.pid,
'destination_pid': pid_error.destination_pid,
})
raise PIDRedirectedRESTError(
pid_error.destination_pid.pid_type, pid_error=pid_error)
class PIDConverter(BaseConverter):
"""Converter for PID values in the route mapping.
This class is a custom routing converter defining the 'PID' type.
See http://werkzeug.pocoo.org/docs/0.12/routing/#custom-converters.
Use ``pid`` as a type in the route pattern, e.g.: the use of
route decorator: ``@blueprint.route('/record/<pid(recid):pid_value>')``,
will match and resolve a path: ``/record/123456``.
"""
def __init__(self, url_map, pid_type, getter=None, record_class=None):
"""Initialize the converter."""
super(PIDConverter, self).__init__(url_map)
getter = obj_or_import_string(getter, default=partial(
obj_or_import_string(record_class, default=Record).get_record,
with_deleted=True
))
self.resolver = Resolver(pid_type=pid_type, object_type='rec',
                                 getter=getter)
def to_python(self, value):
"""Resolve PID value."""
return LazyPIDValue(self.resolver, value)
class PIDPathConverter(PIDConverter, PathConverter):
"""PIDConverter with support for path-like (with slashes) PID values.
This class is a custom routing converter defining the 'PID' type.
    See http://werkzeug.pocoo.org/docs/0.12/routing/#custom-converters.
    Use ``pidpath`` as a type in the route pattern, e.g.: the use of a route
decorator: ``@blueprint.route('/record/<pidpath(recid):pid_value>')``,
will match and resolve a path containing a DOI: ``/record/10.1010/12345``.
"""
|
Drhealsgood/learning_django
|
polls/views.py
|
Python
|
mit
| 1,526
| 0.006553
|
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views import generic
from polls.models import Choice,Poll
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_poll_list'
def get_queryset(self):
"""
Return the last five published polls (not including those set to be
published in the future).
"""
return Poll.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Poll
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Poll
template_name = 'polls/results.html'
def vote(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# display voting form
return render(request, 'polls/detail.html',
{
'poll':p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results',args=(p.id,)))
|
kensonman/webframe
|
models.py
|
Python
|
apache-2.0
| 46,692
| 0.029256
|
# -*- coding: utf-8 -*-
# File: webframe/models.py
# Author: Kenson Man <kenson@kensonidv.hk>
# Date: 2020-10-17 12:29
# Desc: Provide the basic model for webframe
from datetime import datetime
from deprecation import deprecated
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.shortcuts import get_object_or_404 as getObj
from django.utils import timezone as tz
from django.utils.translation import ngettext, get_language, ugettext_lazy as _
from json import JSONEncoder
from pathlib import Path
from shutil import copyfile
from .CurrentUserMiddleware import get_current_user, get_current_request
from .functions import getBool, getClass, getTime, FMT_DATE, FMT_TIME, FMT_DATETIME, isUUID, TRUE_VALUES, getSecretKey, encrypt, decrypt, ENCRYPTED_PREFIX, LogMessage as lm, cache
import math, uuid, logging, json, pytz, re, sys, os
logger=logging.getLogger('webframe.models')
DATEFMT='%Y-%m-%d %H:%M:%S.%fT%z'
fmt=lambda d: 'null' if d is None else d.strftime(DATEFMT)
rfmt=lambda d: None if d=='null' else datetime.strptime(d, DATEFMT)
nullValue=_('null') #Make sure the null value can be translate
#Make sure the following transaction
_('Traditional Chinese')
_('English')
def valueOf(val):
'''
Parse the value into string format
'''
if isinstance(val, datetime):
rst=fmt(val)
elif isinstance(val, get_user_model()):
rst=val.username
elif isinstance(val, uuid.UUID):
rst=val.hex
elif isinstance(val, ValueObject):
rst=val.id.hex
else:
rst=val
return rst
def parseVal(field, val):
'''
Parse the value from dumpable format
'''
typ=field.get_internal_type()
if val is None:
return None
elif typ in ['AutoField', 'IntegerField', 'SmallIntegerField']:
return int(val)
elif typ in ['BigAutoField', 'BigIntegerField']:
return long(val)
elif typ in ['FloatField', 'DecimalField']:
return float(val)
elif typ == 'BooleanField':
return getBool(val)
elif typ in ['UUIDField']:
return uuid.UUID(val)
elif typ in ['CharField', 'TextField', 'EmailField', 'URLField']:
return str(val)
elif typ == 'DateTimeField':
return datetime.strptime(val, DATEFMT)
elif typ == 'ForeignKey':
if field.related_model is get_user_model():
try:
return get_user_model().objects.get(username=val)
except get_user_model().DoesNotExist:
rst=get_current_user()
logger.warning('Specify user<%s> not found, use current user<%s> instead.'%(val, rst))
return rst
return getObj(field.related_model, id=val)
return str(val)
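# Illustrative round trip with the helpers above (the model and field names
# are hypothetical): valueOf() flattens values for dumping, and parseVal()
# rebuilds the typed value from the field's internal type.
#
#   valueOf(uuid.UUID('12345678123456781234567812345678'))
#   -> '12345678123456781234567812345678'
#   parseVal(SomeModel._meta.get_field('price'), '9.5')   # a DecimalField
#   -> 9.5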
class ValueEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, uuid.UUID):
return o.hex
return super(ValueEncoder, self).default(o)
class Dictable(object):
'''
The class to provide the import/export json method.
'''
META_TYPE='_type_'
META_VERS='_vers_'
def expDict(self, **kwargs):
'''
      The method to export a dictionary. It ignores properties that:
      - are prefixed with "_"
      - are suffixed with "_id"
'''
src=self.__dict__
rst=dict()
for k in src:
if k.startswith('_'): continue
if k.endswith('_id'): continue
rst[k]=src[k]
rst[Dictable.META_TYPE]="%s.%s"%(self.__class__.__module__, self.__class__.__name__)
rst[Dictable.META_VERS]=self._getDictVers()
for f in self.__class__._meta.get_fields():
if isinstance(f, models.Field):
n=f.name
v=getattr(self, n)
rst[n]=valueOf(v)
return rst
def impDict(self, data, **kwargs):
'''
The method to import from dictionary.
'''
if not Dictable.META_TYPE in data: raise TypeError('This is not the dictionary created by expDict. No type information found')
if not isinstance(self, getClass(data[Dictable.META_TYPE])): raise TypeError('Cannot import %s as %s'%(data[Dictable.META_TYPE], self.__class__))
if hasattr(self, Dictable.META_VERS):
      if self._getDictVers() != data[Dictable.META_VERS]: raise IOError('Version mismatched. Requesting %s but %s'%(getattr(self, Dictable.META_VERS), data[Dictable.META_VERS]))
for f in self.__class__._meta.get_fields():
if isinstance(f, models.Field):
n=f.name
v=parseVal(f, data.get(n, None))
setattr(self, n, v)
if getBool(kwargs.get('createNew', 'false')): self.id=None
if getBool(kwargs.get('autoSave', 'false')): self.save()
return self
def _getDictVers(self):
'''
Getter of the dictionary version. It is used to limit the version of dict.
'''
return getattr(self, Dictable.META_VERS, '1')
@staticmethod
def getType( instance ):
mod=instance.__class__.__module__
if mod is None or mod==str.__class__.__module__:
return instance.__class__.__name__
else:
return '{0}.{1}'.format(mod, instance.__class__.__name__)
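# Sketch of the intended round trip (the model class and instance below are
# hypothetical, not defined in this module): export to a plain dict carrying
# the _type_/_vers_ metadata, then rebuild a fresh instance of the same class.
#
#   data = obj.expDict()          # includes _type_ and _vers_ keys
#   copy = SomeModel().impDict(data, createNew='true', autoSave='false')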
class ValueObject(models.Model, Dictable):
CACHED='__CACHED__'
class Meta(object):
abstract = True
verbose_name = _('ValueObject')
verbose_name_plural = _('ValueObjects')
# view_* permission becomes the default permissions Django 3.0
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
verbose_name=_('ValueObject.id'),
help_text=_('ValueObject.id.helptext'),
)
lmd = models.DateTimeField(
auto_now=True,
verbose_name=_('ValueObject.lmd'),
help_text=_('ValueObject.lmd.helptext'),
)
lmb = models.ForeignKey(
settings.AUTH_USER_MODEL,
default=get_current_user,
null=True,
blank=True,
on_delete=models.CASCADE, #Since Django 2.0, the on_delete field is required.
related_name='%(class)s_lmb',
verbose_name=_('ValueObject.lmb'),
help_text=_('ValueObject.lmb.helptext'),
)
cd = models.DateTimeField(
auto_now_add=True,
verbose_name=_('ValueObject.cd'),
help_text=_('ValueObject.cd.helptext'),
)
cb = models.ForeignKey(
settings.AUTH_USER_MODEL,
default=get_current_user,
null=True,
blank=True,
on_delete=models.CASCADE, #Since Django 2.0, the on_delete field is required.
related_name='%(class)s_cb',
verbose_name=_('ValueObject.cb'),
help_text=_('ValueObject.cb.helptext'),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'id' in kwargs: self.id=kwargs['id']
if 'cb' in kwargs: self.cb=kwargs['cb']
if 'cd' in kwargs: self.cd=kwargs['cd']
if 'lmb' in kwargs: self.lmb=kwargs['lmb']
if 'lmd' in kwargs: self.lmd=kwargs['lmd']
@property
def isNew(self):
return self.lmd is None
@property
def isNotNew(self):
return self.lmd is not None
def id_or_new(self):
if self.isNew:
return 'new'
return self.id.hex
def save(self, *args, **kwargs):
'''
Saving the value-object. The method will setup the lmb default value
'''
user=get_current_user()
if user:
if not user.is_authenticated: user=None
if kwargs.get('update_lmb', 'true') in TRUE_VALUES:
self.lmb=user
if kwargs.get('update_cb', 'true') in TRUE_VALUES:
try:
if not self.cb: self.cb=user
except TypeError:
self.cb=user
super(ValueObject, self).save()
def expDict(self):
return {
Dictable.META_VERS: 1
, Dictable.META_TYPE: Dictable.getType(self)
, 'id': self.id
, 'cb': {Dictable.META_TYPE: Dictable.getType(self.cb), 'id': self.cb.id, 'username': self.cb.username, 'email': self.cb.email} if self.cb else None
, 'cd': self.cd
, 'lmb': {Dictable.META_TYPE: Dictable.getType(self.lmb), 'id': self.lmb.
|
samupl/simpleERP
|
apps/contacts/models.py
|
Python
|
mit
| 6,028
| 0.003484
|
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext_lazy
from django_countries import countries
from django_countries.fields import CountryField
COUNTRIES = [(ugettext_lazy(name), code) for (name, code) in list(countries)]
class Company(models.Model):
# Company credentials
company_name = models.CharField(_('Company name'), max_length=1024, null=True, blank=True)
company_nip = models.CharField(_('NIP'), max_length=10, null=True, blank=True)
company_regon = models.CharField(_('REGON'), max_length=9, null=True, blank=True)
# Common address credentials
address_city = models.CharField(_('City'), max_length=1024, null=True, blank=True)
address_street = models.CharField(_('Street'), max_length=1024, null=True, blank=True)
address_postcode = models
|
.CharField(_('Postal code'), max_length=10, null=True, blank=True)
address_country = CountryField(max_length=512, null=True, blank=True)
@property
def company(se
|
lf):
return '{company} (NIP: {nip}, REGON: {regon})'.format(
company=self.company_name,
nip=self.company_nip,
regon=self.company_regon
)
@property
def address_country_verbose(self):
return countries.countries[self.address_country]
def __str__(self):
return self.company
class Meta:
verbose_name = _('Company')
verbose_name_plural = _('Companies')
class CompanyBankAccount(models.Model):
slug = models.CharField(
_('Short name'),
max_length=16, unique=True,
)
company = models.ForeignKey(Company)
bank_account_number = models.CharField(
_('Bank account number'),
max_length=512, null=True, blank=True,
)
iban = models.CharField(
_('IBAN'),
max_length=512, null=True, blank=True,
)
swift = models.CharField(
_('SWIFT Code'),
max_length=512, null=True, blank=True,
)
sorting_number = models.CharField(
_('Sorting number'),
max_length=512, null=True, blank=True,
)
bank_name = models.CharField(
_('Bank name'),
max_length=1024, null=True, blank=True,
)
bank_branch = models.CharField(
_('Bank branch'),
max_length=1024, null=True, blank=True,
)
class Meta:
verbose_name = _('Bank Account')
verbose_name_plural = _('Bank Accounts')
def __str__(self):
return self.slug
class Contact(models.Model):
TYPE_PERSONAL = 1
TYPE_COMPANY = 2
TYPE_GOV = 3
TYPES = {
TYPE_PERSONAL: _('Private person'),
TYPE_COMPANY: _('Company'),
TYPE_GOV: _('Government organization'),
}
contact_type = models.PositiveIntegerField(_('Type'), choices=list(TYPES.items()))
# Private person credentials
person_first_name = models.CharField(_('First name'), max_length=256, null=True, blank=True)
person_last_name = models.CharField(_('Last name'), max_length=256, null=True, blank=True)
# Company credentials
company_name = models.CharField(_('Company name'), max_length=1024, null=True, blank=True)
company_nip = models.CharField(_('NIP'), max_length=13, null=True, blank=True)
company_regon = models.CharField(_('REGON'), max_length=9, null=True, blank=True)
# Common address credentials
address_city = models.CharField(_('City'), max_length=1024, null=True, blank=True)
address_street = models.CharField(_('Street'), max_length=1024, null=True, blank=True)
address_postcode = models.CharField(_('Postal code'), max_length=10, null=True, blank=True)
address_country = models.CharField(_('Country'), max_length=512, null=True, blank=True, choices=COUNTRIES)
# Receiver (gov organization)
receiver_name = models.CharField(_('Receiver name'), max_length=1024, null=True, blank=True)
receiver_city = models.CharField(_('Receiver City'), max_length=1024, null=True, blank=True)
receiver_street = models.CharField(_('Receiver Street'), max_length=1024, null=True, blank=True)
receiver_postcode = models.CharField(_('Receiver Postal code'), max_length=10, null=True, blank=True)
receiver_country = models.CharField(_('Receiver Country'), max_length=512, null=True, blank=True, choices=COUNTRIES)
@property
def address(self):
return '{street}, {postcode} {city}, {country}'.format(
street=self.address_street,
postcode=self.address_postcode,
city=self.address_city,
country=self.address_country)
@property
def name(self):
return '{first_name} {last_name}'.format(
first_name=self.person_first_name,
last_name=self.person_last_name)
@property
def company(self):
return '{company} (NIP: {nip}, REGON: {regon})'.format(
company=self.company_name,
nip=self.company_nip,
regon=self.company_regon
)
@property
def address_country_verbose(self):
return countries.countries.get(self.address_country, '')
@property
def receiver_country_verbose(self):
return countries.countries.get(self.receiver_country, '')
@property
def is_company(self):
return self.contact_type == self.TYPE_COMPANY
@property
def is_gov(self):
return self.contact_type == self.TYPE_GOV
def __str__(self):
if self.contact_type == self.TYPE_COMPANY:
credentials = self.company
elif self.contact_type == self.TYPE_GOV:
credentials = '{company} ({receiver_name})'.format(
company=self.company,
receiver_name=self.receiver_name
)
else:
credentials = self.name
return '{type}: {credentials}'.format(
type=self.TYPES.get(self.contact_type),
credentials=credentials
)
class Meta:
verbose_name = _('Contact')
verbose_name_plural = _('Contacts')
|
seamless-distribution-systems/galilei
|
galieli-netdata-installer/netdata/python.d/nsd.chart.py
|
Python
|
gpl-3.0
| 3,581
| 0.002793
|
# -*- coding: utf-8 -*-
# Description: NSD `nsd-control stats_noreset` netdata python.d module
# Author: <383c57 at gmail.com>
from base import ExecutableService
import re
# default module values (can be overridden per job in `config`)
priority = 60000
retries = 5
update_every = 30
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
CHARTS = {
'queries': {
'options': [
None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'],
'lines': [
['num_queries', 'queries', 'incremental'],]},
'zones': {
'options': [
None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'],
'lines': [
['zone_master', 'master', 'absolute'],
['zone_slave', 'slave', 'absolute'],]},
'protocol': {
'options': [
None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
'lines': [
['num_udp', 'udp', 'incremental'],
['num_udp6', 'udp6', 'incremental'],
['num_tcp', 'tcp', 'incremental'],
['num_tcp6', 'tcp6', 'incremental'],]},
'type': {
'options': [
None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'],
'lines': [
['num_type_A', 'A', 'incremental'],
['num_type_NS', 'NS', 'incremental'],
['num_type_CNAME', 'CNAME', 'incremental'],
['num_type_SOA', 'SOA', 'incremental'],
['num_type_PTR', 'PTR', 'incremental'],
['num_type_HINFO', 'HINFO', 'incremental'],
['num_type_MX', 'MX', 'incremental'],
['num_type_NAPTR', 'NAPTR', 'incremental'],
['num_type_TXT', 'TXT', 'incremental'],
['num_type_AAAA', 'AAAA', 'incremental'],
['num_type_SRV', 'SRV', 'incremental'],
['num_type_TYPE255', 'ANY', 'incremental'],]},
'transfer': {
'options': [
None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
'lines': [
['num_opcode_NOTIFY', 'NOTIFY',
|
'incremental'],
['num_type_TYPE252', 'AXFR', 'incremental'],]},
'rcode': {
'options': [
None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
'lines': [
['num_rcode_NOERROR', 'NO
|
ERROR', 'incremental'],
['num_rcode_FORMERR', 'FORMERR', 'incremental'],
['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
['num_rcode_REFUSED', 'REFUSED', 'incremental'],
['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]}
}
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
self.command = "nsd-control stats_noreset"
self.order = ORDER
self.definitions = CHARTS
self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
def _get_data(self):
lines = self._get_raw_data()
if not lines:
return None
r = self.regex
stats = dict((k.replace('.', '_'), int(v))
for k, v in r.findall(''.join(lines)))
stats.setdefault('num_opcode_NOTIFY', 0)
stats.setdefault('num_type_TYPE252', 0)
stats.setdefault('num_type_TYPE255', 0)
return stats
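# Hedged illustration of what _get_data() does with raw `nsd-control
# stats_noreset` output (lines are illustrative, not a complete listing):
#   "num.queries=123456" -> {'num_queries': 123456}
#   "num.type.A=1000"    -> {'num_type_A': 1000}
# Dots become underscores so the keys line up with the dimension ids in CHARTS.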
|
INCF/lib9ML
|
nineml/abstraction/dynamics/visitors/validators/base.py
|
Python
|
bsd-3-clause
| 2,819
| 0
|
"""
This file contains the DynamicsValidator class for validating component
:copyright: Copyright 2010-2017 by the NineML Python team, see AUTHORS.
:license: BSD-3, see LICENSE for details.
"""
from builtins import object
from nineml.visitors.validators import NoDuplicatedObjectsValidator
from .general import (
TimeDerivativesAreDeclaredDynamicsValidator,
StateAssignmentsAreOnStateVariablesDynamicsValidator,
AliasesAreNotRecursiveDynamicsValidator,
NoUnresolvedSymbolsDynamicsValidator,
RegimeGraphDynamicsValidator,
RegimeOnlyHasOneHandlerPerEventDynamicsValidator,
CheckNoLHSAssignmentsToMathsNamespaceDynamicsValidator,
DimensionalityDynamicsValidator)
from .names import (
LocalNameConflictsDynamicsValidator,
DimensionNameConflictsDynamicsValidator,
DuplicateRegimeNamesDynamicsValidator,
RegimeAliasMatchesBaseScopeValidator)
from .ports import (
EventPortsDynamicsValidator, OutputAnalogPortsDynamicsValidator)
from .types import (
TypesDynamicsValidator)
class DynamicsValidator(object):
"""Class for grouping all the component-validations tests together"""
@classmethod
def validate_componentclass(cls, component_class,
validate_dimensions=True, **kwargs):
"""
Tests a componentclassclass against a variety of tests, to verify its
internal structure
"""
# Check class structure:
TypesDynamicsValidator(component_class, **kwargs)
NoDuplicatedObjectsValidator(component_class, **kwargs)
DuplicateRegimeNamesDynamicsValidator(component_class, **kwargs)
LocalNameConflictsDynamicsValidator(component_class, **kwargs)
DimensionNameConflictsDynamicsValidator(component_class, **kwargs)
RegimeAliasMatchesBaseScopeValidator(component_class, **kwargs)
EventPortsDynamicsValidator(component_class, **kwargs)
OutputAnalogPortsDynamicsValidator(component_class, **kwargs)
TimeDerivativesAreDeclaredDynamicsValidator(component_class, **kwargs)
StateAssignmentsAreOnStateVariablesDynamicsValidator(component_class,
**kwargs)
AliasesAreNotRecursiveDynamicsValidator(component_class, **kwargs)
NoUnresolvedSymbolsDynamicsValidator(component_class, **kwargs)
RegimeGraphDynamicsValidator(component_class, **kwargs)
RegimeOnlyHasOneHandlerPer
|
EventDynamicsValidator(component_class,
**kwargs)
CheckNoLHSAssignmentsToMathsNamespaceDynamicsValidator(component_class,
|
**kwargs)
if validate_dimensions:
DimensionalityDynamicsValidator(component_class, **kwargs)
|
mmirabent/sniffle
|
generate_connections.py
|
Python
|
apache-2.0
| 1,066
| 0.002814
|
import socket
# List of the top 25 sites according t
|
o Alexa
websites = [ "Google.com",
"Facebook.com",
"Youtube.com",
"Baidu.com",
"Yahoo.com",
"Amazon.com",
"Wikipedia.org",
"Qq.com",
"Twitter.com",
"Google.co.in",
"Taobao.com
|
",
"Live.com",
"Sina.com.cn",
"Linkedin.com",
"Yahoo.co.jp",
"Weibo.com",
"Ebay.com",
"Google.co.jp",
"Yandex.ru",
"Blogspot.com",
"Vk.com",
"Hao123.com",
"T.co",
"Bing.com",
"Google.de"]
ip_addresses = []
# Open a bunch of TCP connections on port 80 and close them. Wait at most 1 sec
# before timing out. Timing out raises a socket.timeout exception. Catch it and
# proceed.
for site in websites:
try:
sock = socket.create_connection((site, 80),1)
sock.close()
except socket.timeout:
pass
|
benthomasson/django-tastypie-swagger
|
tastypie_swagger/utils.py
|
Python
|
bsd-2-clause
| 499
| 0
|
from urlparse import urljoin
from django.conf import settings
def trailing_slash_or_none():
|
"""
Return a slash or empty string based on tastypie setting
"""
if getattr(settings, 'TASTYPIE_ALLOW_MISSING_SLASH', False):
return ''
return '/'
def urljoin_forced(base, path, **kwargs):
"""
urljoin base with path, except append '/' to base i
|
f it doesn't exist
"""
base = base.endswith('/') and base or '%s/' % base
return urljoin(base, path, **kwargs)
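# Hedged usage sketch: both calls below yield 'http://example.com/api/doc/',
# because urljoin_forced() appends the missing slash before delegating to urljoin().
# urljoin_forced('http://example.com/api', 'doc/')
# urljoin_forced('http://example.com/api/', 'doc/')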
|
felipegerard/arte_mexicano_antiguo
|
felipegerard/entregable/itm/itm/similarity_functions.py
|
Python
|
agpl-3.0
| 629
| 0.041335
|
from collections import defaultdict
# Return the similarities from an index object as a dictionary
def index2dict(index, file_list, num_sims=5):
file_list = [i.replace('.txt','') for i
|
in file_list]
sims = {} #defaultdict(dict)
for i, idx in enumerate(index):
s = []
for j in range(len(file_list)):
s.append({
'name':file_list[j],
'similarity':float(idx[j])
}) # idx[j] is a numpy.float32, which is not JSON serialisable, so we cast it to a plain float
s = sorted(s, key = lambda item: item['similarity'], reverse=True)[:num
|
_sims]
sims[file_list[i]] = {
i:s[i]
for i in range(len(s))
}
return sims
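# Hedged usage sketch (assumes "index" is a gensim-style similarity index whose
# rows correspond to the entries of file_list):
# sims = index2dict(index, ['libro_a.txt', 'libro_b.txt'], num_sims=1)
# sims['libro_a'] -> {0: {'name': 'libro_a', 'similarity': 1.0}}
# (the top hit is typically the document itself, with similarity 1.0)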
|
sencha/chromium-spacewalk
|
build/android/gyp/create_device_library_links.py
|
Python
|
bsd-3-clause
| 3,509
| 0.014249
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates symlinks to native libraries for an APK.
The native libraries should have previously been pushed to the device (in
options.target_dir). This script then creates links in an apk's lib/ folder to
those native libraries.
"""
import optparse
import os
import sys
from util import build_device
from util import build_utils
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib.utils import apk_helper
def RunShellCommand(device, cmd):
output = device.RunShellCommand(cmd)
if output:
raise Exception(
'Unexpected output running command: ' + cmd + '\n' +
'\n'.join(output))
def CreateSymlinkScript(options):
libraries = build_utils.ReadJson(options.libraries_json)
link_cmd = (
'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
'$APK_LIBRARIES_DIR/%(lib_basename)s \n'
)
script = '#!/bin/sh \n'
for lib in libraries:
script += link_cmd % { 'lib_basename': lib }
with open(options.script_host_path, 'w') as scriptfile:
scriptfile.write(script)
def TriggerSymlinkScript(options):
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
apk_package = apk_helper.GetPackageName(options.apk)
apk_libraries_dir = '/data/data/%s/lib' % apk_package
device_dir = os.path.dirname(options.script_device_path)
mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
{ 'dir': device_dir })
RunShellCommand(device, mkdir_cmd)
device.PushChangedFiles(options.script_host_path, options.script_device_path)
trigger_cmd = (
'APK_LI
|
BRARIES_DIR=%(apk_libraries_dir)s; '
'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
'. %(script_device_path)s'
) % {
'apk_libraries_dir': apk_libraries_dir,
'target_dir': options.target_dir,
'script_devic
|
e_path': options.script_device_path
}
RunShellCommand(device, trigger_cmd)
def main():
parser = optparse.OptionParser()
parser.add_option('--apk', help='Path to the apk.')
parser.add_option('--script-host-path',
help='Path on the host for the symlink script.')
parser.add_option('--script-device-path',
help='Path on the device to push the created symlink script.')
parser.add_option('--libraries-json',
help='Path to the json list of native libraries.')
parser.add_option('--target-dir',
help='Device directory that contains the target libraries for symlinks.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME')
options, _ = parser.parse_args()
required_options = ['apk', 'libraries_json', 'script_host_path',
'script_device_path', 'target_dir', 'configuration_name']
build_utils.CheckOptions(options, parser, required=required_options)
constants.SetBuildType(options.configuration_name)
CreateSymlinkScript(options)
TriggerSymlinkScript(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
|
theofilis/elasticsearch-engine
|
elasticsearch_engine/manager.py
|
Python
|
gpl-2.0
| 20,982
| 0.001716
|
from django.db import connections
from django.db.models.manager import Manager as DJManager
from django.db.utils import DatabaseError
from bson.objectid import ObjectId
import re
from .utils import dict_keys_to_str
try:
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
except ImportError:
class ObjectDoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
DoesNotExist = ObjectDoesNotExist
__all__ = ['queryset_manager', 'Q', 'InvalidQueryError',
'InvalidCollectionError']
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
class InvalidCollectionError(Exception):
pass
RE_TYPE = type(re.compile(''))
class InternalMetadata:
def __init__(self, meta):
self.object_name = meta["object_name"]
class InternalModel:
"""
An internal queryset model to be embedded in a query set for django compatibility.
"""
def __init__(self, document):
self.document = document
self._meta = InternalMetadata(document._meta)
self.DoesNotExist = ObjectDoesNotExist
class QuerySet(object):
"""
A set of results returned from a query. Wraps an ES cursor,
providing :class:`~mongoengine.Document` objects as the results.
"""
def __init__(self, document, collection):
self._document = document
self._collection_obj = collection
self._accessed_collection = False
self._query = {}
self._where_clause = None
self._loaded_fields = []
self._ordering = []
self.transform = None
# If inheritance is allowed, only return instances and instances of
# subclasses of the class being used
# if document._meta.get('allow_inheritance'):
# self._query = {'_types': self._document._class_name}
self._cursor_obj = None
self._limit = None
self._skip = None
# required for compatibility with django
# self.model = InternalModel(document)
def __call__(self, q_obj=None, **query):
"""Filter the selected documents by calling the
:class:`~mongoengine.queryset.QuerySet` with a query.
:param q_obj: a :class:`~mongoengine.queryset.Q` object to be used in
the query; the :class:`~mongoengine.queryset.QuerySet` is filtered
multiple times with different :class:`~mongoengine.queryset.Q`
objects, only the last one will be used
:param query: Django-style query keyword arguments
"""
if q_obj:
self._where_clause = q_obj.as_js(self._document)
query = QuerySet._transform_query(_doc_cls=self._document, **query)
self._query.update(query)
return self
def filter(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__(*q_objs, **query)
def find(self, query):
self._query.update(self.transform.transform_incoming(query, self._collection))
return self
def exclude(self, *q_objs, **query):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
query["not"] = True
return self.__call__(*q_objs, **query)
def all(self):
"""An alias of :meth:`~mongoengine.queryset.QuerySet.__call__`
"""
return self.__call__()
def distinct(self, *args, **kwargs):
"""
Distinct method
"""
return self._cursor.distinct(*args, **kwargs)
@property
def _collection(self):
"""Property that returns the collection object. This allows us to
perform operations only if the collection is accessed.
"""
return self._collection_obj
def values(self, *args):
return (args and [dict(zip(args, [getattr(doc, key) for key in args])) for doc in self]) or [obj for obj in
self._cursor.clone()]
def values_list(self, *args, **kwargs):
flat = kwargs.pop("flat", False)
if flat and len(args) != 1:
raise Exception("args len must be 1 when flat=True")
return (flat and self.distinct(args[0] if not args[0] in ["id", "pk"] else "_id")) or zip(
*[self.distinct(field if field not in ["id", "pk"] else "_id") for field in args])
@property
def _cursor(self):
if self._cursor_obj is None:
cursor_args = {}
if self._loaded_fields:
cursor_args = {'fields': self._loaded_fields}
self._cursor_obj = self._collection.find(self._query,
**cursor_args)
# Apply where clauses to cursor
if self._where_clause:
self._cursor_obj.where(self._where_clause)
# apply default ordering
# if self._document._meta['ordering']:
|
# self.order_by(*self._document._meta
|
['ordering'])
return self._cursor_obj.clone()
@classmethod
def _lookup_field(cls, document, fields):
"""
Looks for "field" in "document"
"""
if isinstance(fields, (tuple, list)):
return [document._meta.get_field_by_name((field == "pk" and "id") or field)[0] for field in fields]
return document._meta.get_field_by_name((fields == "pk" and "id") or fields)[0]
@classmethod
def _translate_field_name(cls, doc_cls, field, sep='.'):
"""Translate a field attribute name to a database field name.
"""
parts = field.split(sep)
parts = [f.attname for f in QuerySet._lookup_field(doc_cls, parts)]
return '.'.join(parts)
@classmethod
def _transform_query(self, _doc_cls=None, **parameters):
"""
Converts parameters to elasticsearch queries.
"""
spec = {}
operators = ['ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod', 'all', 'size', 'exists']
match_operators = ['contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact',
'iexact']
exclude = parameters.pop("not", False)
for key, value in parameters.items():
parts = key.split("__")
lookup_type = (len(parts) >= 2) and ( parts[-1] in operators + match_operators and parts.pop()) or ""
# Let's get the right field and be sure that it exists
parts[0] = QuerySet._lookup_field(_doc_cls, parts[0]).attname
if not lookup_type and len(parts) == 1:
if exclude:
value = {"$ne": value}
spec.update({parts[0]: value})
continue
if parts[0] == "id":
parts[0] = "_id"
value = [isinstance(par, basestring) or par for par in value]
if lookup_type in ['contains', 'icontains',
'startswith', 'istartswith',
'endswith', 'iendswith',
'exact', 'iexact']:
flags = 0
if lookup_type.startswith('i'):
flags = re.IGNORECASE
lookup_type = lookup_type.lstrip('i')
regex = r'%s'
if lookup_type == 'startswith':
regex = r'^%s'
elif lookup_type == 'endswith':
regex = r'%s$'
elif lookup_type == 'exact':
regex = r'^%s$'
value = re.compile(regex % value, flags)
elif lookup_type in operators:
value = {"$" + lookup_type: value}
elif lookup_type and len(parts) == 1:
raise DatabaseError("Unsupported lookup type: %r" % lookup_type)
key = '.'.join(parts)
if exclude:
value = {"$ne": value}
spec.update({key: value})
return spec
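# Hedged illustration of _transform_query() (the "Article" document class and
# its fields are hypothetical):
# QuerySet._transform_query(_doc_cls=Article, title__istartswith='foo', votes__gte=10)
# # -> {'title': re.compile('^foo', re.IGNORECASE), 'votes': {'$gte': 10}}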
def get(se
|
gborri/SickRage
|
tests/test_xem.py
|
Python
|
gpl-3.0
| 2,900
| 0.01
|
#!/usr/bin/env python2.7
# Author: echel0n <echel0n@sickrage.ca>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import unittest
import sickrage
from sickrage.core.tv.show import TVShow
from tests import SiCKRAGETestDBCase
class XEMBasicTests(SiCKRAGETestDBCase):
def loadShowsFromDB(self):
"""
Populates the showList with shows from the database
"""
for s in [s['doc'] for s in sickrage.app.main_db.db.all('tv_shows', with_doc=True)]:
try:
curShow = TVShow(int(s["indexer"]), int(s["indexer_id"]))
curShow.saveToDB()
curShow.loadFromDB(skipNFO=True)
sickrage.app.showlist.append
|
(curShow)
except Exception:
|
pass
def loadFromDB(self):
"""
Populates the showList with shows from the database
"""
for s in [s['doc'] for s in sickrage.app.main_db.db.all('tv_shows', with_doc=True)]:
try:
curShow = TVShow(int(s["indexer"]), int(s["indexer_id"]))
curShow.saveToDB()
curShow.loadFromDB(skipNFO=True)
sickrage.app.showlist.append(curShow)
except Exception as e:
print "There was an error creating the show"
def test_formating(self):
name = "Game.of.Thrones.S03.720p.HDTV.x264-CtrlHD"
release = "Game of Thrones"
# m = re.match('(?P<ep_ab_num>(?>\d{1,3})(?![ip])).+', name)
escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(release))
curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}\W+[a-zA-Z]{3,}\W+\d{4}.+))'
# print(u"Checking if show " + name + " matches " + curRegex)
match = re.search(curRegex, name, re.I)
# if match:
# print(u"Matched " + curRegex + " to " + name)
if __name__ == "__main__":
print "=================="
print "STARTING - XEM SCENE NUMBERING TESTS"
print "=================="
print "######################################################################"
unittest.main()
|
whutch/cwmud
|
cwmud/core/commands/communication/__init__.py
|
Python
|
mit
| 246
| 0
|
# -*- coding: utf-8 -*-
"""Communication commands package."""
# Part of Clockw
|
ork MUD Server (https://github.com/whutch/cwmud)
#
|
:copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
|
superdesk/superdesk-core
|
apps/archive/news.py
|
Python
|
agpl-3.0
| 767
| 0
|
"""
News resource
=============
It is an alias for archive without filtering out published items.
"""
from superdesk.resource import build_custom_hateoas
from apps.archive.archive import ArchiveResource, ArchiveService
from apps.ar
|
chive.common import CUSTOM_HATEOAS
class NewsResource(ArchiveResource):
datasource = ArchiveResource.datasource.copy()
datasource.update(
{
"source": "archive",
"elasti
|
c_filter": {"bool": {"must_not": {"term": {"version": 0}}}},
}
)
resource_methods = ["GET"]
item_methods = []
class NewsService(ArchiveService):
def enhance_items(self, items):
super().enhance_items(items)
for item in items:
build_custom_hateoas(CUSTOM_HATEOAS, item)
|
tangyanhan/homesite
|
manage_videos/urls.py
|
Python
|
mit
| 370
| 0.016216
|
from django.conf.urls import url
from . import views
app_name='manage'
urlpatterns = [
url(r'^index/$', views.index, name='index'),
url(r'^db/(\w+)/$', view
|
s.db, name='db'),
|
url(r'^import/index/$', views.import_index, name='import'),
url(r'^import/dir/$', views.import_dir, name='import-dir'),
url(r'^import/status/', views.import_status, name='import-status')
]
|
alxgu/ansible
|
lib/ansible/plugins/lookup/passwordstore.py
|
Python
|
gpl-3.0
| 11,134
| 0.003413
|
# (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: passwordstore
version_added: "2.3"
author:
- Patrick Deelman <patrick@patrickdeelman.nl>
short_description: manage passwords with passwordstore.org's pass utility
description:
- Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
It also retrieves YAML style keys stored as multilines in the passwordfile.
options:
_terms:
description: query key
required: True
passwordstore:
description: location of the password store
default: '~/.password-store'
directory:
description: The directory of the password store.
env:
- name: PASSWORD_STORE_DIR
create:
description: Create the password if it does not already exist.
type: bool
default: 'no'
overwrite:
description: Overwrite the password if it does already exist.
type: bool
default: 'no'
returnall:
description: Return all the content of the password, not only the first line.
type: bool
default: 'no'
subkey:
description: Return a specific subkey of the password. When set to C(password), always returns the first line.
default: password
userpass:
description: Specify a password to save, instead of a generated one.
length:
description: The length of the generated password
type: integer
default: 16
backup:
description: Used with C(overwrite=yes). Backup the previous password in a subkey.
type: bool
default: 'no'
version_added: 2.7
nosymbols:
description: use alphanumeric characters
type: bool
default: 'no'
version_added: 2.8
"""
EXAMPLES = """
# Debug is used for examples, BAD IDEA to show passwords on screen
- name: Basic lookup. Fails if example/test doesn't exist
debug:
msg: "{{ lookup('passwordstore', 'example/test')}}"
- name: Create pass with random 16 character password. If password exists just give the password
debug:
var: mypassword
vars:
mypassword: "{{ lookup('passwordstore', 'example/test create=true')}}"
- name: Different size password
debug:
msg: "{{ lookup('passwordstore', 'example/test create=true length=42')}}"
- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
debug:
msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}"
- name: Create an alphanumeric password
debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true') }}"
- name: Return the value for user in the KV pair user, username
debug:
msg: "{{ lookup('passwordstore', 'example/test subkey=user')}}"
- name: Return the entire password file content
set_fact:
passfilecontent: "{{ lookup('passwordstore', 'example/test returnall=true')}}"
"""
RETURN = """
_raw:
description:
- a password
"""
import os
import subprocess
import time
from distutils import util
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C
# backhacked check_output with input for python 2.7
# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
def check_output2(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
del kwargs['input']
kwargs['stdin'] = subprocess.PIPE
else:
b_inputdata = None
process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
try:
b_out, b_err = process.communicate(b_inputdata)
except Exception:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode != 0 or \
b'encryption failed: Unusable public key' in b_out or \
b'encryption failed: Unusable public key' in b_err:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(
retcode,
cmd,
to_native(b_out + b_err, errors='surrogate_or_strict')
)
return b_out
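# Hedged usage sketch: behaves like subprocess.check_output() but also accepts
# an "input" keyword on Python 2.7 and treats gpg's "Unusable public key"
# message as a failure. The pass invocations below are illustrative only.
# check_output2(['pass', 'show', 'example/test'])
# check_output2(['pass', 'insert', '-f', '-m', 'example/test'], input='s3cret\n')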
class LookupModule(LookupBase):
def parse_params(self, term):
# I went with the "traditional" param followed by space-separated KV pairs.
# Waiting for final implementation of lookup parameter parsing.
# See: https://github.com/ansible/ansible/issues/12255
params = term.split()
if len(params) > 0:
# the first param is the pass-name
self.passname = params[0]
# next parse the optional parameters in keyvalue pairs
try:
for param in params[1:]:
name, value = param.split('=')
if name not in self.paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
self.paramvals[name
|
] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
# check and convert values
try:
for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
if not isinstance(self.paramvals[key], bool):
self.paramvals[key] = util.strtobool(self.paramvals[key])
except (ValueError, AssertionError
|
) as e:
raise AnsibleError(e)
if not isinstance(self.paramvals['length'], int):
if self.paramvals['length'].isdigit():
self.paramvals['length'] = int(self.paramvals['length'])
else:
raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
# Set PASSWORD_STORE_DIR if directory is set
if self.paramvals['directory']:
if os.path.isdir(self.paramvals['directory']):
os.environ['PASSWORD_STORE_DIR'] = self.paramvals['directory']
else:
raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
def check_pass(self):
try:
self.passoutput = to_text(
check_output2(["pass", self.passname]),
errors='surrogate_or_strict'
).splitlines()
self.password = self.passoutput[0]
self.passdict = {}
for line in self.passoutput[1:]:
if ':' in line:
name, value = line.split(':', 1)
self.passdict[name.strip()] = value.strip()
except (subprocess.CalledProcessError) as e:
if e.returncode == 1 and 'not in the password store' in e.output:
# if pass returns 1 and return string contains 'is not in the password store.'
# We need to determine if this is valid or Error.
if not self.paramvals['create']:
raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
else:
return False
else:
raise AnsibleError(e)
return True
def get_newpass(self):
if self.paramvals['nosymbols']:
|
cortedeltimo/SickRage
|
sickbeard/clients/transmission_client.py
|
Python
|
gpl-3.0
| 5,187
| 0.00135
|
# coding=utf-8
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import
|
re
from base64 import b64encode
import sickbeard
from sickbeard.clients.generic import GenericClient
class TransmissionAPI(GenericClient):
def __init__(self, host=None, username=None, passw
|
ord=None):
super(TransmissionAPI, self).__init__('Transmission', host, username, password)
self.url = '/'.join((self.host.rstrip('/'), sickbeard.TORRENT_RPCURL.strip('/'), 'rpc'))
def _get_auth(self):
post_data = json.dumps({'method': 'session-get', })
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120,
verify=sickbeard.TORRENT_VERIFY_CERT)
self.auth = re.search(r'X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
except Exception:
return None
self.session.headers.update({'x-transmission-session-id': self.auth})
# Validating Transmission authorization
post_data = json.dumps({'arguments': {},
'method': 'session-get'})
self._request(method='post', data=post_data)
return self.auth
def _add_torrent_uri(self, result):
arguments = {
'filename': result.url,
'paused': int(sickbeard.TORRENT_PAUSED)
}
if sickbeard.TORRENT_PATH:
arguments['download-dir'] = sickbeard.TORRENT_PATH + "/" + result.show.name + "/"
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _add_torrent_file(self, result):
arguments = {
'metainfo': b64encode(result.content),
'paused': 1 if sickbeard.TORRENT_PAUSED else 0
}
if sickbeard.TORRENT_PATH:
arguments['download-dir'] = sickbeard.TORRENT_PATH + "/" + result.show.name + "/"
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_ratio(self, result):
ratio = None
if result.ratio:
ratio = result.ratio
mode = 0
if ratio:
if float(ratio) == -1:
ratio = 0
mode = 2
elif float(ratio) >= 0:
ratio = float(ratio)
mode = 1 # Stop seeding at seedRatioLimit
arguments = {'ids': [result.hash],
'seedRatioLimit': ratio,
'seedRatioMode': mode}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_seed_time(self, result):
if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1:
time = int(60 * float(sickbeard.TORRENT_SEED_TIME))
arguments = {'ids': [result.hash],
'seedIdleLimit': time,
'seedIdleMode': 1}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
else:
return True
def _set_torrent_priority(self, result):
arguments = {'ids': [result.hash]}
if result.priority == -1:
arguments['priority-low'] = []
elif result.priority == 1:
# set high priority for all files in torrent
arguments['priority-high'] = []
# move torrent to the top of the queue
arguments['queuePosition'] = 0
if sickbeard.TORRENT_HIGH_BANDWIDTH:
arguments['bandwidthPriority'] = 1
else:
arguments['priority-normal'] = []
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set'})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
api = TransmissionAPI()
|
opennetworkinglab/spring-open
|
scripts/perf-scripts/generate_flows.py
|
Python
|
apache-2.0
| 2,622
| 0.017162
|
#! /usr/bin/env python
# -*- Mode: python; py-indent-offset: 4; tab-width: 8; indent-tabs-mode: t; -*-
#
# A script for generating a number of flows.
#
# The output of the script should be saved to a file, and the flows from
# that file should be added by the following command:
#
# web/add_flow.py -f filename
#
# NOTE: Currently, some of the parameters fo the flows are hard-coded,
# and all flows are between same source and destination DPID and ports
# (differentiated by different matchSrcMac and matchDstMac).
#
import copy
import pprint
import os
import sys
import subprocess
import json
import argparse
import io
import time
## Global Var ##
DEBUG=0
pp =
|
pprint.PrettyPrinter(indent=4)
## Worker Functions ##
def log_error(txt):
print '%s' % (txt)
def debug(txt):
if DEBUG:
print '%s' % (txt)
if __name__ == "__main__":
usage_msg = "Generate a number of flows by using a
|
pre-defined template.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "NOTE: This script is work-in-progress. Currently all flows are within same\n"
usage_msg = usage_msg + "pair of switch ports and contain auto-generated MAC-based matching conditions.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "Usage: %s <begin-flow-id> <end-flow-id>\n" % (sys.argv[0])
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + " The output should be saved to a file, and the flows should be installed\n"
usage_msg = usage_msg + " by using the command './add_flow.py -f filename'\n"
# app.debug = False;
# Usage info
if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print(usage_msg)
exit(0)
# Check arguments
if len(sys.argv) < 3:
log_error(usage_msg)
exit(1)
# Extract the arguments
begin_flow_id = int(sys.argv[1], 0)
end_flow_id = int(sys.argv[2], 0)
if begin_flow_id > end_flow_id:
log_error(usage_msg)
exit(1)
#
# Do the work
#
# NOTE: Currently, up to 65536 flows are supported.
# More flows can be supported by iterating over, say, some of
# the other bytes of the autogenerated source/destination MAC addresses.
#
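# Worked example of the mapping below: idx = 300 gives mac3 = 1 and mac4 = 45,
# so src_mac = "00:00:01:2d:00:00" and dst_mac = "00:01:01:2d:00:00".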
flow_id = begin_flow_id
idx = 0
while flow_id <= end_flow_id:
mac3 = idx / 255
mac4 = idx % 255
str_mac3 = "%0.2x" % mac3
str_mac4 = "%0.2x" % mac4
src_mac = "00:00:" + str_mac3 + ":" + str_mac4 + ":00:00";
dst_mac = "00:01:" + str_mac3 + ":" + str_mac4 + ":00:00";
print "%s FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac %s matchDstMac %s" % (flow_id, src_mac, dst_mac)
flow_id = flow_id + 1
idx = idx + 1
|
jmesteve/saas3
|
openerp/addons/hr_contract/hr_contract.py
|
Python
|
agpl-3.0
| 4,997
| 0.005803
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth', size=30),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle', size=64),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id':fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', size=32, required=True),
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', size=64, required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', size=256, required=False, readonly=False),
'visa_no': fields.char('Visa No', size=64, required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
return {'value': {'job_id': job_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['da
|
te_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
# vim:expandtab:smartindent:tabstop=4:s
|
ofttabstop=4:shiftwidth=4:
|
nidhididi/CloudBot
|
cloudbot/event.py
|
Python
|
gpl-3.0
| 15,182
| 0.003557
|
import asyncio
import enum
import logging
import concurrent.futures
logger = logging.getLogger("cloudbot")
@enum.unique
class EventType(enum.Enum):
message = 0
action = 1
# TODO: Do we actually want to have a 'notice' event type? Should the NOTICE command be a 'message' type?
notice = 2
join = 3
part = 4
kick = 5
other = 6
class Event:
"""
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type type: EventType
:type content: str
:type target: str
:type chan: str
:type nick: str
:type user: str
:type host: str
:type mask: str
:type db: sqlalchemy.orm.Session
:type db_executor: concurrent.futures.ThreadPoolExecutor
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: str
:type irc_ctcp_text: str
"""
def __init__(self, *, bot=None, hook=None, conn=None, base_event=None, event_type=EventType.other, content=None,
target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None, irc_prefix=None,
irc_command=None, irc_paramlist=None, irc_ctcp_text=None):
"""
All of these parameters except for `bot` and `hook` are optional.
The irc_* parameters should only be specified for IRC events.
Note that the `bot` argument may
|
be left out if you specify a `base_event`.
:param bot: The CloudBot instance this event was triggered from
:param conn: The Client instance this event was triggered from
:param hook: The hook this event will be passed to
:param base_event: The base event that this event is based on. If this parameter is not None, then nick, user,
host, mask, and irc_*
|
arguments are ignored
:param event_type: The type of the event
:param content: The content of the message, or the reason for an join or part
:param target: The target of the action, for example the user being kicked, or invited
:param channel: The channel that this action took place in
:param nick: The nickname of the sender that triggered this event
:param user: The user of the sender that triggered this event
:param host: The host of the sender that triggered this event
:param mask: The mask of the sender that triggered this event (nick!user@host)
:param irc_raw: The raw IRC line
:param irc_prefix: The raw IRC prefix
:param irc_command: The IRC command
:param irc_paramlist: The list of params for the IRC command. If the last param is a content param, the ':'
should be removed from the front.
:param irc_ctcp_text: CTCP text if this message is a CTCP command
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type base_event: cloudbot.event.Event
:type content: str
:type target: str
:type event_type: EventType
:type nick: str
:type user: str
:type host: str
:type mask: str
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: list[str]
:type irc_ctcp_text: str
"""
self.db = None
self.db_executor = None
self.bot = bot
self.conn = conn
self.hook = hook
if base_event is not None:
# We're copying an event, so inherit values
if self.bot is None and base_event.bot is not None:
self.bot = base_event.bot
if self.conn is None and base_event.conn is not None:
self.conn = base_event.conn
if self.hook is None and base_event.hook is not None:
self.hook = base_event.hook
# If base_event is provided, don't check these parameters, just inherit
self.type = base_event.type
self.content = base_event.content
self.target = base_event.target
self.chan = base_event.chan
self.nick = base_event.nick
self.user = base_event.user
self.host = base_event.host
self.mask = base_event.mask
# clients-specific parameters
self.irc_raw = base_event.irc_raw
self.irc_prefix = base_event.irc_prefix
self.irc_command = base_event.irc_command
self.irc_paramlist = base_event.irc_paramlist
self.irc_ctcp_text = base_event.irc_ctcp_text
else:
# Since base_event wasn't provided, we can take these parameters
self.type = event_type
self.content = content
self.target = target
self.chan = channel
self.nick = nick
self.user = user
self.host = host
self.mask = mask
# clients-specific parameters
self.irc_raw = irc_raw
self.irc_prefix = irc_prefix
self.irc_command = irc_command
self.irc_paramlist = irc_paramlist
self.irc_ctcp_text = irc_ctcp_text
@asyncio.coroutine
def prepare(self):
"""
Initializes this event to be run through its hook
Mainly, initializes a database object on this event, if the hook requires it.
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use prepare_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
logger.debug("Opening database session for {}:threaded=False".format(self.hook.description))
# we're running a coroutine hook with a db, so initialise an executor pool
self.db_executor = concurrent.futures.ThreadPoolExecutor(1)
# be sure to initialize the db in the database executor, so it will be accessible in that thread.
self.db = yield from self.async(self.bot.db_session)
def prepare_threaded(self):
"""
Initializes this event to be run through its hook
Mainly, initializes the database object on this event, if the hook requires it.
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use prepare.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
logger.debug("Opening database session for {}:threaded=True".format(self.hook.description))
self.db = self.bot.db_session()
@asyncio.coroutine
def close(self):
"""
Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use close_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
logger.debug("Closing database session for {}:threaded=False".format(self.hook.description))
# be sure to close the database in the database executor, as it is only accessible in that one thread
yield from self.async(self.db.close)
self.db = None
def close_threaded(self):
"""
Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use close.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
logger.debug
|
scompo/money
|
money/money.py
|
Python
|
bsd-3-clause
| 3,277
| 0.01007
|
from time import localtime, gmtime, strftime, strptime
from os.path import expanduser, join
from pprint import pprint
from decimal import *
def scrivi_movimento(path, m):
with open(path, 'a') as f:
f.write(m['tipo'] + m['valore'])
f.write(';')
f.write(m['data'])
f.write(';')
f.write(m['ora'])
f.write(';')
f.write(m['descrizione'])
f.write('\n')
return
def leggi_tipo():
t = 'n'
while not (t == '' or t == '+' or t == '-'):
t = input('tipo (+/-) [-]: ')
if t == '':
t='-'
elif t == '+':
t=''
return t
def leggi_valore():
v = ''
while v == '':
v = input('valore (#####.##) []: ')
return v
def leggi_data():
d = input('data (DD/MM/YYYY
|
) [oggi]: ')
if d == '':
d
|
= strftime("%d/%m/%Y", localtime())
return d
def leggi_ora():
o = input('ora (HH:MM) [adesso]: ')
if o == '':
o = strftime('%H:%M', localtime())
return o
def leggi_descrizione():
d = input('descrizione () []: ')
return d
def leggi_movimento():
tipo = leggi_tipo()
valore = leggi_valore()
data = leggi_data()
ora = leggi_ora()
descrizione = leggi_descrizione()
m = {
'tipo' : tipo,
'valore' : valore,
'data' : data,
'ora' : ora,
'descrizione': descrizione
}
return m
def get_file_dati():
home = expanduser('~')
nome_file_dati = 'movimenti.dat'
file_dati = join(home, 'dati', nome_file_dati)
print('file dati:', file_dati)
return file_dati
def carica_file(f):
dati = []
with open(f, "r") as df:
for l in df:
spl = l.split(';')
d = {
'valore' : spl[0],
'data' : spl[1],
'ora' : spl[2],
'descrizione' : spl[3]
}
dati.append(d)
return dati
def inserimento(file_dati):
m = leggi_movimento()
scrivi_movimento(file_dati, m)
def inserimento_dati():
file_dati = get_file_dati()
inserimento(file_dati)
def riassunto_dati():
file_dati = get_file_dati()
riassunto(file_dati)
def data_default(data):
try:
return strptime(data, '%d/%m/%Y')
except ValueError:
return gmtime(0)
def ora_default(ora):
try:
return strptime(ora, '%H:%M')
except ValueError:
return gmtime(0)
def ordina(dati):
return sorted(
dati,
key = lambda x: (
data_default(x['data']),
ora_default(x['ora'])
),
reverse = True
)
def riassunto(file_dati):
dati = carica_file(file_dati)
dati_ordinati = ordina(dati)
val_attuale = Decimal('0')
spese_tot = Decimal('0')
guadagni_tot = Decimal('0')
for d in dati:
m = Decimal(d['valore'])
val_attuale = val_attuale + m
if m > Decimal('0'):
guadagni_tot = guadagni_tot + m
else:
spese_tot = spese_tot + m
print('valore attuale:', str(val_attuale))
print('guadagni complessivi:', str(guadagni_tot))
print('spese complessive:', str(spese_tot))
print('ultimi 5 movimenti:')
for i in range(5):
if i < len(dati_ordinati):
print(dati_ordinati[i])
|
firstprayer/monsql
|
setup.py
|
Python
|
mit
| 371
| 0.013477
|
from setuptools import setup, find_package
|
s
setup(name='monsql',
version='0.1.7',
packages = find_packages(),
author='firstprayer',
author_email='zhangty10@gmail.com',
description='MonSQL - Mongodb-style way for using mysql.',
url='https://github.com/firstprayer/monsql
|
.git',
install_requires=[
'MySQL-python'
],
)
|
AdmiralenOla/Scoary
|
scoary/vcf2scoary.py
|
Python
|
gpl-3.0
| 8,390
| 0.005364
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Script to search vcf files for mutations within specific coordinates
# Input:
# -A vcf file
#
# Output:
# -A Roary-like file with mutations sorted in rows, strains as columns and presence/absence in cells
# -Columns: Chromosome, Position, variant (eg C->T), type (eg missense, synonymous, frameshift etc)
# Reading VCF
# File metainfo starts as ##key=value
# These are always formed and should be caught
# example ##fileformat=VCFv4.3 - give warning if format is off
# Columns 8 MANDATORY
# CHROM POS ID REF ALT QUAL FILTER INFO
# OPTIONAL COLUMNS
# FORMAT SAMPLE1 SAMPLE2 etc
# All data lines are tab-delimited
# CHROM : string, no whitespace
# POS : integer. Can have many lines with same pos. Pos=0 or N+1 for telomere positions
# ID : semicolon-delimited list of strings
# REF : string, ACGTN (can be multiple)
# ALT : comma-separated list, ACGTN* (* = allele is missing due to overlapping deletion)
# (NOTE: Suggest splitting ALT variants into different lines to preserve binarity)
# QUAL : float
# FILTER : PASS or semicolon-delimited list
# INFO : semicolon-delimited list of key=value pairs or flags
# FORMAT (optional) : colon-delimited list.
# Genotype fields - Genotype always first field
# GT encoded as allele values separated by | or /. 0 = reference. 1 = first ALT. 2 = second alt etc
# NOTE: Haploid calls (bacteria) have only 1 value
# NOTE: / means genotype unphased. | means genotype phased
# INFO field SVtypes : DELetion, INSertion, DUPlication, INVersion, CNV
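# Illustrative record in that layout (hypothetical values, haploid GT calls):
#   CHROM  POS   ID  REF  ALT  QUAL  FILTER  INFO      FORMAT  S1  S2  S3
#   chr1   1042  .   A    C,T  99    PASS    TYPE=snp  GT      1   2   0
# Because ALT lists two variants, the conversion below writes one output row per
# allele (see the split on "," in the ALT column and the fixdummy() helper).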
import sys
import argparse
import os
import csv
import re
import traceback
__version__ = '0.1b'
__author__ = 'Ola Brynildsrud'
__credits = ['Ola Brynildsrud']
__email__ = 'olbb@fhi.no'
def main():
"""
Converts VCF files (version 4.x) to Scoary format
"""
##########################################################################
# Parse command line arguments
parser = argparse.ArgumentParser(
description='This script takes in vcf files and creates a '
'presence/absence matrix of mutations in the '
'Roary/Scoary format',
epilog='by Ola Brynildsrud (olbb@fhi.no)')
parser.add_argument(
'--out',
action='store',
default='./mutations_presence_absence.csv',
help='The path to the output file')
parser.add_argument(
'--types',
action='store',
default='ALL',
help='The types of variants to include in the output. NOTE: This '
'works if TYPE=XX can be found in the INFO column of the vcf '
'file. The special keyword ALL includes all types. This is '
'the default setting. Common types are snp, mnp, ins, del '
'and complex. Give as comma-separated list. '
'Example: --types snp,ins,del')
parser.add_argument(
'--version',
action='version',
version=__version__)
parser.add_argument(
'--force',
action='store_true',
        default=False,
help='Force overwriting of output file. (If it already '
'exists)')
parser.add_argument(
'vcf',
action='store',
metavar='<VCF_file>',
        help='The VCF file to convert to Roary/Scoary format')
args = parser.parse_args()
    if args.types != "ALL":
args.types = args.types.split(",")
if os.path.isfile(args.out) and not args.force:
sys.exit("Outfile already exists. Change name of outfile or "
"run with --force")
if not os.path.isfile(args.vcf):
sys.exit("Unable to locate input file %s" % args.vcf)
    with open(args.vcf, 'r') as vcffile, open(args.out, 'w') as outfile:
lines = csv.reader(vcffile, delimiter='\t', quotechar='"')
metainfo = {"##INFO" : {},
"##FILTER" : {},
"##FORMAT" : {},
"##ALT" : {},
"##contig" : {},
"##META" : {},
"##SAMPLE" : {},
"##PEDIGREE" : {}
}
#for line in lines:
while True:
try:
line = next(lines)
except StopIteration:
print(traceback.print_exc())
sys.exit("ERROR: There appears to be only metainformation "
"(lines starting with ##) in your VCF file.")
# Get metainfo from file
if line[0][:2] == '##':
infoline = re.split('=',line[0], maxsplit=1)
# Capture list output for complex tags
if infoline[0] in metainfo:
ID=re.search(r'ID=(\w+)',infoline[1]).group(1)
infolist = re.split(',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)',infoline[1].strip("<>"))
metainfo[infoline[0]][ID] = {}
# Enter all elements in infolist into appropriate dic
for e in infolist:
esplit = e.split("=")
metainfo[infoline[0]][ID][esplit[0]] = esplit[1]
else:
metainfo[infoline[0]] = infoline[1]
else:
# Have reached the data section of the file
data = {"header": line}
break
try:
vcfversion = metainfo["##fileformat"].split("v")[1]
if int(vcfversion[0]) != 4:
print("WARNING: A VCF format other than 4.x detected."
" File parsing may proceed with errors.")
else:
print("VCF version %s detected" % vcfversion)
except:
print("WARNING: Could not detect VCF format. Expected "
"v4.x. File parsing may proceed with errors.")
print(traceback.print_exc())
# Check that genotype fields have a single allele
if metainfo["##FORMAT"]["GT"]["Number"] != "1":
sys.exit("ERROR: Expected a single allele per genotype. Scoary "
"only works for haploid organisms.")
# Have now caught all metainformation. Now get column information
#header = next(line)
#print header
data["header"] = data["header"][:9] + ["DUMMY"] + data["header"][9:]
outfile.write(','.join('"' + c + '"' for c in data["header"]) + '\n')
while True:
try:
line = next(lines)
except StopIteration:
print("Reached the end of the file")
sys.exit(0)
# Check if line is allowed:
            if args.types != "ALL":
vartype = re.search(r'TYPE=(\w+)',line[7]).group(1)
if vartype not in args.types:
continue
# Split line if ALT contains more than one variant
if "," in line[4]:
orgline = line[:]
alts = line[4].split(",")
c = 1
for a in alts:
newline = orgline[:]
newline[4] = a
# Only get GT
newline[9:] = \
[cell.split(":")[0] for cell in orgline[9:]]
# Fix dummy comparisons
newline[9:] = fixdummy(newline[9:], c)
newline = newline[:9] + ["True"] + newline[9:]
c += 1
writeLine(newline, outfile)
# Genotype fields need to be 0 or 1
# GT is always first in colon-separated list
else:
newline = line[:9] + ["False"] + line[9:]
writeLine(newline, outfile)
def writeLine(line, outfile):
writeline = line[:9] + [cell.split(":")[0] for cell in line[9:]]
outfile.write(','.join('"' + c + '"' for c in writeline) + '\n')
def fixdummy(line,c):
newline = line[:]
try:
for x in range(len(line)):
if line[x] == ".":
# Missing data get entered as reference / no presence
newline[x] = "0"
elif int(line[x]) == c:
newline[x] =
|
reyoung/SlideGen2
|
slidegen2/yaml_formatter.py
|
Python
|
mit
| 933
| 0.001072
|
from yaml import load_all
try:
from yaml import CLoader as Loader
except ImportError:
print("Using pure python YAML loader, it may be slow.")
from yaml import Loader
from iengine import IDocumentFormatter
__author__ = 'reyoung'
class YAMLFormatter(IDocumentFormatter):
def __init__(self, fn=None, content=None):
IDocumentFormatter.__init__(self)
if fn is not None:
with file(fn, "r") as f:
                self.__content = load_all(f, Loader=Loader)
else:
self.__content = load_all(content, Loader=Loader)
def get_command_iterator(self, *args, **kwargs):
for item in self.__content:
yield YAMLFormatter.__process_item(item)
@staticmethod
def __process_item(item):
if isinstance(item, dict) and len(item) == 1:
key = item.iterkeys().__iter__().next()
value = item[key]
return key, value
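# Usage sketch (illustrative, not from the original source): a YAML stream of
# single-key documents such as
#   --- {slide: "Intro"}
#   --- {code: "print('hi')"}
# passed via `content=` yields ("slide", "Intro") and ("code", "print('hi')")
# from get_command_iterator(). Note the fn= branch relies on the Python 2
# `file` builtin.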
|
hydroshare/hydroshare
|
hs_access_control/migrations/0033_auto_20220217_2304.py
|
Python
|
bsd-3-clause
| 756
| 0.002646
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-02-17 23:04
from __future__ import unicode_literals
from django.db import migrations, models
import theme.utils
class Migration(migrations.Migration):
dependencies = [
('hs_access_control', '0032_auto_20210607_2027'),
]
operations = [
migrations.AlterField(
model_name='community',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to=theme.utils.get_upload_path_community),
),
migrations.AlterField(
model_name='groupaccess',
name='picture',
            field=models.ImageField(blank=True, null=True, upload_to=theme.utils.get_upload_path_group),
),
]
|
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/alarm_policy/add_alarm_policy_actions.py
|
Python
|
apache-2.0
| 2,067
| 0.000968
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import json
from qingcloud.cli.iaas_client.actions.base import BaseAction
class AddAlarmPolicyActionsAction(BaseAction):
action = 'AddAlarmPolicyActions'
command = 'add-alarm-policy-actions'
usage = '%(prog)s [-a <alarm_policy>...] [options] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-a", "--alarm-policy", dest="alarm_policy",
action="store", type=str, default='',
help="the ID of the alarm policy whose rules you want to add.")
parser.add_argument("-A", "--actions", dest="actions",
action="store", type=str, default='',
help="it's a JSON list of actions you want to add.")
@classmethod
def build_directive(cls, options):
if options.alarm_policy == '':
print('error: alarm_policy should be specified.')
return None
if options.actions == '':
print('error: actions should be specified.')
return None
directive = {
"alarm_policy": options.alarm_policy,
"actions": json.loads(options.actions),
}
return directive
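# Behaviour sketch (hypothetical values, not from the original source):
#   options.alarm_policy = 'apc-1234abcd'
#   options.actions      = '[{"example_key": "example_value"}]'  # any JSON list
# build_directive(options) then returns
#   {"alarm_policy": "apc-1234abcd", "actions": [{"example_key": "example_value"}]}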
|
Ilias95/lib389
|
lib389/__init__.py
|
Python
|
gpl-3.0
| 123,763
| 0.000525
|
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
"""The lib389 module.
IMPORTANT: Ternary operator syntax is unsupported on RHEL5
x if cond else y #don't!
The lib389 functionalities are split in various classes
defined in brookers.py
TODO: reorganize method parameters according to SimpleLDAPObject
naming: filterstr, attrlist
"""
try:
from subprocess import Popen, PIPE, STDOUT
HASPOPEN = True
except ImportError:
import popen2
HASPOPEN = False
import io
import sys
import os
import stat
import pwd
import grp
import os.path
import base64
import socket
import ldif
import re
import ldap
import ldapurl
import time
import operator
import shutil
import datetime
import logging
import decimal
import glob
import tarfile
import subprocess
import collections
import signal
import errno
from shutil import copy2, copystat, Error
try:
# There are too many issues with this on EL7
# Out of the box, it's just outright broken ...
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
import six
except ImportError:
pass
from ldap.ldapobject import SimpleLDAPObject
from ldap.cidict import cidict
from ldap import LDAPError
# file in this package
from lib389._constants import *
from lib389.properties import *
from lib389._entry import Entry
from lib389._replication import CSN, RUV
from lib389._ldifconn import LDIFConn
from lib389.tools import DirSrvTools
from lib389.mit_krb5 import MitKrb5
from lib389.utils import (
isLocalHost,
is_a_dn,
normalizeDN,
suffixfilt,
escapeDNValue,
update_newhost_with_fqdn,
formatInfData,
ensure_bytes,
ensure_str)
from lib389.paths import Paths
# mixin
# from lib389.tools import DirSrvTools
from lib389.exceptions import *
MAJOR, MINOR, _, _, _ = sys.version_info
if MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7):
from ldap.controls.simple import GetEffectiveRightsControl
from lib389._controls import DereferenceControl
RE_DBMONATTR = re.compile(r'^([a-zA-Z]+)-([1-9][0-9]*)$')
RE_DBMONATTRSUN = re.compile(r'^([a-zA-Z]+)-([a-zA-Z]+)$')
# This controls pyldap debug levels
TRACE_LEVEL = 0
# My logger
log = logging.getLogger(__name__)
# Initiate the paths object here. Should this be part of the DirSrv class
# for submodules?
def wrapper(f, name):
"""
Wrapper of all superclass methods using lib389.Entry.
@param f - DirSrv method inherited from SimpleLDAPObject
@param name - method to call
This seems to need to be an unbound method, that's why it's outside of
DirSrv. Perhaps there is some way to do this with the new classmethod
or staticmethod of 2.4.
We replace every call to a method in SimpleLDAPObject (the superclass
of DirSrv) with a call to inner. The f argument to wrapper is the bound
method of DirSrv (which is inherited from the superclass). Bound means
that it will implicitly be called with the self argument, it is not in
the args list. name is the name of the method to call. If name is a
method that returns entry objects (e.g. result), we wrap the data returned
by an Entry class. If name is a method that takes an entry argument, we
extract the raw data from the entry object to pass in.
"""
def inner(*args, **kwargs):
if name == 'result':
objtype, data = f(*args, **kwargs)
# data is either a 2-tuple or a list of 2-tuples
# print data
if data:
if isinstance(data, tuple):
return objtype, Entry(data)
elif isinstance(data, list):
# AD sends back these search references
# if objtype == ldap.RES_SEARCH_RESULT and \
# isinstance(data[-1],tuple) and \
# not data[-1][0]:
# print "Received search reference: "
# pprint.pprint(data[-1][1])
# data.pop() # remove the last non-entry element
return objtype, [Entry(x) for x in data]
else:
raise TypeError("unknown data type %s returned by result" %
type(data))
else:
return objtype, data
elif name.startswith('add'):
# the first arg is self
# the second and third arg are the dn and the data to send
# We need to convert the Entry into the format used by
# python-ldap
ent = args[0]
if isinstance(ent, Entry):
return f(ent.dn, ent.toTupleList(), *args[2:])
else:
return f(*args, **kwargs)
else:
return f(*args, **kwargs)
return inner
def pid_exists(pid):
if pid <= 0:
return False
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
elif err.errno == errno.EPERM:
return True
else:
raise
return True
def pid_from_file(pidfile):
pid = None
try:
with open(pidfile, 'rb') as f:
for line in f.readlines():
try:
pid = int(line.strip())
break
except ValueError:
continue
except IOError:
pass
return pid
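# Usage sketch (hypothetical pidfile path): the two helpers above combine to
# check whether a previously started instance is still alive, e.g.
#   pid = pid_from_file('/var/run/dirsrv/slapd-example.pid')
#   if pid is not None and pid_exists(pid):
#       log.debug("instance still running with pid %s", pid)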
def _ds_shutil_copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
This is taken from /usr/lib64/python3.5/shutil.py, but removes the
copystat function at the end. Why? Because in a container without
privileges, we don't have access to set xattr. But copystat attempts to
set the xattr when we are root, which causes the copy to fail. Remove it!
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
if os.path.isdir(srcname):
_ds_shutil_copytree(srcname, dstname, symlinks, ignore,
copy_function)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
_ds_shutil_copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
return dst
class DirSrv(SimpleLDAPObject, object):
def __initPart2(self):
"""Initialize the DirSrv structure filling various fields, like:
self.errlog -> nsslapd-errorlog
self.accesslog -> nsslapd
|
matrix65537/lab
|
leetcode/permutations/permutation2.py
|
Python
|
mit
| 785
| 0.003822
|
#!/usr/bin/env python
#coding:utf8
class Solution(object):
def permuteUnique(self, nums):
length = len(nums)
if length == 0:
return [[]]
rlists = [[nums[0]]]
for i in range(1, length):
tlists = []
for L in rlists:
v = nums[i]
for j in range(i + 1):
lcopy = L[::]
lcopy.insert(j, v)
tlists.append(lcopy)
rlists = tlists
d = {}
        for L in rlists:
d[tuple(L)] = True
return map(lambda x: list(x), d.keys())
def main():
nums = [1, 1, 2]
s = Solution()
rlists = s.permuteUnique(nums)
for L in rlists:
        print(L)
if __name__ == '__main__':
main()
|
GluuFederation/community-edition-setup
|
schema/generator.py
|
Python
|
mit
| 8,810
| 0.000908
|
#!/usr/bin/env python3
"""
A Module containing the classes which generate schema files from JSON.
"""
import json
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
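# Note: this is the standard functools.cmp_to_key recipe. Illustrative check,
# sorting ':'-suffixed names numerically (as __compare_defs below does for OID
# macros) rather than lexicographically:
#   sorted(['x:10', 'x:2'], key=cmp_to_key(lambda a, b: int(a.split(':')[1]) - int(b.split(':')[1])))
#   -> ['x:2', 'x:10']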
class SchemaGenerator(object):
def __init__(self, jsontext, header=None):
self.data = json.loads(jsontext)
self.header = header
self.macroMap = {}
self.macroMapIndex = {}
if self.data['oidMacros']:
self.__mapMacros()
def __mapMacros(self):
if not self.data['oidMacros']:
return
macros = self.data['oidMacros']
# Find the root
for mac in macros:
if '.' in macros[mac]:
self.macroMap[mac] = macros[mac]
break
if not self.macroMap:
return
while len(macros) != len(self.macroMap):
for mac in macros:
if ':' not in macros[mac]:
continue
oid = macros[mac]
parent, index = oid.split(':')
if parent in self.macroMap:
self.macroMap[mac] = self.macroMap[parent] + '.' + index
self.macroMapIndex[mac] = 1
def __compare_defs(self, m1, m2):
n1 = int(m1[1].split(':')[1])
n2 = int(m2[1].split(':')[1])
return n1 - n2
def __get_macro_order(self, macros, parent):
children = [(k, v) for k, v in list(macros.items()) if parent in v]
items = [parent]
for k, v in sorted(children, key=cmp_to_key(self.__compare_defs)):
items.extend(self.__get_macro_order(macros, k))
return items
def generate_schema(self):
"""Function that generates the schema and returns it as a string"""
self.outString = ''
self.outString += self.header if self.header else ""
if len(self.outString):
self.outString += "\n"
if len(self.data['oidMacros']) > 0:
macros = self.data['oidMacros']
root = ''
for definition in macros:
if '.' in macros[definition]:
root = definition
break
order = self.__get_macro_order(macros, root)
for oid in order:
self.outString += "objectIdentifier {:15} {}\n".format(
oid, macros[oid])
self.outString += '\n'
for attr in self.data['attributeTypes']:
attr_str = "attributetype ( {} NAME ".format(attr['oid'])
if len(attr['names']) > 1:
namestring = ''
for name in attr['names']:
namestring += "'{}' ".format(name)
attr_str += "( {})".format(namestring)
elif len(attr['names']) == 1:
attr_str += "'{}'".format(attr['names'][0])
else:
print("Invalid attribute data. Doesn't define a name")
if 'desc' in attr:
attr_str += "\n\tDESC '{}'".format(attr['desc'])
if 'equality' in attr:
attr_str += "\n\tEQUALITY {}".format(attr['equality'])
if 'substr' in attr:
attr_str += "\n\tSUBSTR {}".format(attr['substr'])
if 'syntax' in attr:
attr_str += "\n\tSYNTAX {}".format(attr['syntax'])
if 'ordering' in attr:
attr_str += "\n\tORDERING {}".format(attr['ordering'])
if 'x_origin' in attr:
attr_str += "\n\tX-ORIGIN '{}'".format(attr['x_origin'])
attr_str += " )\n\n"
self.outString += attr_str
for obc in self.data['objectClasses']:
obc_str = "objectclass ( {} NAME ".format(obc['oid'])
if len(obc['names']) > 1:
namestring = ''
for name in obc['names']:
namestring += "'{}' ".format(name)
obc_str += "( {})".format(namestring)
elif len(obc['names']) == 1:
obc_str += "'{}'".format(obc['names'][0])
else:
print("Invalid objectclass data. Doesn't define a name")
if 'desc' in obc:
obc_str += "\n\tDESC '{}'".format(obc['desc'])
if 'sup' in obc:
sup = " $ ".join(obc['sup'])
obc_str += "\n\tSUP ( {} )".format(sup)
obc_str += "\n\t{}".format(obc['kind'])
if 'must' in obc:
must = " $ ".join(obc['must'])
obc_str += "\n\tMUST ( {} )".format(must)
if 'may' in obc:
may = " $ ".join(obc['may'])
obc_str += "\n\tMAY ( {} )".format(may)
if 'x_origin' in obc:
obc_str += "\n\tX-ORIGIN '{}'".format(obc['x_origin'])
obc_str += " )\n\n"
self.outString += obc_str
return self.outString.strip()
def _getOID(self, model):
oid = model['oid']
if oid.replace('.','').isdigit():
return oid
oid = self.macroMap[oid] + '.' + str(self.macroMapIndex[oid])
self.macroMapIndex[model['oid']] += 1
return oid
def generate_ldif(self):
"""Function which generates the OpenDJ LDIF format schema string."""
self.outString = ''
self.outString += self.header if self.header else ""
if len(self.outString):
self.outString += "\n"
self.outString += "dn: cn=schema\nobjectClass: top\nobjectClass: " \
+ "ldapSubentry\nobjectCl
|
ass: subschema\ncn: schema\n"
for attr in self.data['attributeTypes']:
attr_str = "attributeTypes: ( {} NAME ".format(self._getOID(attr))
if len(attr['names']) > 1:
namestring = ''
for name in attr['names']:
namestring += "'{}' ".format(name)
attr_str += "( {})".format(namestring)
elif len(attr['names']) == 1:
attr_str += "'{}'".format(attr['names'][0])
else:
print("Invalid attribute data. Doesn't define a name")
if 'desc' in attr:
attr_str += "\n DESC '{}'".format(attr['desc'])
if 'equality' in attr:
attr_str += "\n EQUALITY {}".format(attr['equality'])
if 'substr' in attr:
attr_str += "\n SUBSTR {}".format(attr['substr'])
if 'syntax' in attr:
attr_str += "\n SYNTAX {}".format(attr['syntax'])
if 'ordering' in attr:
attr_str += "\n ORDERING {}".format(attr['ordering'])
if 'x_origin' in attr:
attr_str += "\n X-ORIGIN '{}'".format(attr['x_origin'])
attr_str += " )\n"
self.outString += attr_str
for obc in self.data['objectClasses']:
obc_str = "objectClasses: ( {} NAME ".format(self._getOID(obc))
if len(obc['names']) > 1:
namestring = ''
for name in obc['names']:
namestring += "'{}' ".format(name)
obc_str += "( {})".format(namestring)
elif len(obc['names']) == 1:
obc_str += "'{}'".format(obc['names'][0])
else:
print("Invalid objectclass data. Doesn't define a name")
if 'desc' in obc:
obc_str += "\n DESC '{}'".format(obc['desc'])
if 'sup' in obc:
sup = " $ ".join(obc['sup'])
obc_str += "\n SUP ( {} )".format(sup)
obc_st
|
evernym/plenum
|
plenum/server/consensus/ordering_service.py
|
Python
|
apache-2.0
| 109,275
| 0.00205
|
import itertools
import logging
import time
from _sha256 import sha256
from collections import defaultdict, OrderedDict, deque
from functools import partial
from typing import Tuple, List, Set, Optional, Dict, Iterable, Callable
from orderedset._orderedset import OrderedSet
from sortedcontainers import SortedList
from common.exceptions import PlenumValueError, LogicError
from common.serializers.serialization import state_roots_serializer, invalid_index_serializer, serialize_msg_for_signing
from crypto.bls.bls_bft_replica import BlsBftReplica
from plenum.common.config_util import getConfig
from plenum.common.constants import POOL_LEDGER_ID, SEQ_NO_DB_LABEL, AUDIT_LEDGER_ID, TXN_TYPE, \
LAST_SENT_PP_STORE_LABEL, AUDIT_TXN_PP_SEQ_NO, AUDIT_TXN_VIEW_NO, AUDIT_TXN_PRIMARIES, AUDIT_TXN_DIGEST, \
PREPREPARE, PREPARE, COMMIT, DOMAIN_LEDGER_ID, TS_LABEL, AUDIT_TXN_NODE_REG, CONFIG_LEDGER_ID
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.exceptions import SuspiciousNode, InvalidClientMessageException, SuspiciousPrePrepare, \
UnknownIdentifier
from plenum.common.ledger import Ledger
from plenum.common.messages.internal_messages import RequestPropagates, BackupSetupLastOrdered, \
RaisedSuspicion, ViewChangeStarted, NewViewCheckpointsApplied, MissingMessage, CheckpointStabilized, \
ReAppliedInNewView, NewViewAccepted, CatchupCheckpointsApplied, MasterReorderedAfterVC
from plenum.common.messages.node_messages import PrePrepare, Prepare, Commit, Reject, ThreePhaseKey, Ordered, \
OldViewPrePrepareRequest, OldViewPrePrepareReply
from plenum.common.metrics_collector import MetricsName, MetricsCollector, NullMetricsCollector
from plenum.common.request import Request
from plenum.common.router import Subscription
from plenum.common.stashing_router import PROCESS
from plenum.common.timer import TimerService, RepeatingTimer
from plenum.common.txn_util import get_payload_digest, get_payload_data, get_seq_no, get_txn_time
from plenum.common.types import f
from plenum.common.util import compare_3PC_keys, updateNamedTuple, SortedDict, getMaxFailures, mostCommonElement, \
get_utc_epoch, max_3PC_key, reasonForClientFromException
from plenum.server.batch_handlers.three_pc_batch import ThreePcBatch
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.batch_id import BatchID
from plenum.server.consensus.metrics_decorator import measure_consensus_time
from plenum.server.consensus.ordering_service_msg_validator import OrderingServiceMsgValidator
from plenum.server.consensus.primary_selector import PrimariesSelector
from plenum.server.consensus.utils import replica_name_to_node_name, get_original_viewno, preprepare_to_batch_id
from plenum.server.replica_helper import PP_APPLY_REJECT_WRONG, PP_APPLY_WRONG_DIGEST, PP_APPLY_WRONG_STATE, \
PP_APPLY_ROOT_HASH_MISMATCH, PP_APPLY_HOOK_ERROR, PP_SUB_SEQ_NO_WRONG, PP_NOT_FINAL, PP_APPLY_AUDIT_HASH_MISMATCH, \
PP_REQUEST_ALREADY_ORDERED, PP_CHECK_NOT_FROM_PRIMARY, PP_CHECK_TO_PRIMARY, PP_CHECK_DUPLICATE, \
PP_CHECK_INCORRECT_POOL_STATE_ROOT, PP_CHECK_OLD, PP_CHECK_REQUEST_NOT_FINALIZED, PP_CHECK_NOT_NEXT, \
PP_CHECK_WRONG_TIME, Stats, OrderedTracker, TPCStat, generateName, PP_WRONG_PRIMARIES
from plenum.server.replica_freshness_checker import FreshnessChecker
from plenum.server.replica_helper import replica_batch_digest
from plenum.server.replica_validator_enums import STASH_VIEW_3PC, STASH_CATCH_UP, STASH_WAITING_FIRST_BATCH_IN_VIEW
from plenum.server.request_handlers.ledgers_freeze.ledger_freeze_helper import StaticLedgersFreezeHelper
from plenum.server.request_managers.write_request_manager import WriteRequestManager
from plenum.server.suspicion_codes import Suspicions
from stp_core.common.log import getlogger
logger = getlogger()
class OrderingService:
def __init__(self,
data: ConsensusSharedData,
timer: TimerService,
bus: InternalBus,
network: ExternalBus,
write_manager: WriteRequestManager,
bls_bft_replica: BlsBftReplica,
freshness_checker: FreshnessChecker,
stasher=None,
get_current_time: Optional[Callable[[], float]] = None,
get_time_for_3pc_batch: Optional[Callable[[], int]] = None,
metrics: MetricsCollector = NullMetricsCollector()):
self.metrics = metrics
self._data = data
self._requests = self._data.requests
self._timer = timer
self._bus = bus
self._network = network
self._write_manager = write_manager
self._name = self._data.name
# TODO: We shouldn't use get_utc_epoch here, time needs to be under full control through TimerService
self.get_time_for_3pc_batch = get_time_for_3pc_batch or get_utc_epoch
# Flag which node set, when it have set new primaries and need to send batch
self.primaries_batch_needed = False
self._config = getConfig()
# TODO: Change just to self._stasher = stasher
self._stasher = stasher
self._subscription = Subscription()
self._validator = OrderingServiceMsgValidator(self._data)
self.get_current_time = get_current_time or self._timer.get_current_time
self._out_of_order_repeater = RepeatingTimer(self._timer,
self._config.PROCESS_STASHED_OUT_OF_ORDER_COMMITS_INTERVAL,
self._process_stashed_out_of_order_commits,
active=False)
"""
Maps from legacy replica code
"""
self._state_root_serializer = state_roots_serializer
# Keeps a map of PRE-PREPAREs which did not satisfy timestamp
# criteria, they can be accepted if >f PREPAREs are encountered.
# This is emptied on view change. With each PRE-PREPARE, a flag is
# stored which indicates whether there are sufficient acceptable
# PREPAREs or not
self.pre_prepares_stashed_for_incorrect_time = {}
# Time of the last PRE-PREPARE which satisfied all validation rules
# (time, digest, roots were all correct). This time is not to be
# reverted even if the PRE-PREPAREs are not ordered. This implies that
# the next primary would have seen all accepted PRE-PREPAREs or another
# view change will happen
self.last_accepted_pre_prepare_time = None
# PRE-PREPAREs timestamps stored by non primary replica to check
# obsolescence of incoming PrePrepares. Pre-prepares with the same
# 3PC key are not merged since we need to keep incoming timestamps
# for each new PP from every nodes separately.
# Dictionary:
# key: Tuple[pp.viewNo, pp.seqNo]
# value: Dict[Tuple[PrePrepare, sender], timestamp]
self.pre_prepare_tss = defaultdict(dict)
# PRE-PREPAREs that are waiting to be processed but do not have the
# corresponding request finalised. Happens when replica has not been
# forwarded the request by the node but is getting 3 phase messages.
# The value is a list since a malicious entry might send PRE-PREPARE
# with a different digest and since we dont have the request finalised
# yet, we store all PRE-PPREPAREs
# type: List[Tuple[PrePrepare, str, Set[Tuple[str, int]]]]
self.prePreparesPendingFinReqs = []
# PrePrepares waiting for previous PrePrepares, key being tuple of view
# number and pre-prepare sequence numbers and value being tuple of
# PrePrepare and sender
# TODO: Since pp_seq_no will start from 1 in each view, the comparator
# of SortedDict needs to change
self.prePreparesPendingPrevPP = SortedDict(lambda k: (k[0], k[1]))
# PREPAREs that are stored by non primary replica for which it has not
# got any PRE-PREPARE. Dictionary that stores a tuple of view no and
# prepare sequence number as key and a deque o
|
orbitfp7/horizon
|
openstack_dashboard/test/api_tests/network_tests.py
|
Python
|
apache-2.0
| 34,720
| 0
|
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import uuid
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from novaclient.v1_1 import floating_ip_pools
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class NetworkClientTestCase(test.APITestCase):
def test_networkclient_no_neutron(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(False)
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.nova.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager)
def test_networkclient_neutron(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.neutronclient = self.stub_neutronclient()
self.neutronclient.list_extensions() \
.AndReturn({'extensions': self.api_extensions.list()})
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.neutron.SecurityGroupManager)
def test_networkclient_neutron_with_nova_security_group(self):
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(True)
self.neutronclient = self.stub_neutronclient()
self.neutronclient.list_extensions().AndReturn({'extensions': []})
self.mox.ReplayAll()
nc = api.network.NetworkClient(self.request)
self.assertIsInstance(nc.floating_ips, api.neutron.FloatingIpManager)
self.assertIsInstance(nc.secgroups, api.nova.SecurityGroupManager)
class NetworkApiNovaTestBase(test.APITestCase):
def setUp(self):
super(NetworkApiNovaTestBase, self).setUp()
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.AndReturn(False)
class NetworkApiNovaSecurityGroupTests(NetworkApiNovaTestBase):
def test_server_update_security_groups(self):
all_secgroups = self.security_groups.list()
added_secgroup = all_secgroups[2]
rm_secgroup = all_secgroups[0]
cur_secgroups_raw = [{'id': sg.id, 'name': sg.name,
'rules': []}
for sg in all_secgroups[0:2]]
cur_secgroups_ret = {'security_groups': cur_secgroups_raw}
new_sg_ids = [sg.id for sg in all_secgroups[1:3]]
instance_id = self.servers.first().id
novaclient = self.stub_novaclient()
novaclient.security_groups = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.client = self.mox.CreateMockAnything()
novaclient.security_groups.list().AndReturn(all_secgroups)
url = '/servers/%s/os-security-groups' % instance_id
novaclient.client.get(url).AndReturn((200, cur_secgroups_ret))
novaclient.servers.add_security_group(instance_id, added_secgroup.name)
novaclient.servers.remove_security_group(instance_id, rm_secgroup.name)
self.mox.ReplayAll()
api.network.server_update_security_groups(
self.request, instance_id, new_sg_ids)
class NetworkApiNovaFloatingIpTests(NetworkApiNovaTestBase):
def test_floating_ip_pools_list(self):
pool_names = ['pool1', 'pool2']
pools = [floating_ip_pools.FloatingIPPool(
None, {'name': pool}) for pool in pool_names]
novaclient = self.stub_novaclient()
novaclient.floating_ip_pools = self.mox.CreateMockAnything()
novaclient.floating_ip_pools.list().AndReturn(pools)
self.mox.ReplayAll()
ret = api.network.floating_ip_pools_list(self.request)
self.assertEqual(pool_names, [p.name for p in ret])
def test_floating_ip_list(self):
fips = self.api_floating_ips.list()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.list().AndReturn(fips)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_list(self.request)
for r, e in zip(ret, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(e, attr), getattr(r, attr))
self.assertEqual(e.instance_id, r.port_id)
exp_instance_type = 'compute' if e.instance_id else None
self.assertEqual(exp_instance_type, r.instance_type)
def test_floating_ip_get(self):
fip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.get(fip.id).AndReturn(fip)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_get(self.request, fip.id)
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(fip, attr), getattr(ret, attr))
self.assertEqual(fip.instance_id, ret.port_id)
self.assertEqual(fip.instance_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
def test_floating_ip_allocate(self):
pool_name = 'fip_pool'
fip = [fip for fip in self.api_floating_ips.list()
if not fip.instance_id][0]
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.create(pool=pool_name).AndReturn(fip)
self.mox.ReplayAll()
ret = api.network.tenant_floating_ip_allocate(self.request, pool_name)
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'instance_id']:
self.assertEqual(getattr(fip, attr), getattr(ret, attr))
self.assertIsNone(ret.port_id)
self.assertIsNone(ret.instance_type)
def test_floating_ip_release(self):
fip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.delete(fip.id)
self.mox.ReplayAll()
api.network.tenant_floating_ip_release(self.request, fip.id)
def test_floating_ip_associate(self):
server = api.nova.Server(self.servers.first(), self.request)
floating_ip = self.floating_ips.first()
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
novaclient.floating_ips.get(floating_ip.id).AndReturn(floating_ip)
novaclient.servers.add_floating_ip(server.id, floating_ip.ip) \
.AndReturn(server)
self.mox.ReplayAll()
api.network.floating_ip_associate(self.request,
floating_ip.id,
server.id)
def test_floating_ip_disassociate(self):
server = api.nova.Server(self.servers.first(), self.request)
floating_ip = self.api_floating_ips.first()
novaclient = self.stub_novaclient()
novaclie
|
google-research/lag
|
libml/layers.py
|
Python
|
apache-2.0
| 18,151
| 0.001598
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom neural network layers.
Low-level primitives such as custom convolution with custom initialization.
"""
import math
import numpy as np
import tensorflow as tf
NCHW, NHWC = 'NCHW', 'NHWC'
DATA_FORMAT_ORDER = {
'channels_first': NCHW,
'channels_last': NHWC
}
def smart_shape(x):
s, t = x.shape, tf.shape(x)
return [t[i] if s[i].value is None else s[i] for i in range(len(s))]
def to_nchw(x):
return tf.transpose(x, [0, 3, 1, 2])
def to_nhwc(x):
return tf.transpose(x, [0, 2, 3, 1])
def torus_pad(x, w, order=NCHW):
if w < 1:
return x
if order == NCHW:
y = tf.concat([x[:, :, -w:], x, x[:, :, :w]], axis=2)
y = tf.concat([y[:, :, :, -w:], y, y[:, :, :, :w]], axis=3)
else:
y = tf.concat([x[:, -w:], x, x[:, :w]], axis=1)
y = tf.concat([y[:, :, -w:], y, y[:, :, :w]], axis=2)
return y
def downscale2d(x, n=2, order=NCHW):
"""Box downscaling.
Args:
x: 4D tensor.
n: integer scale.
order: NCHW or NHWC.
Returns:
4D tensor down scaled by a factor n.
"""
if n <= 1:
return x
if order == NCHW:
return tf.nn.avg_pool(x, [1, 1, n, n], [1, 1, n, n], 'VALID', 'NCHW')
else:
return tf.nn.avg_pool(x, [1, n, n, 1], [1, n, n, 1], 'VALID', 'NHWC')
def upscale2d(x, n=2, order=NCHW):
"""Box upscaling (also called nearest neighbors).
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
4D tensor up scaled by a factor n.
"""
if n == 1:
return x
s, ts = x.shape, tf.shape(x)
if order == NCHW:
x = tf.reshape(x, [-1, s[1], ts[2], 1, ts[3], 1])
x = tf.tile(x, [1, 1, 1, n, 1, n])
x = tf.reshape(x, [-1, s[1], ts[2] * n, ts[3] * n])
else:
x = tf.reshape(x, [-1, ts[1], 1, ts[2], 1, s[3]])
x = tf.tile(x, [1, 1, n, 1, n, 1])
x = tf.reshape(x, [-1, ts[1] * n, ts[2] * n, s[3]])
return x
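# Shape sketch (illustrative): for NCHW input of shape (8, 3, 32, 32),
# downscale2d(x, 2) -> (8, 3, 16, 16) and upscale2d(x, 2) -> (8, 3, 64, 64);
# their composition upscale2d(downscale2d(x, n), n) is what remove_details2d
# below computes.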
def remove_details2d(x, n=2):
"""Remove box details by upscaling a downscaled image.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
4D tensor image with removed details of size nxn.
"""
if n == 1:
return x
return upscale2d(downscale2d(x, n), n)
def bicubic_downscale2d(x, n=2, order=NCHW):
"""Downscale x by a factor of n, using dense bicubic weights.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
4D tensor down scaled by a factor n.
"""
def kernel_weight(x):
"""https://clouard.users.greyc.fr/Pantheon/experiments/rescaling/index-en.html#bicubic"""
x = abs(x)
if x <= 1:
return 1.5 * x ** 3 - 2.5 * x ** 2 + 1
elif 1 < x < 2:
return - 0.5 * x ** 3 + 2.5 * x ** 2 - 4 * x + 2
else:
return 0
def kernel():
k1d = np.array([kernel_weight((x + 0.5) / n) for x in range(-2 * n, 2 * n)])
k1d /= k1d.sum()
k2d = np.outer(k1d, k1d.T).astype('f')
return tf.constant(k2d.reshape((4 * n, 4 * n, 1, 1)))
if order == NHWC:
x = to_nchw(x)
y = tf.pad(x, [[0, 0], [0, 0], [2 * n - 1, 2 * n], [2 * n - 1, 2 * n]], mode='REFLECT')
s, ts = y.shape, tf.shape(y)
y = tf.reshape(y, [ts[0] * s[1], 1, ts[2], ts[3]])
y = tf.nn.conv2d(y, filter=kernel(), strides=[1, 1, n, n], padding='VALID', data_format='NCHW')
y = tf.reshape(y, [ts[0], s[1], tf.shape(y)[2], tf.shape(y)[3]])
return y if order == NCHW else to_nhwc(y)
def space_to_channels(x, n=2, order=NCHW):
"""Reshape image tensor by moving space to channels.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
Reshaped 4D tensor image of shape (N, C * n**2, H // n, W // n).
"""
s, ts = x.shape, tf.shape(x)
if order == NCHW:
x = tf.reshape(x, [-1, s[1], ts[2] // n, n, ts[3] // n, n])
x = tf.transpose(x, [0, 1, 3, 5, 2, 4])
x = tf.reshape(x, [-1, s[1] * (n ** 2), ts[2] // n, ts[3] // n])
else:
x = tf.reshape(x, [-1, ts[1] // n, n, ts[2] // n, n, s[3]])
x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
x = tf.reshape(x, [-1, ts[1] // n, ts[2] // n, s[3] * (n ** 2)])
return x
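# Shape sketch (illustrative): space_to_channels on an NCHW tensor of shape
# (N, C, H, W) with n=2 returns (N, 4*C, H//2, W//2); channels_to_space below
# performs the inverse reshape.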
def channels_to_space(x, n=2, order=NCHW):
"""Reshape image tensor by moving channels to space.
Args:
x: 4D tensor in NCHW format.
n: integer scale (must be a power of 2).
Returns:
      Reshaped 4D tensor image of shape (N, C // n**2, H * n, W * n).
"""
s, ts = x.shape, tf.shape(x)
if order == NCHW:
x = tf.reshape(x, [-1, s[1] // (n ** 2), n, n, ts[2], ts[3]])
x = tf.transpose(x, [0, 1, 4, 2, 5, 3])
x = tf.reshape(x, [-1, s[1] // (n ** 2), ts[2] * n, ts[3] * n])
elif order == NHWC:
x = tf.reshape(x, [-1, ts[1], ts[2], n, n, s[3] // (n ** 2)])
x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
x = tf.reshape(x, [-1, ts[1] * n, ts[2] * n, s[3] // (n ** 2)])
else:
assert 0, 'Only supporting NCHW and NHWC.'
return x
class HeNormalInitializer(tf.initializers.random_normal):
def __init__(self, slope, dtype=tf.float32):
self.slope = slope
self.dtype = dtype
def get_config(self):
return dict(slope=self.slope, dtype=self.dtype.name)
def __call__(self, shape, dtype=None, partition_info=None):
del partition_info
if dtype is None:
dtype = self.dtype
std = np.sqrt(2) * tf.rsqrt((1. + self.slope ** 2) *
tf.cast(tf.reduce_prod(shape[:-1]),
tf.float32))
return tf.random_normal(shape, stddev=std, dtype=dtype)
def blend_resolution(lores, hires, alpha):
"""Blend two images.
Args:
lores: 4D tensor in NCHW, low resolution image.
hires: 4D tensor in NCHW, high resolution image.
alpha: scalar tensor in [0, 1], 0 produces the low resolution, 1 the high one.
Returns:
4D tensor in NCHW of blended images.
"""
return lores + alpha * (hires - lores)
class SingleUpdate:
COLLECTION = 'SINGLE_UPDATE'
@classmethod
def get_update(cls, variable):
for v, u in tf.get_collection(cls.COLLECTION):
if v == variable:
return u
return None
@classmethod
def register_update(cls, variable, update):
assert cls.get_update(variable) is None
tf.add_to_collection(cls.COLLECTION, (variable, update))
return update
class Conv2DSpectralNorm(tf.layers.Conv2D):
def build(self, input_shape):
was_built = self.built
tf.layers.Conv2D.build(self, input_shape)
self.built = was_built
shape = self.kernel.shape.as_list()
self.u = self.add_variable(name='u', shape=[1, shape[-1]], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(),
trainable=False)
self.built = True
def call(self, inputs):
shape = self.kernel.shape.as_list()
kernel = self.kernel
if self.data_format == 'channels_first':
kernel = tf.transpose(kernel, [0, 2, 3, 1])
kernel = tf.reshape(kernel, [-1, shape[-1]])
u = self.u
v_ = tf.nn.l2_normalize(tf.matmul(u, kernel, transpose_b=True))
u_ = tf.nn.l2_normalize(tf.matmul(v_, kernel))
sigma = tf.squeeze(tf.matmul(tf.matmul(v_, kernel), u_, transpose_b=True))
if Si
|