repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
MatthieuMichon/f24 | src/v1/data_suppliers/cache.py | Python | gpl-2.0 | 2,208 | 0 | #!/usr/bin/python3
"""
f24.data_suppliers.cache
~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the class implementing a simple cache.
"""
import hashlib
import os
import time
import json
from pathlib import Path
class Cache:
CACHE_DIR = 'cache_db'
CACHE_FILENAME_LEN = 40
def __init__(self):
p = Path('.')
self.path = p / self.CACHE_DIR
self.file_list = self.get_file_list(self.path)
def lookup(self, uri, ttl=None):
"""Perform a lookup for the given URI
:param string uri: Target URI
:param int ttl: Time-to-live in seconds
"""
sha1 = self.get_hash_str(uri)
# TBD: add TTL check
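# A possible TTL check (hypothetical sketch, not wired in): compare the stored
# timestamp against the current time and treat stale entries as misses.
# if ttl is not None and sha1 in self.file_list:
#     with open(str(self.path / sha1)) as cache_file:
#         if int(time.time()) - json.load(cache_file)['ts'] > ttl:
#             return None  # entry is older than the TTL, force a refresh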
file_present = any(x == sha1 for x in self.file_list)
print("Result: {}; URI: {}; sha1: {}".format(file_present, uri, sha1))
if file_present:
with open(str(self.path / sha1)) as cache_file:
ts_data = json.load(cache_file)
return ts_data['data']
else:
return None
def store(self, uri, data):
"""Perform a lookup for the given URI
:param string uri: Target URI
:param dict data: Time-to-live in seconds
"""
ts = int(time.time())
ts_data = {}
ts_data["ts"] = ts
ts_data["data"] = data
sha1 = self.get_hash_str(uri)
with open(str(self.path / sha1), mode='w') as cache_file:
json.dump(ts_data, cache_file)
def get_file_list(self, path):
if not path.exists():
print('Creating missing directory cache: {}'.format(path))
os.mkdir(str(path))
return [str(x.name) for x in path.iterdir() if not x.is_dir()]
def get_hash_str(self, data):
s = hashlib.sha1()
s.update(str.encode(data))
return s.hexdigest()
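# Note: a SHA-1 hex digest is 40 characters, matching CACHE_FILENAME_LEN above;
# cache files are simply named by the digest of their URI.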
def main():
c = Cache()
c.lookup(uri='example.com/path/filename?arg=lil')
c.lookup(uri='example.com/path/filename?arg=lol')
c.lookup(uri='example.com/path/filename?arg=luzl', ttl=3)
my_dict = {}
my_dict['key1'] = 'arg1'
my_dict['key2'] = 'arg2'
c.store('example.com/path/filename?arg=lol', my_dict)
if __name__ == "__main__":
main()
|
memsharded/conan | conans/test/unittests/model/options_test.py | Python | mit | 17,355 | 0.00219 | import sys
import unittest
import six
from conans.errors import ConanException
from conans.model.options import Options, OptionsValues, PackageOptionValues, PackageOptions, \
option_undefined_msg
from conans.model.ref import ConanFileReference
class OptionsTest(unittest.TestCase):
def setUp(self):
package_options = PackageOptions.loads("""{static: [True, False],
optimized: [2, 3, 4],
path: ANY}""")
values = PackageOptionValues()
values.add_option("static", True)
values.add_option("optimized", 3)
values.add_option("path", "NOTDEF")
package_options.values = values
self.sut = Options(package_options)
def test_int(self):
self.assertEqual(3, int(self.sut.optimized))
def test_in(self):
package_options = PackageOptions.loads("{static: [True, False]}")
sut = Options(package_options)
self.assertTrue("static" in sut)
self.assertFalse("shared" in sut)
self.assertTrue("shared" not in sut)
self.assertFalse("static" not in sut)
def undefined_value_test(self):
""" Not assigning a value to options will raise an error at validate() step
"""
package_options = PackageOptions.loads("""{
path: ANY}""")
with six.assertRaisesRegex(self, ConanException, option_undefined_msg("path")):
package_options.validate()
package_options.path = "Something"
package_options.validate()
def undefined_value_none_test(self):
""" The value None is allowed as default, not necessary to default to it
"""
package_options = PackageOptions.loads('{path: [None, "Other"]}')
package_options.validate()
package_options = PackageOptions.loads('{path: ["None", "Other"]}')
package_options.validate()
def items_test(self):
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "NOTDEF"),
("static", "True")])
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "NOTDEF"),
("static", "True")])
def change_test(self):
self.sut.path = "C:/MyPath"
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "C:/MyPath"),
("static", "True")])
self.assertEqual(self.sut.items(), [("optimized", "3"), ("path", "C:/MyPath"),
("static", "True")])
with six.assertRaisesRegex(self, ConanException,
"'5' is not a valid 'options.optimized' value"):
self.sut.optimized = 5
def boolean_test(self):
self.sut.static = False
self.assertFalse(self.sut.static)
self.assertTrue(not self.sut.static)
self.assertTrue(self.sut.static == False)
self.assertFalse(self.sut.static == True)
self.assertFalse(self.sut.static != False)
self.assertTrue(self.sut.static != True)
self.assertTrue(self.sut.static == "False")
self.assertTrue(self.sut.static != "True")
def basic_test(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("thread", True)
boost_values.add_option("thread.multi", "off")
poco_values = PackageOptionValues()
poco_values.add_option("deps_bundled", True)
hello1_values = PackageOptionValues()
hello1_values.add_option("static", False)
hello1_values.add_option("optimized", 4)
options = {"Boost": boost_values,
"Poco": poco_values,
"Hello1": hello1_values}
down_ref = ConanFileReference.loads("Hello0/0.1@diego/testing")
own_ref = ConanFileReference.loads("Hello1/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "4"),
("path", "NOTDEF"),
("static", "False"),
("Boost:static", "False"),
("Boost:thread", "True"),
("Boost:thread.multi", "off"),
("Poco:deps_bundled", "True")])
boost_values = PackageOptionValues()
boost_values.add_option("static", 2)
boost_values.add_option("thread", "Any")
boost_values.add_option("thread.multi", "on")
poco_values = PackageOptionValues()
poco_values.add_option("deps_bundled", "What")
hello1_values = PackageOptionValues()
hello1_values.add_option("static", True)
hello1_values.add_option("optimized", "2")
options2 = {"Boost": boost_values,
"Poco": poco_values,
"Hello1": hello1_values}
down_ref = ConanFileReference.loads("Hello2/0.1@diego/testing")
with six.assertRaisesRegex(self, ConanException, "Hello2/0.1@diego/testing tried to change "
"Hello1/0.1@diego/testing option optimized to 2"):
self.sut.propagate_upstream(options2, down_ref, own_ref)
self.assertEqual(self.sut.values.dumps(),
"""optimized=4
path=NOTDEF
static=False
Boost:static=False
Boost:thread=True
Boost:thread.multi=off
Poco:deps_bundled=True""")
def pattern_positive_test(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", F | alse)
bo | ost_values.add_option("path", "FuzzBuzz")
options = {"Boost.*": boost_values}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "3"),
("path", "FuzzBuzz"),
("static", "False"),
("Boost.*:path", "FuzzBuzz"),
("Boost.*:static", "False"),
])
def multi_pattern_test(self):
boost_values = PackageOptionValues()
boost_values.add_option("static", False)
boost_values.add_option("path", "FuzzBuzz")
boost_values2 = PackageOptionValues()
boost_values2.add_option("optimized", 2)
options = {"Boost.*": boost_values,
"*": boost_values2}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [("optimized", "2"),
("path", "FuzzBuzz"),
("static", "False"),
('*:optimized', '2'),
("Boost.*:path", "FuzzBuzz"),
("Boost.*:static", "False"),
])
def multi_pattern_error_test(self):
boost_values = PackageOptionValues()
boost_values.add_option("optimized", 4)
boost_values2 = PackageOptionValues()
boost_values2.add_option("optimized", 2)
options = {"Boost.*": boost_values,
"*": boost_values2}
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
self.sut.propagate_upstream(options, down_ref, own_ref)
self.assertEqual(self.sut.values.as_list(), [ |
melt6457/MMProteinAnalysis | Source/presentation.py | Python | mit | 2,760 | 0.002174 | import ProteinAnalyzer as pa
import fileManager as files
import matplotlib.pyplot as plt
import simulation
print('End of Summer Presentation Files')
simNames = pa.selectMultipleSimulations()
simNames2 = pa.selectMultipleSimulations()
sims = []
sims2 = []
simNames3 = pa.selectMultipleSimulations()
sims3 = []
for count in range(0, len(simNames)):
fileSet = files.getSimulationFiles(simNames[count])
sim1 = pa.createSimAnalysis(fileSet)
sim1.loadLog()
sim1.loadRMSD()
sims.append(sim1)
for count in range(1, len(simNames)):
sims[0].combine(sims[count])
# sims[0].rmsd.calculateFrameTimes(0, sims[0].log.getEndTime() - sims[0].log.getStartTime())
plt.plot(sims[0].log.times, sims[0].log.pot_energy)
for count in range(0, len(simNames2)):
fileSet2 = files.getSimulationFiles(simNames2[count])
sim2 = pa.createSimAnalysis(fileSet2)
sim2.loadLog()
sim2.loadRMSD()
sims2.append(sim2)
for num in range(1, len(simNames2)):
sims2[0].combine(sims2[num])
# sims2[0].rmsd.calculateFrameTimes(0, sims2[0].log.getEndTime() - sims2[0].log.getStartTime())
# plt.plot(sims2[0].rmsd.times, sims2[0].rmsd.RMSDs)
plt.plot(sims2[0].log.times, sims2[0].log.pot_energy)
'''for count in range(0, len(simNames)):
fileSet = files.getSimulationFiles(simNames3[count])
sim3 = pa.createSimAnalysis(fileSet)
sim3.loadLog()
sim3.loadRMSD()
sims3.append(sim3)
for count in range(1, len(simNames)):
sims3[0].combine(sims[count])
plt.plot(sims3[0].log.times, sims3[0].log.pot_energy)'''
avePotEnergy = sims[0].calcAvePotentialEnergy()
avePotEnergy2 = sims2[0].calcAvePotentialEnergy()
#avePotEnergy3 = sims3[0].calcAvePotentialEnergy()
print("The Average Potential Energy of sim 1 is: ", avePotEnergy)
print("The Average Potential Energy of sim 2 is: ", avePotEnergy2)
#print("The Average Potential Energy of sim 3 is: ", avePotEnergy3)
plt.show()
'''print('Data')
aveDAT = sims[0].calcAveDAT()
stdDevDAT = sims[0].calcStdDevDAT()
print("The Average of the 300 Data is: ", aveDAT)
print("The Standard Deviation of the 300 Data:", stdDevDAT)
aveDATs = []
stdDevDATs = []
aveDATs.append(aveDAT)
stdDevDATs.append(stdDevDAT)
objects = ('300 K', '500 K', '300 K Complex')
aveDAT2 = sims2[0].calcAveDAT()
stdDevDAT2 = sims2[0].calcStdDevDAT()
print("The Average of the 300 Data is: ", aveDAT2)
print("The Standard Deviation of the 300 Data:", stdDevDAT2)
aveDATs.append(aveDAT2)
stdDevDATs.append(stdDevDAT2)
aveDAT3 = sims3[0].calcAveDAT()
stdDevDAT3 = sims3[0].calcStdDevDAT()
print("The Average of the 500 Data is: ", aveDAT3)
print("The Standard Deviation of the 500 Data:", stdDevDAT3)
aveDATs.append(aveDAT3)
stdDevDATs.append(stdDevDAT3)
sims[0].graphErrorBars(objects, aveDATs, stdDevDATs)''' |
qiita-spots/qp-shotgun | qp_shogun/shogun/shogun.py | Python | bsd-3-clause | 10,590 | 0 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join
from tempfile import TemporaryDirectory
from .utils import readfq, import_shogun_biom
from qp_shogun.utils import (make_read_pairs_per_sample, _run_commands)
import gzip
from qiita_client import ArtifactInfo
from biom import util
SHOGUN_PARAMS = {
'Database': 'database', 'Aligner tool': 'aligner',
'Number of threads': 'threads'}
def generate_fna_file(temp_path, samples):
# Combines reverse and forward seqs per sample
# Returns filepaths of new combined files
output_fp = join(temp_path, 'combined.fna')
output = open(output_fp, "a")
count = 0
for run_prefix, sample, f_fp, r_fp in samples:
with gzip.open(f_fp, 'rt') as fp:
# Loop through forward file
for header, seq, qual in readfq(fp):
output.write(">%s_%d\n" % (sample, count))
output.write("%s\n" % seq)
count += 1
if r_fp is not None:
with gzip.open(r_fp, 'rt') as fp:
# Loop through reverse file
for header, seq, qual in readfq(fp):
output.write(">%s_%d\n" % (sample, count))
output.write("%s\n" % seq)
count += 1
output.close()
return output_fp
def _format_params(parameters, func_params):
params = {}
# Loop through all of the commands alphabetically
for param in func_params:
# Find the value using long parameter names
parameter = func_params[param]
value = parameters[param]
params[parameter] = value
return params
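# Illustration with hypothetical values: _format_params({'Database': 'rep82',
# 'Aligner tool': 'bowtie2', 'Number of threads': 4}, SHOGUN_PARAMS) returns
# {'database': 'rep82', 'aligner': 'bowtie2', 'threads': 4}.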
def generate_shogun_align_commands(input_fp, temp_dir, parameters):
cmds = []
cmds.append(
'shogun align --aligner {aligner} --threads {threads} '
'--database {database} --input {input} --output {output}'.format(
aligner=parameters['aligner'],
threads=parameters['threads'],
database=parameters['database'],
input=input_fp,
output=temp_dir))
return cmds
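# With the hypothetical parameter values above this produces a single command:
# shogun align --aligner bowtie2 --threads 4 --database rep82 \
#     --input <temp_dir>/combined.fna --output <temp_dir>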
def generate_shogun_assign_taxonomy_commands(temp_dir, parameters):
cmds = []
aln2ext = {'utree': 'tsv', 'burst': 'b6', 'bowtie2': 'sam'}
ext = aln2ext[parameters['aligner']]
output_fp = join(temp_dir, 'profile.tsv')
cmds.append(
'shogun assign-taxonomy '
'--aligner {aligner} '
'--database {database} '
'--input {input} --output {output}'.format(
aligner=parameters['aligner'],
database=parameters['database'],
input=join(temp_dir, 'alignment.%s.%s' % (parameters['aligner'],
ext)),
output=output_fp))
return cmds, output_fp
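# Note that the input alignment filename depends on the aligner: per the
# aln2ext map above, bowtie2 alignments are read from alignment.bowtie2.sam
# inside temp_dir.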
def generate_shogun_functional_commands(profile_dir, temp_dir,
parameters, sel_level):
cmds = []
output = join(temp_dir, 'functional')
cmds.append(
'shogun functional '
'--database {database} '
'--input {input} '
'--output {output} '
'--level {level}'.format(
database=parameters['database'],
input=profile_dir,
output=output,
level=sel_level))
return cmds, output
def generate_shogun_redist_commands(profile_dir, temp_dir,
parameters, sel_level):
cmds = []
output = join(temp_dir, 'profile.redist.%s.tsv' % sel_level)
cmds.append(
'shogun redistribute '
'--database {database} '
'--level {level} '
'--input {input} '
'--output {output}'.format(
database=parameters['database'],
input=profile_dir,
output=output,
level=sel_level))
return cmds, output
def run_shogun_to_biom(in_fp, biom_in, out_dir, level, version='alignment'):
if version in ('redist', 'alignment'):
output_fp = join(out_dir, 'otu_table.%s.%s.biom'
% (version, level))
else:
output_fp = join(out_dir, 'otu_table.%s.%s.%s.biom'
% (version, level, biom_in[0]))
tb = import_shogun_biom(in_fp, biom_in[1],
biom_in[2], biom_in[3])
with util.biom_open(output_fp, 'w') as f:
tb.to_hdf5(f, "shogun")
return output_fp
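# Example: run_shogun_to_biom(fp, [None, None, None, True], out_dir, 'species',
# 'redist') writes out_dir/otu_table.redist.species.biom.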
def shogun(qclient, job_id, parameters, out_dir):
"""Run Shogun with the given parameters
Parameters
----------
qclient : tgp.qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
parameters : dict
The parameter values to run split libraries
out_dir : str
The path to the job's output directory
Returns
-------
bool, list, str
The results of the job
"""
# Step 1 get the rest of the information need to run Atropos
qclient.update_job_step(job_id, "Step 1 of 6: Collecting information")
artifact_id = parameters['input']
del parameters['input']
# Get the artifact filepath information
artifact_info = qclient.get("/qiita_db/artifacts/%s/" % artifact_id)
fps = artifact_info['files']
# Get the artifact metadata
prep_info = qclient.get('/qiita_db/prep_template/%s/'
% artifact_info['prep_information'][0])
qiime_map = prep_info['qiime-map']
# Step 2 converting to fna
qclient.update_job_step(
job_id, "Step 2 of 6: Converting to FNA for Shogun")
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as temp_dir:
rs = fps['raw_reverse_seqs'] if 'raw_reverse_seqs' in fps else []
samples = make_read_pairs_per_sample(
fps['raw_forward_seqs'], rs, qiime_map)
# Combining files
comb_fp = generate_fna_file(temp_dir, samples)
# Formatting parameters
parameters = _format_params(parameters, SHOGUN_PARAMS)
# Step 3 align
align_cmd = generate_shogun_align_commands(
comb_fp, temp_dir, parameters)
sys_msg = "Step 3 of 6: Aligning FNA with Shogun (%d/{0})".format(
len(align_cmd))
success, msg = _run_commands(
qclient, job_id, align_cmd, sys_msg, 'Shogun Align')
if not success:
return False, None, msg
# Step 4 taxonomic profile
sys_msg = "Step 4 of 6: Taxonomic profile with Shogun (%d/{0})"
assign_cmd, profile_fp = generate_shogun_assign_taxonomy_commands(
temp_dir, parameters)
success, msg = _run_commands(
qclient, job_id, assign_cmd, sys_msg, 'Shogun taxonomy assignment')
if not success:
return False, None, msg
sys_msg = "Step 5 of 6: Converting output to BIOM"
qclient.update_job_step(job_id, msg)
output = run_shogun_to_biom(profile_fp, [None, None, None, True],
out_dir, 'profile')
ainfo = [ArtifactInfo('Shogun Alignment Profile', 'BIOM',
[(output, 'biom')])]
# Step 5 redistribute profile
sys_msg = "Step 6 of 6: Redistributed profile with Shogun (%d/{0})"
levels = ['phylum', 'genus', 'species']
redist_fps = []
for level in levels:
redist_cmd, output = generate_shogun_redist_commands(
profile_fp, temp_dir, parameters, level)
redist_fps.append(output)
success, msg = _run_commands(
qclient, job_id, redist_cmd, sys_msg, 'Shogun redistribute')
if not success:
return False, None, msg
# Converting redistributed files to biom
for redist_fp, level in zip(redist_fps, levels):
biom_in = ["redist", None, '', True]
output = run_shogun_to_biom(
redist_fp, biom_in, out_dir, level, 'redist |
kaiweifan/vse-lbaas-plugin-poc | quantum/plugins/bigswitch/plugin.py | Python | apache-2.0 | 52,432 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Quantum REST Proxy Plug-in for Big Switch and FloodLight Controllers
QuantumRestProxy provides a generic quantum plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all quantum
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between quantum and the
network controller
- independent upgrade/development cycles between quantum and the controller
as it limits the proxy code upgrade requirement to quantum release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with quantum for independent recovery/reset
External REST API used by proxy is the same API as defined for quantum (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import base64
import copy
import httplib
import json
import socket
from oslo.config import cfg
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.common import constants as const
from quantum.common import exceptions
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.common import utils
from quantum import context as qcontext
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import l3_db
from quantum.extensions import l3
from quantum.extensions import portbindings
from quantum.openstack.common import lockutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.plugins.bigswitch.version import version_string_with_vcs
from quantum import policy
LOG = logging.getLogger(__name__)
restproxy_opts = [
cfg.StrOpt('servers', default='localhost:8800',
help=_("A comma separated list of servers and port numbers "
"to proxy request to.")),
cfg.StrOpt('server_auth', default='username:password', secret=True,
help=_("Server authentication")),
cfg.BoolOpt('server_ssl', default=False,
help=_("Use SSL to connect")),
cfg.BoolOpt('sync_data', default=False,
help=_("Sync data on connect")),
cfg.IntOpt('server_timeout', default=10,
help=_("Maximum number of seconds to wait for proxy request "
"to connect and complete.")),
cfg.StrOpt('quantum_id', default='Quantum-' + utils.get_hostname(),
help=_("User defined identifier for this Quantum deployment")),
cfg.BoolOpt('add_meta_server_route', default=True,
help=_("Flag to decide if a route to the metadata server "
"should be injected into the VM")),
]
cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")
# The following are used to invoke the API on the external controller
NET_RESOURCE_PATH = "/tenants/%s/networks"
PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
ROUTER_RESOURCE_PATH = "/tenants/%s/routers"
ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces"
NETWORKS_PATH = "/tenants/%s/networks/%s"
PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
ROUTERS_PATH = "/tenants/%s/routers/%s"
ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s"
SUCCESS_CODES = range(200, 207)
FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
504, 505]
SYNTAX_ERROR_MESSAGE = 'Syntax error in server config file, aborting plugin'
BASE_URI = '/networkService/v1.1'
ORCHESTRATION_SERVICE_ID = 'Quantum v2.0'
METADATA_SERVER_IP = '169.254.169.254'
class RemoteRestError(exceptions.QuantumException):
def __init__(self, message):
if message is None:
message = "None"
self.message = _("Error in REST call to remote network "
"controller") + ": " + message
super(RemoteRestError, self).__init__()
class ServerProxy(object):
"""REST server proxy to a network controller."""
def __init__(self, server, port, ssl, auth, quantum_id, timeout,
base_uri, name):
self.server = server
self.port = port
self.ssl = ssl
self.base_uri = base_uri
self.timeout = timeout
self.name = name
self.success_codes = SUCCESS_CODES
self.auth = None
self.quantum_id = quantum_id
if auth:
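# Standard HTTP Basic auth: the header value is "Basic " + base64("user:password").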
self.auth = 'Basic ' + base64.encodestring(auth).strip()
@lockutils.synchronized('rest_call', 'bsn-', external=True)
def rest_call(self, action, resource, data, headers):
uri = self.base_uri + resource
body = json.dumps(data)
if not headers:
headers = {}
headers['Content-type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['QuantumProxy-Agent'] = self.name
headers['Instance-ID'] = self.quantum_id
headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
if self.auth:
headers['Authorization'] = self.auth
LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, "
"ssl=%(ssl)r, action=%(action)s"),
{'server': self.server, 'port': self.port, 'ssl': self.ssl,
'action': action})
LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, "
"headers=%(headers)r"), locals())
conn = None
if self.ssl:
conn = httplib.HTTPSConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('ServerProxy: Could not establish HTTPS '
'connection'))
return 0, None, None, None
else:
conn = httplib.HTTPConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('ServerProxy: Could not establish HTTP '
'connection'))
return 0, None, None, None
try:
conn.request(action, uri, body, headers)
response = conn.getresponse()
respstr = response.read()
respdata = respstr
if response.status in self.success_codes:
try:
respdata = json.loads(respstr)
except ValueError:
# response was not JSON, ignore the exception
pass
ret = (response.status, response.reason, respstr, respdata)
except (socket.timeout, socket.error) as e:
LOG.error(_('ServerProxy: %(action)s failure, %(e)r'), locals())
ret = 0, None, None, None
conn.close()
LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, "
|
kevalds51/sympy | sympy/geometry/polygon.py | Python | bsd-3-clause | 62,413 | 0.000417 | from __future__ import division, print_function
from sympy.core import Expr, S, Symbol, oo, pi, sympify
from sympy.core.compatibility import as_int, range
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cos, sin, tan
from sympy.geometry.exceptions import GeometryError
from sympy.logic import And
from sympy.matrices import Matrix
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import has_dups, has_variety, uniq
from .entity import GeometryEntity, GeometrySet
from .point import Point
from .ellipse import Circle
from .line import Line, Segment
from .util import _symbol
import warnings
class Polygon(GeometrySet):
"""A two-dimensional polygon.
A simple polygon in space. Can be constructed from a sequence of points
or from a center, radius, number of sides and rotation angle.
Parameters
==========
vertices : sequence of Points
Attributes
==========
area
angles
perimeter
vertices
centroid
sides
Raises
======
GeometryError
If all parameters are not Points.
If the Polygon has intersecting sides.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment, Triangle
Notes
=====
Polygons are treated as closed paths rather than 2D areas so
some calculations can be negative or positive (e.g., area)
based on the orientation of the points.
Any consecutive identical points are reduced to a single point
and any points collinear and between two points will be removed
unless they are needed to define an explicit intersection (see examples).
A Triangle, Segment or Point will be returned when there are 3 or
fewer points provided.
Examples
========
>>> from sympy import Point, Polygon, pi
>>> p1, p2, p3, p4, p5 = [(0, 0), (1, 0), (5, 1), (0, 1), (3, 0)]
>>> Polygon(p1, p2, p3, p4)
Polygon(Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1))
>>> Polygon(p1, p2)
Segment(Point2D(0, 0), Point2D(1, 0))
>>> Polygon(p1, p2, p5)
Segment(Point2D(0, 0), Point2D(3, 0))
While the sides of a polygon are not allowed to cross implicitly, they
can do so explicitly. For example, a polygon shaped like a Z with the top
left connecting to the bottom right of the Z must have the point in the
middle of the Z explicitly given:
>>> mid = Point(1, 1)
>>> Polygon((0, 2), (2, 2), mid, (0, 0), (2, 0), mid).area
0
>>> Polygon((0, 2), (2, 2), mid, (2, 0), (0, 0), mid).area
-2
When the keyword `n` is used to define the number of sides of the
Polygon then a RegularPolygon is created and the other arguments are
interpreted as center, radius and rotation. The unrotated RegularPolygon
will always have a vertex at Point(r, 0) where `r` is the radius of the
circle that circumscribes the RegularPolygon. Its method `spin` can be
used to increment that angle.
>>> p = Polygon((0,0), 1, n=3)
>>> p
RegularPolygon(Point2D(0, 0), 1, 3, 0)
>>> p.vertices[0]
Point2D(1, 0)
>>> p.args[0]
Point2D(0, 0)
>>> p.spin(pi/2)
>>> p.vertices[0]
Point2D(0, 1)
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('n', 0):
n = kwargs.pop('n')
args = list(args)
# return a virtual polygon with n sides
if len(args) == 2: # center, radius
args.append(n)
elif len(args) == 3: # center, radius, rotation
args.insert(2, n)
return RegularPolygon(*args, **kwargs)
vertices = [Point(a) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points unless they are shared points
got = set()
shared = set()
for p in nodup:
if p in got:
shared.add(p)
else:
got.add(p)
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = nodup[i], nodup[i + 1], nodup[i + 2]
if b not in shared and Point.is_collinear(a, b, c):
nodup.pop(i + 1)
if a == c:
nodup.pop(i)
else:
i += 1
vertices = list(nodup)
if len(vertices) > 3:
rv = GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 3:
return Triangle(*vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
# reject polygons that have intersecting sides unless the
# intersection is a shared point or a generalized intersection.
# A self-intersecting polygon is easier to detect than a
# random set of segments since only those sides that are not
# part of the convex hull can possibly intersect with other
# sides of the polygon...but for now we use the n**2 algorithm
# and check if any side intersects with any preceding side,
# excluding the ones it is connected to
try:
convex = rv.is_convex()
except ValueError:
convex = True
if not convex:
sides = rv.sides
for i, si in enumerate(sides):
pts = si.args
# exclude the sides connected to si
for j in range(1 if i == len(sides) - 1 else 0, i - 1):
sj = sides[j]
if sj.p1 not in pts and sj.p2 not in pts:
hit = si.intersection(sj)
if not hit:
continue
hit = hit[0]
# don't complain unless the intersection is definite;
# if there are symbols present then the intersection
# might not occur; this may not be necessary since if
# the convex test passed, this will likely pass, too.
# But we are about to raise an error anyway so it
# won't matter too much.
if all(i.is_number for i in hit.args):
raise GeometryError(
"Polygon has intersecting sides.")
return rv
@property
def area(self):
"""
The area of the polygon.
Notes
=====
The area calculation can be positive or negative based on the
orientation of the points.
See Also
========
sympy.geometry.ellipse.Ellipse.area
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.area
3
"""
area = 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
area += x1*y2 - x2*y1
return simplify(area) / 2
@staticmethod
def _isright(a, b, c):
ba = b - a
ca = c - a
t_area = simplify(ba.x*ca.y - ca.x*ba.y)
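# t_area is twice the signed area of triangle (a, b, c); a nonpositive value
# means c lies to the right of (or on) the directed line from a to b.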
res = t_area.is_nonpositive
if res is None:
raise ValueError("Can't determine orientation")
return res
@property
def angles(self):
"""The internal angle at each vertex.
Returns
=======
angles : dict
A dictionary where each key is a vertex and each value is the
internal angle at that vertex. The vertices are represented as
Points.
See Also
========
sympy.geometry.point.Point, s |
klahnakoski/jx-sqlite | vendor/jx_base/expressions/base_inequality_op.py | Python | mpl-2.0 | 1,864 | 0.000536 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import builtin_ops
from jx_base.expressions.expression import Expression
from jx_base.expressions.false_op import FALSE
from jx_base.expressions.literal import is_literal, Literal
from jx_base.expressions.variable import Variable
from jx_base.language import is_op
from mo_json.types import T_BOOLEAN
class BaseInequalityOp(Expression):
has_simple_form = True
data_type = T_BOOLEAN
op = None
def __init__(self, terms):
Expression.__init__(self, terms)
self.lhs, self.rhs = terms
@property
def name(self):
return self.op
def __data__(self):
if is_op(self.lhs, Variable) and is_literal(self.rhs):
return {self.op: {self.lhs.var: self.rhs.value}}
else:
return {self.op: [self.lhs.__data__(), self.rhs.__data__()]}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.op == other.op and self.lhs == other.lhs and self.rhs == other.rhs
def vars(self):
return self.lhs.vars() | self.rhs.vars()
def map(self, map_):
return self.__class__([self.lhs.map(map_), self.rhs.map(map_)])
def missing(self, lang):
return FALSE
def partial_eval(self, lang):
lhs = self.lhs.partial_eval(lang)
rhs = self.rhs.partial_eval(lang)
if is_literal(lhs) and is_literal(rhs):
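# Constant folding: when both sides reduce to literals (e.g. 3 and 5 for a
# "lt" op), evaluate the comparison eagerly and wrap the result in a Literal.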
return Literal(builtin_ops[self.op](lhs, rhs))
return self.__class__([lhs, rhs])
|
OmairAJ/Plagia | local/indexMap.py | Python | gpl-2.0 | 4,474 | 0.023469 | #indexMap.py
# python indexMap.py -s -d test.txt
import os
import sys
import math
import string
import argparse
import fileinput
## Casefold text
def casefold(text):
text = text.lower()
text = text.translate(string.maketrans("",""), string.punctuation)
text = text.split()
text = filter(None, text)
return text
## Number of combinations
# combinations(windowSize, patternSize)
def combinations(n, s):
return math.factorial(n) / (math.factorial(s) * math.factorial(n-s))
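# For example, combinations(5, 3) == 10: the number of distinct 3-word
# patterns that can be drawn from a 5-word window.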
## Pattern list
def keyPattern(keyList, patternSize, startPosition, endPosition, flag):
if patternSize == 0:
return ['']
keys = []
# TODO: this code is pretty messed up; it needs cleaning, and the
# calculation of word start and end positions needs fixing.
for i in range(len(keyList)):
j = 0
k = 0
subKeys = keyPattern(keyList[i + 1:], patternSize - 1, startPosition, endPosition, False)
for subKey in subKeys:
key = ""
key = keyList[i]
if (len(subKey) > 0):
key += "_"
key += subKey
if k == 3:
j = 1
k += 0
endPosition = startPosition + patternSize + j - 1
if flag:
j += 1
k += 1
keyI = [key, startPosition, endPosition, documentFile]
else:
keyI = key
keys.append(keyI)
return keys
## Generate n-grams
def genNGrams(wordList, windowSize, overlapSize, fileName):
nGrams = []
for i in range(len(wordList) - (windowSize - (windowSize - overlapSize))):
nGram = keyPattern(wordList[i:i + windowSize], patternSize, i, i + windowSize, True)
for j in range(len(nGram)):
nGrams.append(nGram[j])
return nGrams
## Command-line arguments parser
parser = argparse.ArgumentParser(description="Index documents for contextual n-grams based plagiarism detection. Only text files with .txt extension are accepted for indexing.")
parser.add_argument("-d", action="store", dest="Document", type=str, help="Document to index")
parser.add_argument("-w", action="store", dest="Window", type=int, default=5, help="Window size for index")
parser.add_argument("-o", action="store", dest="Overlap", type=int, default=4, help="Overlap size for index")
parser.add_argument("-p", action="store", dest="Pattern", type=int, default=3, help="Pattern size for index")
parser.add_argument("-s", action="store_true", dest="Source", help="This is a source document")
parser.add_argument("-v", action="version", version="%(prog)s 1.0")
parserResults = parser.parse_args()
documentOpen = parserResults.Document
windowSize = parserResults.Window
overlapSize = parserResults.Overlap
patternSize = parserResults.Pattern
documentType = parserResults.Source
if documentType:
documentFolder = "sources/"
else:
documentFolder = "users/"
## Checks
if windowSize == 0:
print "The window size must be greater than 0."
print "\n"
sys.exit()
if overlapSize == 0:
print "The overlap size must be greater than 0."
print "\n"
sys.exit()
if patternSize == 0:
print "The pattern size must be greater than 0."
print "\n"
sys.exit()
if (documentOpen is None):
print "This application requires at least one text file with .txt extesion to function."
print "\n"
sys.exit()
else:
documentPath = os.path.dirname(documentOpen)
documentName = casefold(os.path.splitext(os.path.basename(documentOpen))[0])[0]
documentExtension = casefold(os.path.splitext(os.path.basename(documentOpen))[1])[0]
documentFile = documentName + "." + documentExtension
if documentExtension != "txt":
print "This application only accepts plain text files with .txt extension."
print "\n"
sys.exit()
documentRead = open(documentOpen, "r")
wordstring = documentRead.read()
# Apply casefolding to the text from the document
wordList = casefold(wordstring)
documentNGrams = genNGrams(wordList, windowSize, overlapSize, documentFile)
documentSavePath = "maps/" + documentFolder
if not os.path.exists(documentSavePath): os.makedirs(documentSavePath)
if not os.path.exists("documents/" + documentFolder): os.makedirs("documents/" + documentFolder)
os.system ("cp %s %s" % (documentOpen, "documents/" + documentFolder + documentFile))
documentExport = open(documentSavePath + documentName + ".m","w")
for i in range(0, len(documentNGrams)):
documentExport.write("%s\t1\t(%s, %s, %s)\n" % (documentNGrams[i][0], documentNGrams[i][1], documentNGrams[i][2], documentNGrams[i][3]))
print "%s\t1\t(%s, %s, %s)" % (documentNGrams[i][0], documentNGrams[i][1], documentNGrams[i][2], documentNGrams[i][3])
documentExport.close()
|
rgeorgi/intent | intent/scripts/igt/extract_lang.py | Python | mit | 2,741 | 0.008026 | """
Created on Dec 19, 2014
@author: rgeorgi
This script is used to point at a dump of the ODIN database and extract the specified language from it.
"""
# Built-in imports -------------------------------------------------------------
import argparse, re, logging
import sys
EXTR_LOG = logging.getLogger('LANG_EXTRACTOR')
# Internal imports -------------------------------------------------------------
from intent.utils.argutils import configfile, writefile
from intent.utils.fileutils import matching_files
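# Typical invocation (hypothetical paths):
#   python extract_lang.py -d /path/to/odin_dump -l deu -o german_instances.txt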
def extract_lang(dir, lang, outfile, limit=None):
i = 0
EXTR_LOG.info('Extracting language "%s" from ODIN...' % lang)
# Iterate through each ".check" file in the given directory.
try:
matching_paths = matching_files(dir, '.*\.check$', recursive=True)
except FileNotFoundError as fnfe:
EXTR_LOG.critical("Path {} was not found.".format(dir))
sys.exit(10)
else:
for path in matching_paths:
EXTR_LOG.debug('Working on path... "%s"' % path)
# Open up the file...
f = open(path, 'r', encoding='latin-1')
data = f.read()
f.close()
# And get the list of instances.
instances = re.split('\n\n+', data)
# Remove blank "instances"
instances = [i for i in instances if i.strip()]
# Now, for each instance, look for the language.
for instance in instances[1:]: # <-- skip the first pgph, because it's not an instance.
inst_lang = None
# First, if there is a "gold" lang code, use that one.
gold_re = re.search('gold_lang_code:.*?\(([a-z:]+)\)', instance, flags=re.I)
chosen_re = re.search('stage3_lang_chosen:.*?\(([a-z:]+)\)', instance, flags=re.I)
if gold_re:
inst_lang = gold_re.group(1)
elif chosen_re:
inst_lang = chosen_re.group(1)
if inst_lang == lang:
outfile.write(instance+'\n\n')
i += 1
if limit and i == limit: break
if limit and i == limit: break
print("{} instances written.".format(i))
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-c', '--config', type=configfile)
p.add_argument('-d', '--dir', help="Path to the ODIN database directory.", required=True)
p.add_argument('-l', '--lang', help="Language to search for.", required=True)
p.add_argument('-o', '--outfile', help="Text file to which the resulting instances are written.", required=True, type=writefile)
args = p.parse_args()
extract_lang(args.dir, args.lang, args.outfile) |
bataeves/kaggle | instacart/imba/arboretum_cv.py | Python | unlicense | 18,971 | 0.005587 | import gc
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np
import os
import arboretum
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_prev = pd.merge(order_train, orders, on='order_id')
order_prev.order_number -= 1
order_prev = pd.merge(order_prev[
['user_id', 'order_number', 'product_id', 'reordered', 'add_to_cart_order', 'order_dow',
'order_hour_of_day']], orders[['user_id', 'order_number', 'order_id']],
on=['user_id', 'order_number'])
order_prev.drop(['order_number', 'user_id'], axis=1, inplace=True)
order_prev.rename(columns={
'reordered': 'reordered_prev',
'add_to_cart_order': 'add_to_cart_order_prev',
'order_dow': 'order_dow_prev',
'order_hour_of_day': 'order_hour_of_day_prev'
}, inplace=True)
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
# product_periods.prev1 = product_periods['last'] / product_periods.prev1
# product_periods.prev2 = product_periods['last'] / product_periods.prev2
# product_periods['mean'] = product_periods['last'] / product_periods['mean']
# product_periods['median'] = product_periods['last'] / product_periods['median']
print(order_train.columns)
###########################
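# Per-order count of reordered products in the training labels.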
weights = order_train.groupby('order_id')['reordered'].sum().to_frame('weights')
weights.reset_index(inplace=True)
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'})\
.rename(columns = {'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data_dow = orders_products.groupby([' |
cmdelatorre/roses | roses_project/settings.py | Python | gpl-2.0 | 2,124 | 0 | """
Django settings for roses_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!5ju*8(7&c3*y2nt$$^r%eecs95uo!237^0ke-$!bgwj)-%u^$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'skills',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'roses_project.urls'
WSGI_APPLICATION = 'roses_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Argentina/Cordoba'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
a2ialabelme/LabelMeAnnotationTool | labelFile.py | Python | gpl-3.0 | 10,321 | 0.024126 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Michael Pitidis, Hussein Abdulwahid.
#
# This file is part of Labelme.
#
# Labelme is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Labelme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labelme. If not, see <http://www.gnu.org/licenses/>.
#
#from PyQt4.QtGui import *
#from PyQt4.QtCore import *
from PySide.QtGui import *
from PySide.QtCore import *
from lib import newIcon
import json
import os.path
from os import environ
import sys
import ntpath
from base64 import b64encode, b64decode
from xml.etree.cElementTree import Element, SubElement, Comment, tostring, ElementTree
import xml.etree.ElementTree as ET
import xml.etree.cElementTree as et
import re
from xmltools import indent
from shape import DEFAULT_LINE_COLOR_LIST, DEFAULT_FILL_COLOR_LIST
from labelDialog import validWriteTypes
# helpers
def getLines(snip):
_linesOK = []
for _line in snip.getiterator('Line'):
if not re.match(ur'^\s*$', _line.get('Value')):
_linesOK.append(_line)
return _linesOK
def pointsFromBox(snip):
if snip.get('Top') is not None:  # bounding box
top = int(snip.get('Top'))
left = int(snip.get('Left'))
bottom = int(snip.get('Bottom'))
right = int(snip.get('Right'))
return [[left,top], [right,top], [right,bottom], [left,bottom]]
elif snip.get('polygon') is not None:
return [[int(x[0]),int(x[1])] for x in [z.split(',') for z in snip.get('polygon').replace('(', '').replace(')', '').split(';')]]
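# Illustration: a snippet with Left=0, Top=0, Right=4, Bottom=2 yields
# [[0, 0], [4, 0], [4, 2], [0, 2]] (top-left, top-right, bottom-right,
# bottom-left); a polygon attribute "(0,0);(4,0);(4,2);(0,2)" parses the same.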
def polyFromPoints(points, tag='BBox'):
ret = []
if str(tag) == u'Polygon':
if points[1][0] < points[0][0]:  # counterclockwise?
points = reversed(points)
ret.append((u'polygon',u';'.join(["("+str(int(x))+","+str(int(y))+")" for x,y in points])))
elif str(tag) == 'BBox':
allx = [x for x,y in points]
ally = [y for x,y in points]
left = int(min(allx))
right = int(max(allx))
top = int(min(ally))
bottom = int(max(ally))
ret.append((u'Left', str(left)))
ret.append((u'Top', str(top)))
ret.append((u'Right', str(right)))
ret.append((u'Bottom', str(bottom)))
else:
raise RuntimeError('Unknown tag for polyFromPoints <%s>'%tag)
return ret
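# Example: polyFromPoints([[0, 0], [4, 0], [4, 2], [0, 2]], tag='BBox') returns
# [('Left', '0'), ('Top', '0'), ('Right', '4'), ('Bottom', '2')].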
def getTop(points):
return min([x[1] for x in points])
BB = QDialogButtonBox
class LabelTagDialog(QDialog):
def __init__(self, text="Enter object label", parent=None):
super(LabelTagDialog, self).__init__(parent)
layout = QVBoxLayout()
self.buttonBox = bb = BB(BB.Ok, Qt.Horizontal, self)
bb.button(BB.Ok).setIcon(newIcon('done'))
#bb.button(BB.Cancel).setIcon(newIcon('undo'))
bb.accepted.connect(self.validate)
#bb.rejected.connect(self.reject)
self.lbl = QLabel('BBox')
# Center align text
self.lbl.setAlignment(Qt.AlignHCenter)
layout.addWidget(self.lbl)
layout.addWidget(bb)
self.combo = QComboBox()
self.combo.addItems([u"Polygon", u"BBox"])
self.connect(self.combo, SIGNAL('activated(QString)'), self.combo_chosen)
self.connect(self.combo, SIGNAL('currentIndexChanged(QString)'), self.combo_chosen)
layout.addWidget(self.combo)
self.setLayout(layout)
def getChoice(self):
return self.lbl.text()
def combo_chosen(self, text):
self.lbl.setText(text)
def validate(self):
self.accept()
#def validate(self):
#if self.edit.text().trimmed():
#self.accept()
#def postProcess(self):
#self.edit.setText(self.edit.text().trimmed())
#def popUp(self, text='', move=True):
#self.edit.setText(text)
#self.edit.setSelection(0, len(text))
#self.edit.setFocus(Qt.PopupFocusReason)
#if move:
#self.move(QCursor.pos())
#return self.edit.text() if self.exec_() else None
###
class LabelFileError(Exception):
pass
class LabelFile(object):
#suffix = '.lif'
suffixes = [u'.xml'] # '.lif' not used
def __init__(self, filename=None):
self.shapes = ()
self.imagePath = None
self.imageData = None
if filename is not None:
self.load(filename)
# RM: TODO make something cleaner
def load(self, filename):
ext = os.path.splitext(filename)[1].lower()
if ext == u'.lif':
self._loadLIF(filename)
elif ext == u'.xml':
self._loadDL(filename)
else:
raise LabelFileError('Unknown label extension %s'%ext)
def _loadLIF(self, filename):
try:
with open(filename, 'rb') as f:
data = json.load(f)
imagePath = data['imagePath']
imageData = b64decode(data['imageData'])
lineColor = data['lineColor']
fillColor = data['fillColor']
shapes = ((s['label'], s['points'], s['line_color'], s['fill_color'])\
for s in data['shapes'])
# Only replace data after everything is loaded.
self.shapes = shapes
self.imagePath = imagePath
self.imageData = imageData
self.lineColor = lineColor
self.fillColor = fillColor
except Exception, e:
raise LabelFileError(e)
def _loadDL(self, filename):
#try:
#RM: just read a DocumentList with a single image file, or just the first one TODO
linesOK = {}
for _, elem in et.iterparse(filename):
if elem.tag == "SinglePage" :
img = elem.get('FileName')
pars = list(elem.getiterator('Paragraph'))
if len(pars) > 0: # read lines
for k,para in enumerate(pars):
linesOK[k] = []
linesOK[k].append(getLines(para))
else: # try lines only
linesOK['noPara'] = []
linesOK['noPara'].append(getLines(elem))
break # just a single image... TODO
if len(linesOK.keys()) >= 1: # found some lines
self.imagePath = img
if not os.path.isfile(img):
self.imagePath = os.path.join(os.path.split(filename)[0], img)
with open(self.imagePath, "rb") as image_file:
self.imageData = image_file.read()
self.lineColor = DEFAULT_LINE_COLOR_LIST #RM: got from an example, RGB_Alpha
self.fillColor = DEFAULT_FILL_COLOR_LIST #RM: got from an example
# shape is a tuple: see above (s['label'], s['points'], s['line_color'], s['fill_color'])
# points is a list of [x,y] points
shapes = []
if linesOK.has_key('noPara'):
for xl in linesOK['noPara']:
if xl != []:
for line in xl:
shapes.append((line.get('Value'), pointsFromBox(line), DEFAULT_LINE_COLOR_LIST, None))
else:
for k in sorted(linesOK.keys()):
#if pars[k].get('Value') is None:
#paraT = u''
#else:
#paraT = pars[k].get('Value')
#shapes.append((u'__P__:'+paraT+str(len(linesOK[k])), pointsFromBox(pars[k]), [255, 255, 224, 128], [255, 255, 224, 32]))
for xl in linesOK[k]:
if xl != []:
for line in xl:
shapes.append((line.get('Value'), pointsFromBox(line), DEFAULT_LINE_COLOR_LIST, None))
self.shapes = (x for x in shapes)
else:
raise LabelFileError('Did not find any lines in %s'%filename)
#except Exception, e:
#raise LabelFileError(e)
def save(self, filename, shapes, imagePath, imageData,
lineColor=None, fillColor=None):
ext = os.path.splitext(str |
google-research/google-research | simulation_research/traffic/random_traffic_generator_test.py | Python | apache-2.0 | 13,728 | 0.002258 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import absltest
import numpy as np
import sumolib
from simulation_research.traffic import file_util
from simulation_research.traffic import random_traffic_generator
def _load_file(folder, file_name):
file_path = os.path.join(folder, file_name)
return file_util.f_abspath(file_path)
class RandomTrafficGeneratorTest(absltest.TestCase):
def setUp(self):
super(RandomTrafficGeneratorTest, self).setUp()
testdata_dir = './testdata'
self._output_dir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())
sumo_net_file = 'mtv_tiny.net.xml'
map_file = _load_file(testdata_dir, sumo_net_file)
self._net = sumolib.net.readNet(map_file)
traffic_generator = random_traffic_generator.RandomTrafficGenerator(
self._net)
self._random_traffic_generator = traffic_generator
# The traffic generator uses numpy to draw random samples. The numpy random
# seed is set here to make the result replicable.
np.random.seed(0)
def test_get_freeway_input_output(self):
figure_path = os.path.join(self._output_dir, 'freeway_routes.pdf')
input_output = self._random_traffic_generator.get_freeway_input_output(
figure_path=figure_path)
self.assertLen(input_output, 9)
def test_get_arterial_input_output(self):
figure_path = os.path.join(self._output_dir, 'arterial_routes.pdf')
input_output = self._random_traffic_generator.get_arterial_input_output(
figure_path=figure_path)
self.assertLen(inp | ut_output, 21)
def test_setup_shortest_routes(self):
# Test case for freeways.
input_output = self._random_traffic_generator.get_freeway_input_output()
output_file = os.path.join(self._output_dir, 'freeway_routes.xml')
routes = self._random_traffic_generator.setup_shortest_routes(
input_output,
edge_type_list=random_traffic_generator.FREEWAY_EDGE_TYPES,
routes_file=outpu | t_file,
figures_folder=self._output_dir)
self.assertLen(routes, 3)
routes_length = [routes[0]['route_length'],
routes[1]['route_length'],
routes[2]['route_length']]
routes_length.sort()
self.assertAlmostEqual(routes_length[0], 450.18)
self.assertAlmostEqual(routes_length[1], 622.57)
self.assertAlmostEqual(routes_length[2], 1051.25)
# Test case for arterial roads.
input_output = self._random_traffic_generator.get_arterial_input_output()
output_file = os.path.join(self._output_dir, 'arterial_routes.xml')
routes = self._random_traffic_generator.setup_shortest_routes(
input_output,
edge_type_list=(random_traffic_generator.FREEWAY_EDGE_TYPES +
random_traffic_generator.ARTERIAL_EDGE_TYPES),
routes_file=output_file,
figures_folder=self._output_dir)
self.assertLen(routes, 5)
routes_length = [routes[0]['route_length'],
routes[1]['route_length'],
routes[2]['route_length'],
routes[3]['route_length'],
routes[4]['route_length']]
routes_length.sort()
self.assertAlmostEqual(routes_length[0], 16.84)
self.assertAlmostEqual(routes_length[1], 269.97)
self.assertAlmostEqual(routes_length[2], 512.76)
self.assertAlmostEqual(routes_length[3], 637.8299999999999)
self.assertAlmostEqual(routes_length[4], 1051.25)
def test_generate_freeway_routes_flow(self):
"""Test for the freeway demands generation workflow.
All the unit tests have been done above, and there is no calculation in this
    test. So this one just verifies nothing is blocked in the workflow.
"""
routes_file = os.path.join(self._output_dir, 'freeway_routes_demands.xml')
token = '<routes>\n'
file_util.append_line_to_file(routes_file, token)
token = (' <vType id="Car" accel="0.8" decel="4.5" sigma="0.5" '
'length="5" minGap="2.5" maxSpeed="38" guiShape="passenger"/>\n')
file_util.append_line_to_file(routes_file, token)
input_output = self._random_traffic_generator.get_freeway_input_output()
token = ' <!-- freeway routes -->'
file_util.append_line_to_file(routes_file, token)
freeway_routes = self._random_traffic_generator.setup_shortest_routes(
input_output,
edge_type_list=random_traffic_generator.FREEWAY_EDGE_TYPES,
routes_file=routes_file,
figures_folder=self._output_dir)
token = ' <!-- freeway demands -->'
file_util.append_line_to_file(routes_file, token)
time_step_size = 100
for time_point in range(0, 1200, time_step_size):
freeway_routes_demands = [(0, 0.3), (1, 0.3)]
self._random_traffic_generator.generate_routes_flow(
time_point, time_step_size, freeway_routes, freeway_routes_demands,
routes_file)
token = '\n</routes>'
file_util.append_line_to_file(routes_file, token)
# Test by counting number of lines in the file.
with file_util.f_open(routes_file, 'r') as f:
self.assertLen(f.readlines(), 36)
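  # For reference, each generated demand token is expected to resemble a SUMO
  # <flow> element; the exact attributes shown here are an assumption, since
  # generate_routes_flow's formatting lives in random_traffic_generator:
  #   <flow id="route_0_0" route="route_0" begin="0" end="100" probability="0.3"/>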
def test_generate_arterial_routes_flow(self):
"""Test for the arterial roads demand generation workflow.
All the unit tests have been done above, and there is no calculation in this
    test. So this one just verifies nothing is blocked in the workflow.
"""
routes_file = os.path.join(self._output_dir, 'arterial_routes_demands.xml')
token = '<routes>\n'
file_util.append_line_to_file(routes_file, token)
token = (' <vType id="Car" accel="0.8" decel="4.5" sigma="0.5" '
'length="5" minGap="2.5" maxSpeed="38" guiShape="passenger"/>\n')
file_util.append_line_to_file(routes_file, token)
# Setup freeway routes.
input_output = self._random_traffic_generator.get_freeway_input_output()
token = ' <!-- freeway routes -->'
file_util.append_line_to_file(routes_file, token)
freeway_routes = self._random_traffic_generator.setup_shortest_routes(
input_output,
edge_type_list=random_traffic_generator.FREEWAY_EDGE_TYPES,
routes_file=routes_file,
figures_folder=self._output_dir)
# Setup arterial roads routes.
input_output = self._random_traffic_generator.get_arterial_input_output()
token = ' <!-- arterial routes -->'
file_util.append_line_to_file(routes_file, token)
arterial_routes = self._random_traffic_generator.setup_shortest_routes(
input_output,
edge_type_list=(random_traffic_generator.FREEWAY_EDGE_TYPES +
random_traffic_generator.ARTERIAL_EDGE_TYPES),
routes_file=routes_file,
figures_folder=self._output_dir)
token = ' <!-- freeway + arterial roads demands -->'
file_util.append_line_to_file(routes_file, token)
time_step_size = 100
for time_point in range(0, 1200, time_step_size):
freeway_routes_demands = [(0, 0.6), (1, 0.6), (2, 0.6)]
self._random_traffic_generator.generate_routes_flow(
time_point, time_step_size, freeway_routes, freeway_routes_demands,
routes_file)
arterial_routes_demands = [(route_id, 0.1) for route_id in
range(len(arterial_routes))]
self._random_traffic_generator.generate_routes_flow(
time_point, time_step_size, arterial_routes, arterial_routes_demands,
routes_file)
token = '\n</routes>'
file_util.append_line_to_file(routes_file, token |
OCA/account-analytic | analytic_tag_dimension/models/account_move_line.py | Python | agpl-3.0 | 393 | 0 | # Copyright 2017 PESOL (http://pesol.es) - Angel Moya (angel.moya@pesol.es)
# Copyr | ight 2020 Tecnativa - Carlos Dauden
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models
class AccountMoveLine(models.Model):
_name = "account.move.line"
_inherit = ["analytic.dime | nsion.line", "account.move.line"]
_analytic_tag_field_name = "analytic_tag_ids"
|
Irides-Chromium/python_modules | modules/__init__.py | Python | gpl-3.0 | 71 | 0 | #!/usr/bin/python3
from | scale import scale
from putchar | import putchar
|
mabotech/maboss.py | maboss/motorx/jobexecutor/je_server1.py | Python | mit | 1,138 | 0.018453 |
# -*- coding: utf-8 -*-
import sys
import logging
import logging.handlers
import logging.config
from config import CENTRAL_CONFIG
from config import ENDPOINT
#logging.config.fileConfig(LOGGING_CFG_SRV)
log = logging.getLogger(__name__)
import time
from time import strftime, localtime
import gevent
import zerorpc
import random
from msgpack import packb, unpackb
from mabolab.executor import py_executor
#from print_worker import PrintWorker
class JobExecutorServer(zerorp | c.Server):
def __init__(self):
# initialize parent class
super(JobExecutorServer, self).__init__()
def execute(self, name, args, func_type='PY'):
module_path = "c | :/mtp/mabotech/maboss1.1"
log.debug("[%s]%s:%s" % (module_path, func_type, name) )
rtn = py_executor.execute(name, args, module_path)
return rtn
def run():
endpoint = ENDPOINT
srv = JobExecutorServer()
srv.bind(endpoint)
log.info("running:" + endpoint)
srv.run()
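# A minimal client-side sketch for this server; the endpoint value and the
# remote function name are assumptions (zerorpc.Client comes from the same
# zerorpc package imported above):
def example_call(endpoint="tcp://127.0.0.1:4242"):
    client = zerorpc.Client()
    client.connect(endpoint)
    # Remote methods are invoked by name over the zerorpc connection.
    return client.execute("my_func", {"arg": 1})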
if __name__ == "__main__":
run() |
noironetworks/neutron | neutron/agent/l2/l2_agent_extensions_manager.py | Python | apache-2.0 | 2,311 | 0.000433 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.or | g/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, so | ftware
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from neutron.agent import agent_extensions_manager as agent_ext_manager
from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config
LOG = log.getLogger(__name__)
L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions'
def register_opts(conf):
agent_ext_mgr_config.register_agent_ext_manager_opts(conf)
class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
"""Manage l2 agent extensions. The handle_port and delete_port methods are
guaranteed to be attributes of each extension because they have been
marked as abc.abstractmethod in the extensions' abstract class.
"""
def __init__(self, conf):
super(L2AgentExtensionsManager, self).__init__(conf,
L2_AGENT_EXT_MANAGER_NAMESPACE)
def handle_port(self, context, data):
"""Notify all agent extensions to handle port."""
for extension in self:
if hasattr(extension.obj, 'handle_port'):
extension.obj.handle_port(context, data)
else:
LOG.error(
"Agent Extension '%(name)s' does not "
"implement method handle_port",
{'name': extension.name}
)
def delete_port(self, context, data):
"""Notify all agent extensions to delete port."""
for extension in self:
if hasattr(extension.obj, 'delete_port'):
extension.obj.delete_port(context, data)
else:
LOG.error(
"Agent Extension '%(name)s' does not "
"implement method delete_port",
{'name': extension.name}
)
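
# A minimal sketch of an extension this manager could drive; the class name and
# plain-object base are illustrative, since real L2 extensions implement the
# neutron agent extension interface and are loaded through stevedore:
class _NoopL2Extension(object):
    def handle_port(self, context, data):
        LOG.debug("handle_port called with %s", data)

    def delete_port(self, context, data):
        LOG.debug("delete_port called with %s", data)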
|
tuxfux-hlp-notes/python-batches | archieves/batch-57/debugging/first.py | Python | gpl-3.0 | 954 | 0.041929 | #!/usr/bin/python
# usage: a pdb debugging example
import pdb
version=2.0
def my_add(a,b):
''' This is the function for addition of numbers and strings '''
print "value of a is {}".format(a)
print "value of b is {}".format(b)
return a+b
def my_div(a,b):
''' This is the function for division '''
return a/b
def my_sub(a,b):
    ''' This is the function for subtraction '''
if a > b:
return a - b
elif b > a:
return b - a
def my_mul(a,b):
''' This is the function for multiplication '''
return a * b
# Application code
if __name__ == '__main__':
print "This is a example on understading debugging"
print "Congo, i learned to write a calculator"
pdb.set_trace()
print "summation of two numbers- {}".format(my_add(1,2))
print "multiplication of two numbers- {}".format(my_mul(1,2))
print "substarti | on of two numbers - {}".format(my_sub(1,2))
print "division | of two numbers - {}".format(my_div(4,2))
|
simpeg/simpeg | SimPEG/electromagnetics/utils/__init__.py | Python | mit | 212 | 0 | from .waveform_utils | import omega, k, VTEMFun, TriangleFun, SineFun
from .current_utils import (
getStraightLineCurrentIntegral,
getSourceTermLineCurrentPolygon,
segmented_line_current_source_term,
)
| |
getsentry/sentry-auth-github | sentry_auth_github/client.py | Python | apache-2.0 | 1,850 | 0.002703 | from __future__ import absolut | e_import
import six
from r | equests.exceptions import RequestException
from sentry import http
from sentry.utils import json
from .constants import API_DOMAIN
class GitHubApiError(Exception):
def __init__(self, message='', status=0):
super(GitHubApiError, self).__init__(message)
self.status = status
class GitHubClient(object):
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
self.http = http.build_session()
def _request(self, path, access_token):
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
}
headers = {
'Authorization': 'token {0}'.format(access_token),
}
try:
req = self.http.get('https://{0}/{1}'.format(API_DOMAIN, path.lstrip('/')),
params=params,
headers=headers,
)
except RequestException as e:
raise GitHubApiError(six.text_type(e), status=getattr(e, 'status_code', 0))
if req.status_code < 200 or req.status_code >= 300:
raise GitHubApiError(req.content, status=req.status_code)
return json.loads(req.content)
def get_org_list(self, access_token):
return self._request('/user/orgs', access_token)
def get_user(self, access_token):
return self._request('/user', access_token)
def get_user_emails(self, access_token):
return self._request('/user/emails', access_token)
def is_org_member(self, access_token, org_id):
org_list = self.get_org_list(access_token)
org_id = six.text_type(org_id)
for o in org_list:
            if six.text_type(o['id']) == org_id:
return True
return False
|
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/datasets/mldata.py | Python | agpl-3.0 | 6,651 | 0.00015 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: Simplified BSD
import os
from os.path import join, exists
import re
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
import urllib2
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
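# For example (illustrative): mldata_filename('Whistler Daily Snowfall')
# returns 'whistler-daily-snowfall' and mldata_filename('iris (v2.0)')
# returns 'iris-v20'.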
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch objec | t with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemi | a", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> iris = fetch_mldata('iris')
>>> iris.target[0]
1
>>> print(iris.data[0])
[-0.555556 0.25 -0.864407 -0.916667]
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True)
>>> print(leuk.data.shape[0])
72
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0')
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % urllib2.quote(dataname)
try:
mldata_url = urllib2.urlopen(urlname)
except urllib2.HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, (int, np.integer)):
target_name = col_names[target_name]
if isinstance(data_name, (int, np.integer)):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
|
AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/service_type_info.py | Python | mit | 1,980 | 0.00202 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# ----------------------------------- | ---------------------------------------
from msrest.serialization import Model
class ServiceTypeInfo(Model):
"""Information about a service type that is defined in a service manifest of a
provisioned application type.
:param service_type_description:
:type servic | e_type_description: :class:`ServiceTypeDescription
<azure.servicefabric.models.ServiceTypeDescription>`
:param service_manifest_name:
:type service_manifest_name: str
:param service_manifest_version: The version of the service manifest in
which this service type is defined.
:type service_manifest_version: str
:param is_service_group: Indicates whether the service is a service group.
If it is, the property value is true otherwise false.
:type is_service_group: bool
"""
_attribute_map = {
'service_type_description': {'key': 'ServiceTypeDescription', 'type': 'ServiceTypeDescription'},
'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},
'service_manifest_version': {'key': 'ServiceManifestVersion', 'type': 'str'},
'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'},
}
def __init__(self, service_type_description=None, service_manifest_name=None, service_manifest_version=None, is_service_group=None):
self.service_type_description = service_type_description
self.service_manifest_name = service_manifest_name
self.service_manifest_version = service_manifest_version
self.is_service_group = is_service_group
|
zbyte64/django-hyperadmin | hyperadmin/models.py | Python | bsd-3-clause | 1,912 | 0.007845 | """
Helper function for logging all events that come through the API.
"""
# TODO Write logging model
from contextlib import contextmanager |
import logging
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
class RelList(list):
"""
A list subclass that allows u | s to use dot notation to search for elements
that match the reltype.
>>> links = RelList([{"rel":"self", "href": "self"}, {"rel":"other", "href":"other"}])
>>> links.self["href"]
'self'
>>> links.other["href"]
'other'
>>> links.foo
"""
def __getattr__(self, name):
for item in self:
if item['rel'] == name:
return item
LOGGER = logging.getLogger('hypermedia')
UNSPECIFIED = 0
ADDITION = 1
CHANGE = 2
DELETION = 3
ACTION_NAME = {
UNSPECIFIED: "???",
ADDITION: "added",
CHANGE: "changed",
DELETION: "deleted"
}
@contextmanager
def log_action(user, obj, action_flag, change_message="", request=None):
"""
A context manager that logs the action.
If the action fails, it logs that, too.
"""
    if user.is_anonymous():
user = None
action = ACTION_NAME[action_flag]
object_repr = smart_unicode(obj)
if not change_message:
change_message = _(u"Object %s was %s." % (
object_repr, action
))
# Allow the body of the context to run
try:
yield
except:
LOGGER.error(_(u"Unable to %s object %s." % (action, object_repr)),
exc_info=True,
extra={
'request': request,
'data': {
'user': user
}
}
)
raise
LOGGER.info(change_message, exc_info=True, extra={
'stack': True,
'request': request,
'data': {
'user': user
}
}) |
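
# A minimal usage sketch; the user, obj and request arguments are assumptions
# about the caller's context:
def example_change(user, obj, request=None):
    # Wrap the mutating call so failures are logged with CHANGE context.
    with log_action(user, obj, CHANGE, request=request):
        obj.save()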
jianajavier/pnc-cli | pnc_cli/swagger_client/models/build_record_set_singleton.py | Python | apache-2.0 | 2,941 | 0 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed unde | r the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/ | LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class BuildRecordSetSingleton(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuildRecordSetSingleton - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'content': 'BuildRecordSetRest'
}
self.attribute_map = {
'content': 'content'
}
self._content = None
@property
def content(self):
"""
Gets the content of this BuildRecordSetSingleton.
:return: The content of this BuildRecordSetSingleton.
:rtype: BuildRecordSetRest
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this BuildRecordSetSingleton.
:param content: The content of this BuildRecordSetSingleton.
:type: BuildRecordSetRest
"""
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
|
bwrsandman/openerp-hr | hr_language/hr_language.py | Python | agpl-3.0 | 1,839 | 0.003263 | # -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARR | ANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import tools
from openerp.osv import fields, orm
class hr_language(orm.Model):
_name | = 'hr.language'
_columns = {
'name': fields.selection(tools.scan_languages(), 'Language', required=True),
'description': fields.char('Description', size=64, required=True, translate=True),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'read': fields.boolean('Read'),
'write': fields.boolean('Write'),
'speak': fields.boolean('Speak'),
}
_defaults = {
'read': True,
'write': True,
'speak': True,
}
class hr_employee(orm.Model):
_inherit = 'hr.employee'
_columns = {
'language_ids': fields.one2many('hr.language', 'employee_id', 'Languages'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gokudomatic/cobiv | cobiv.py | Python | mit | 78 | 0 | from | cobiv.Mai | nApp import Cobiv
if __name__ == '__main__':
Cobiv().run()
|
t3dev/odoo | addons/crm/models/res_partner.py | Python | gpl-3.0 | 2,508 | 0.00319 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Partner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one('crm.team', string='Sales Team', oldname='section_id')
opportunity_ids = fields.One2many('crm.lead', 'partner_id', string='Opportunities', domain=[('type', '=', 'opportunity')])
meeting_ids = fields.Many2many('calendar.event', 'calendar_event_res_partner_rel', 'res_partner_id', 'calendar_event_id', string='Meetings', copy=False)
opportunity_count = fields.Integer("Opportunity", compute='_compute_opportunity_count')
meeting_count = fields.Integer("# Meetings", compute='_compute_meeting_count')
@api.model
def default_get(self, fields):
rec = super(Partner, self).default_get(fields)
active_model = self.env.context.get('active_model')
if active_model == 'crm.lead':
lead = self.env[active_model].browse(self.env.context.get('active_id')).exists()
if lead:
rec.update(
phone=lead.phone,
mobile=lead.mobile,
function=lead.function,
title=lead.title.id,
website=lead.website,
street=lead.street,
street2=lead.street2,
city=lead.city,
state_id=lead.state_id.id,
country_id=lead.country_id.id,
zip=lead.zip,
)
return rec
@api.multi
def _compute_opportunity_count(self):
for partner in self:
            operator = 'child_of' if partner.is_company else '='  # the opportunity count should count the opportunities of this company and all its contacts
partner.opportunity_count = self.env['crm.le | ad'].search_count([('partner_id', operator, partner.id), ('type', '=', 'opportunity')])
@api.multi
def _compute_meeting_count(self):
for partner in self:
partner.meeting_count = len(partner.meeting_ids)
@api.multi
def schedule_meeting(self):
partner_ids = self.ids
partner_ids.append(self.env.user.partner_id.id)
action = self.env.ref('calendar.action_calendar_event').read()[0]
action['context'] = {
| 'search_default_partner_ids': self._context['partner_name'],
'default_partner_ids': partner_ids,
}
return action
|
sebastianffx/active_deep_segmentation | save_nus_images.py | Python | gpl-2.0 | 401 | 0.007481 | img_ids = open('chairs_nuswide_imgids.txt','r')
nus_urls | = open('NUS-WIDE-urls.txt', 'r')
dict_urls = {}
for line in nus_urls:
dict_urls[line.split()[0]] = line.split()[3]
chairs_urls = []
count = 0
for line in img_ids:
print('searching url ' + str(count))
count = count +1
if not dict_urls[line.split()[0]] == 'null':
| chairs_urls.append(dict_urls[line.split()[0]])
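
# A minimal sketch persisting the collected URLs; the output filename is an
# assumption:
with open('chairs_nuswide_urls.txt', 'w') as out:
    for url in chairs_urls:
        out.write(url + '\n')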
|
krisrogers/textisbeautiful | manage.py | Python | apache-2.0 | 246 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tib.settings")
|
from django.core.management import execute_from_command_line
| execute_from_command_line(sys.argv)
|
maxalbert/bokeh | bokeh/compat/mpl.py | Python | bsd-3-clause | 2,836 | 0.003173 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#------------------------------------------------------------- | ----------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from warnings import warn
import matplotlib.pyplot as plt
from .bokeh_exporter import BokehExporter
from .bokeh_renderer import BokehRenderer
#-----------------------------------------------------------------------------
# Classes and funct | ions
#-----------------------------------------------------------------------------
def to_bokeh(fig=None, name=None, server=None, notebook=None, pd_obj=True, xkcd=False):
""" Uses bokeh to display a Matplotlib Figure.
You can store a bokeh plot in a standalone HTML file, as a document in
a Bokeh plot server, or embedded directly into an IPython Notebook
output cell.
Parameters
----------
fig: matplotlib.figure.Figure
The figure to display. If None or not specified, then the current figure
will be used.
name: str (default=None)
If this option is provided, then the Bokeh figure will be saved into
this HTML file, and then a web browser will be used to display it.
server: str (default=None)
Fully specified URL of bokeh plot server. Default bokeh plot server
URL is "http://localhost:5006" or simply "default"
notebook: bool (default=False)
Return an output value from this function which represents an HTML
object that the IPython notebook can display. You can also use it with
a bokeh plot server just specifying the URL.
pd_obj: bool (default=True)
The implementation assumes you are plotting using the pandas.
You have the option to turn it off (False) to plot the datetime xaxis
with other non-pandas interfaces.
xkcd: bool (default=False)
If this option is True, then the Bokeh figure will be saved with a
xkcd style.
"""
if name is not None:
warn("Use standard output_file(...) from bokeh.io")
if server is not None:
warn("Use standard output_server(...) from bokeh.io")
if notebook is not None:
warn("Use standard output_notebook() from bokeh.io")
if fig is None:
fig = plt.gcf()
renderer = BokehRenderer(pd_obj, xkcd)
exporter = BokehExporter(renderer)
exporter.run(fig)
return renderer.fig
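
# A minimal usage sketch wrapped in a main guard so importing the module stays
# side-effect free; the sample data and output filename are illustrative:
if __name__ == '__main__':
    from bokeh.io import output_file, show
    plt.plot([1, 2, 3], [4, 2, 5])
    output_file("mpl_figure.html")
    show(to_bokeh())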
|
yujikato/DIRAC | .github/workflows/make_release.py | Python | gpl-3.0 | 2,353 | 0.001275 | #!/usr/bin/env python
import argparse
from packaging.version import Version
import requests
from uritemplate import expand as uri_expand
def make_release(version, commit_hash, release_notes=""):
"""Create a new GitHub release using the given data
This function always makes a pre-release first to ensure the "latest" release never corresponds
to one without artifacts uploaded. If the new version number is not a pre-release, as
determined by PEP-440, it is promoted to at full release after the uploads have completed
successfully.
:param str version: The version of the new release
:param str commit_hash: Git revision used for the release
:param str release_notes: Release notes
"""
# Create a draft release
r = requests.post(
f"{api_root}/releases",
json={
"tag_name": version,
"target_commitish": commit_hash,
"body": release_notes,
"draft": True,
"prerelease": Version(version).is_ | prerelease,
},
headers=headers,
)
r.raise_for_status()
release_data = r.json()
print(f"Created draft release at: {release_data['html_url']}")
# Publish the release
r = requests.patch(
release_data["url"],
json={
"draft": False,
},
headers=headers,
)
r.raise_for_status()
release_data = r.json()
print(f"Published release at: {release_data['html_url']}")
if __name__ | == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--token", required=True)
parser.add_argument("--owner", default="DIRACGrid")
parser.add_argument("--repo", default="DIRAC")
parser.add_argument("--version", required=True)
parser.add_argument("--rev", required=True)
args = parser.parse_args()
token = args.token
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {token}",
}
api_root = f"https://api.github.com/repos/{args.owner}/{args.repo}"
if not args.version.startswith("v"):
raise ValueError('For consistency versions must start with "v"')
v = Version(args.version)
if (v.major, v.minor) < (7, 2):
raise NotImplementedError("Only supported for DIRAC 7.2 or later")
make_release(args.version, args.rev, release_notes="")
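    # Example invocation (illustrative values):
    #   python make_release.py --token "$GITHUB_TOKEN" --version v7.3.1 --rev 1a2b3c4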
|
lorien/runscript | test/script/foo.py | Python | mit | 41 | 0 | def | main(**kwargs):
print(' | foo foo')
|
datashaman/putio-automator | putio_automator/__init__.py | Python | mit | 1,262 | 0.008716 | """
Initialize the application.
"""
import logging
logger = logging.getLogger(__name__)
import appdirs
import click
import datetime
import distutils.dir_util
import os
import putiopy
import sqlite3
APP_NAME = 'putio-automator'
APP_AUTHOR = 'datashaman'
DIRS = appdirs.AppDirs(APP_NAME, APP_AUTHOR)
from .db import create_db, database_path
create_db()
def date_handler(obj):
"Date handler for JSON serialization"
if isinstance(obj, datetime.datetime) or isins | tance(obj, datetime.date):
return obj.isoformat()
else:
return None
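# For example (illustrative):
#   json.dumps({'when': datetime.date(2020, 1, 2)}, default=date_handler)
# yields '{"when": "2020-01-02"}'; unsupported types come back as None and
# therefore serialize to null.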
def find_config(verbose=False):
"Search for config on wellknown paths"
search_paths = [
os.path.join(os.getcwd(), 'config.py'),
os.path.join(DIRS.user_data_dir, 'config.py'),
os.path.join(DIRS.site | _data_dir, 'config.py'),
]
config = None
for search_path in search_paths:
message = 'Searching %s' % search_path
logger.debug(message)
if verbose:
click.echo(message)
if os.path.exists(search_path) and not os.path.isdir(search_path):
config = search_path
break
return config
def echo(level, message):
log_func = getattr(logger, level)
log_func(message)
click.echo(message)
|
alejo8591/maker | core/templatetags/administration.py | Python | mit | 2,069 | 0.00725 | # encoding: utf-8
# Copyright 2013 maker
# License
"""
Administration templatetags
"""
from coffin import template
from maker.core.rendering import render_to_string
from jinja2 import contextfunction, Markup
from django.template import RequestContext
register = template.Library()
@contextfunction
def administration_user_list(context, users, skip_group=False):
"Print a list of users"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('core/administration/tags/user_list',
{'users': users, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(administration_user_list)
@contextfunction
def admi | nistration_group_list(context, groups, skip_group=False):
"Print a list of g | roups"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('core/administration/tags/group_list',
{'groups': groups, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(administration_group_list)
@contextfunction
def administration_module_list(context, modules):
"Print a list of users"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('core/administration/tags/module_list',
{'modules': modules},
context_instance=RequestContext(request),
response_format=response_format))
register.object(administration_module_list)
|
oso/qgis-etri | qgis_etri/xmcda.py | Python | gpl-3.0 | 12,747 | 0.009022 | import sys #FIXME: useless
import time
from qgis_etri.pysimplesoap.client import SoapClient
from qgis_etri.pysimplesoap.simplexml import SimpleXMLElement, TYPE_MAP
ETRI_BM_URL = 'http://webservices.decision-deck.org/soap/ElectreTriBMInference-PyXMCDA.py'
def format_alternatives(alts):
output = "<alternatives>\n"
for alt in alts:
output += "\t<alternative id=\"%s\">\n" % alt
output += "\t\t<active>true</active>\n"
output += "\t</alternative>\n"
output += "</alternatives>\n"
return output
def format_affectations(affectations):
output = "<alternativesAffectations>\n"
for alternative, category in affectations.items():
output += "\t<alternativeAffectation>\n"
output += "\t\t<alternativeID>%s</alternativeID>\n" % alternative
output += "\t\t<categoryID>%s</categoryID>\n" % category
output += "\t</alternativeAffectation>\n"
output += "</alternativesAffectations>\n"
return output
def format_criteria(criteria, directions=None, q_thresholds=None, p_thresholds=None, v_thresholds=None):
output = "<criteria>\n"
for criterion in criteria:
output += "\t<criterion id=\"%s\">\n" % criterion
if directions:
if directions[criterion] == -1:
direction = 'min'
else:
direction = 'max'
output += "\t\t<scale>\n"
output += "\t\t\t<quantitative>\n"
output += "\t\t\t\t<preferenceDirection>%s</preferenceDirection>\n" % direction
output += "\t\t\t</quantitative>\n"
output += "\t\t</scale>\n"
if q_thresholds or p_thresholds or v_thresholds:
output += "\t\t<thresholds>\n"
if q_thresholds:
for i, q in enumerate(q_thresholds):
output += "\t\t\t<threshold id=\"q%d\" name=\"indifference\" mcdaConcept=\"indifference\">\n" % (i+1)
output += "\t\t\t\t<constant><real>%f</real></constant>\n" % q[criterion]
output += "\t\t\t</threshold>\n"
if p_thresholds:
for i, p in enumerate(p_thresholds):
output += "\t\t\t<threshold id=\"p%d\" name=\"preference\" mcdaConcept=\"preference\">\n" % (i+1)
output += "\t\t\t\t<constant><real>%f</real></constant>\n" % p[criterion]
output += "\t\t\t</threshold>\n"
if v_thresholds:
for i, v in enumerate(v_thresholds):
if criterion in v:
output += "\t\t\t<threshold id=\"v%d\" name=\"veto\" mcdaConcept=\"veto\">\n" % (i+1)
output += "\t\t\t\t<constant><real>%f</real></constant>\n" % v[criterion]
output += "\t\t\t</threshold>\n"
output += "\t\t</thresholds>\n"
output += "\t</criterion>\n"
output += "</criteria>\n"
return output
def format_categories(categories):
output = "<categories>\n"
for i, category in enumerate(categories):
output += "\t<category id=\"%s\">\n" % category
output += "\t\t<active>true</active>\n"
output += "\t\t<rank><integer>%d</integer></rank>\n" % (i+1)
output += "\t</category>\n"
output += "</categories>\n"
return output
def format_performances_table(perfs_table):
output = "<performanceTable>\n"
for alternative, perfs in perfs_table.items():
output += "\t<alternativePerformances>\n"
output += "\t\t<alternativeID>%s</alternativeID>\n" % alternative
for criterion, value in perfs.items():
output += "\t\t<performance>\n"
output += "\t\t\t<criterionID>%s</criterionID>\n" % criterion
output += "\t\t\t<value><real>%s</real></value>\n" % value
output += "\t\t</performance>\n"
output += "\t</alternativePerformances>\n"
output += "</performanceTable>\n"
return output
def format_criteria_weights(weights):
output = "<criteriaValues>\n"
for crit, weight in weights.items():
output += "\t<criterionValue>\n"
output += "\t\t<criterionID>%s</criterionID>\n" % crit
output += "\t\t<value><real>%s</real></value>\n" % weight
output += "\t</criterionValue>\n"
output += "</criteriaValues>\n"
return output
def format_category_profiles(profiles, palts_id, cat_id):
output = "<categoriesProfiles>\n"
for i, profile in enumerate(profiles):
output += "\t<categoryProfile>\n"
output += "\t\t<alternativeID>%s</alternativeID>\n" % palts_id[i]
output += "\t\t<limits>\n"
output += "\t\t\t<lowerCategory><categoryID>%s</categoryID></lowerCategory>\n" % cat_id[i]
output += "\t\t\t<upperCategory><categoryID>%s</categoryID></upperCategory>\n" % cat_id[i+1]
output += "\t\t</limits>\n"
output += "\t</categoryProfile>\n"
output += "</categoriesProfiles>\n"
return output
def format_pt_reference_alternatives(profiles, palts_id, crit_id):
output = "<performanceTable>\n"
output += "\t<description>\n"
output += "\t\t<title>Performance table of reference alternatives</title>\n"
output += "\t</description>\n"
for i, profile in enumerate(profiles):
output += "\t<alternativePerformances>\n"
output += "\t\t<alternativeID>%s</alternativeID>\n" % palts_id[i]
for j, crit in enumerate(crit_id):
output += "\t\t<performance>\n"
output += "\t\t\t<criterionID>%s</criterionID>\n" % crit
output += "\t\t\t<value><real>%s</real></value>\n" % profile['refs'][crit]
output += "\t\t</performance>\n"
output += "\t</alternativePerformances>\n"
output += "</performanceTable>\n"
return output
def format_lambda(lbda):
output = "<methodParameters>\n"
output += "\t<parameter name=\"lambda\">\n"
output += "\t\t<value><real>%s</real></value>\n" % lbda
output += "\t</parameter>\n"
output += "</methodParameters>\n"
return output
def add_xmcda_tags(xml_data):
output = '<?xml version="1.0" encoding="UTF-8"?>\n'
output += '<?xml-stylesheet type="text/xsl" href="xmcdaXSL.xsl"?>\n'
output += '<xmcda:XMCDA xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.decision-deck.org/2009/XMCDA-2.0.0 file:../XMCDA-2.0.0.xsd" xmlns:xmcda="http://www.decision-deck.org/2009/XMCDA-2.0.0" instanceID="void">\n'
output += xml_data
output += "</xmcda:XMCDA>\n"
return output
def su | bmit_problem(url, params):
p = SimpleXMLElement("<submitProblem></submitProblem>")
for k, v in params.items():
child = p.add_child(k, v)
child.add_attribute("xsi:type", "xsd:string")
client = SoapClient(
location = url,
| action = '',
soap_ns = 'soapenv',
namespace = 'http://www.decision-deck.org/2009/XMCDA-2.0.0',
trace = False)
sp = client.call('submitProblem', p)
reply = sp.submitProblemResponse
return str(reply.ticket)
def request_solution(url, ticket_id, timeout=0):
client = SoapClient(
location = url,
action = '',
soap_ns = 'soapenv',
namespace = 'http://www.decision-deck.org/2009/XMCDA-2.0.0',
trace = False)
start = time.time()
while True:
rs = client.call('requestSolution', ticket=ticket_id)
reply = rs.requestSolutionResponse
if getattr(reply,'service-status') != 1:
break
time.sleep(0.5)
if timeout and time.time()>start+timeout:
return None
return reply
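# A minimal end-to-end sketch tying the two calls above together; the params
# dict is assumed to map XMCDA part names to already-wrapped XMCDA strings:
def solve_problem(params, timeout=60):
    ticket = submit_problem(ETRI_BM_URL, params)
    return request_solution(ETRI_BM_URL, ticket, timeout=timeout)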
def get_lambda(xmltree):
xml_lbda = xmltree.find(".//methodParameters/parameter/value/real")
return float(xml_lbda.text)
def get_method_messages(xmltree):
messages = []
for xmlmsg in xmltree.findall(".//methodMessages/logMessage/text"):
messages.append(xmlmsg.text)
return messages
def get_method_errors(xmltree):
errors = []
for xmlmsg in xmltree.findall(".//methodMessages/errorMessage/text"):
errors.append(xmlmsg.text)
return errors
def save_file(filename, xmcda_data):
file = open(filename, "w")
file.write(add_xmcda |
hip-odoo/odoo | addons/delivery/models/delivery_carrier.py | Python | agpl-3.0 | 13,010 | 0.002998 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class DeliveryCarrier(models.Model):
_name = 'delivery.carrier'
_inherits = {'product.product': 'product_id'}
_description = "Carrier"
_order = 'sequence, id'
''' A Shipping Provider
In order to add your own external provider, follow these steps:
1. Create your model MyProvider that _inherit 'delivery.carrier'
2. Extend the selection of the field "delivery_type" with a pair
('<my_provider>', 'My Provider')
3. Add your methods:
<my_provider>_get_shipping_price_from_so
<my_provider>_send_shipping
<my_provider>_open_tracking_page
<my_provider>_cancel_shipment
(they are documented hereunder)
'''
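    # A minimal sketch of steps 1-3 for a hypothetical provider (the module,
    # class and flat rate are illustrative, not a real integration):
    #
    #   class ProviderMyProvider(models.Model):
    #       _inherit = 'delivery.carrier'
    #       delivery_type = fields.Selection(
    #           selection_add=[('my_provider', 'My Provider')])
    #
    #       def my_provider_get_shipping_price_from_so(self, orders):
    #           return [10.0 for order in orders]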
# -------------------------------- #
# Internals for shipping providers #
# -------------------------------- #
sequence = fields.Integer(help="Determine the display order", default=10)
# This field will be overwritten by internal shipping providers by adding their own type (ex: 'fedex')
delivery_type = fields.Selection([('fixed', 'Fixed Price'), ('base_on_rule', 'Based on Rules')], string='Provider', default='fixed', required=True)
product_type = fields.Selection(related='product_id.type', default='service')
product_sale_ok = fields.Boolean(related='product_id.sale_ok', default=False)
product_id = fields.Many2one('product.product', string='Delivery Product', required=True, ondelete="cascade")
price = fields.Float(compute='get_price')
available = fields.Boolean(compute='get_price')
free_if_more_than = fields.Boolean('Free if Order total is more than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping", default=False)
amount = fields.Float(string='Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency")
country_ids = fields.Many2many('res.country', 'delivery_carrier_country_rel', 'carrier_id', 'country_id', 'Countries')
state_ids = fields.Many2many('res.country.state', 'delivery_carrier_state_rel', 'carrier_id', 'state_id', 'States')
zip_from = fields.Char('Zip From')
zip_to = fields.Char('Zip To')
price_rule_ids = fields.One2many('delivery.price.rule', 'carrier_id', 'Pricing Rules', copy=True)
fixed_price = fields.Float(compute='_compute_fixed_price', inverse='_set_product_fixed_price', store=True, string='Fixed Price',help="Keep empty if the pricing depends on the advanced pricing per destination")
integration_level = fields.Selection([('rate', 'Get Rate'), ('rate_and_ship', 'Get Rate and Create Shipment')], string="Integration Level", default='rate_and_ship', help="Action while validating Delivery Orders")
prod_environment = fields.Boolean("Environment", help="Set to True if your cre | dentials are certified for production.")
margin = fields.Integer(help='This percentage will be added to the shipping price.')
_sql_constraints = [
('margin_not_under_100_percent', 'CHECK (margin >= -100)', 'Margin cannot be lower than -100%'),
]
@api.one
def toggle_prod_environment(self):
self.prod_environment = not self.prod_environment
@api | .multi
def install_more_provider(self):
return {
'name': 'New Providers',
'view_mode': 'kanban',
'res_model': 'ir.module.module',
'domain': [['name', 'ilike', 'delivery_']],
'type': 'ir.actions.act_window',
'help': _('''<p class="oe_view_nocontent">
Buy Odoo Enterprise now to get more providers.
</p>'''),
}
@api.multi
def name_get(self):
display_delivery = self.env.context.get('display_delivery', False)
order_id = self.env.context.get('order_id', False)
if display_delivery and order_id:
order = self.env['sale.order'].browse(order_id)
currency = order.pricelist_id.currency_id.name or ''
res = []
for carrier_id in self.ids:
try:
r = self.read([carrier_id], ['name', 'price'])[0]
res.append((r['id'], r['name'] + ' (' + (str(r['price'])) + ' ' + currency + ')'))
except ValidationError:
r = self.read([carrier_id], ['name'])[0]
res.append((r['id'], r['name']))
else:
res = super(DeliveryCarrier, self).name_get()
return res
@api.depends('product_id.list_price', 'product_id.product_tmpl_id.list_price')
def _compute_fixed_price(self):
for carrier in self:
carrier.fixed_price = carrier.product_id.list_price
def _set_product_fixed_price(self):
for carrier in self:
carrier.product_id.list_price = carrier.fixed_price
@api.one
def get_price(self):
SaleOrder = self.env['sale.order']
self.available = False
self.price = False
order_id = self.env.context.get('order_id')
if order_id:
# FIXME: temporary hack until we refactor the delivery API in master
order = SaleOrder.browse(order_id)
if self.delivery_type not in ['fixed', 'base_on_rule']:
try:
computed_price = self.get_shipping_price_from_so(order)[0]
self.available = True
except ValidationError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s, not found", self.name, e.name)
computed_price = 0.0
else:
carrier = self.verify_carrier(order.partner_shipping_id)
if carrier:
try:
computed_price = carrier.get_price_available(order)
self.available = True
except UserError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s", carrier.name, e.name)
computed_price = 0.0
else:
computed_price = 0.0
self.price = computed_price * (1.0 + (float(self.margin) / 100.0))
# -------------------------- #
# API for external providers #
# -------------------------- #
# TODO define and handle exceptions that could be thrown by providers
def get_shipping_price_from_so(self, orders):
''' For every sale order, compute the price of the shipment
:param orders: A recordset of sale orders
:return list: A list of floats, containing the estimated price for the shipping of the sale order
'''
self.ensure_one()
if hasattr(self, '%s_get_shipping_price_from_so' % self.delivery_type):
return getattr(self, '%s_get_shipping_price_from_so' % self.delivery_type)(orders)
def send_shipping(self, pickings):
''' Send the package to the service provider
:param pickings: A recordset of pickings
:return list: A list of dictionaries (one per picking) containing of the form::
{ 'exact_price': price,
'tracking_number': number }
'''
self.ensure_one()
if hasattr(self, '%s_send_shipping' % self.delivery_type):
return getattr(self, '%s_send_shipping' % self.delivery_type)(pickings)
def get_tracking_link(self, pickings):
''' Ask the tracking link to the service provider
:param pickings: A recordset of pickings
:return list: A list of string URLs, containing the tracking links for every picking
'''
self.ensure_one()
if hasattr(self, '%s_get_tracking_link' % self.delivery_type):
return getattr(self, '%s_get_tracking_link' % self.d |
crmccreary/openerp_server | openerp/addons/hr_timesheet/hr_timesheet.py | Python | agpl-3.0 | 8,348 | 0.00563 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields
from osv import osv
from tools.translate import _
class hr_employee(osv.osv):
_name = "hr.employee"
_inherit = "hr.employee"
_columns = {
'product_id': fields.many2one('product.product', 'Product', help="Specifies employee's designation as a product with type 'service'."),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
'uom_id': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='UoM', store=True, readonly=True)
}
def _getAnalyticJournal(self, cr, uid, context=None):
md = self.pool.get('ir.model.data')
try:
result = md.get_object_reference(cr, uid, 'hr_timesheet', 'analytic_journal')
return result[1]
except ValueError:
pass
return False
def _getEmployeeProduct(self, cr, uid, context=None):
md = self.pool.get('ir.model.data')
try:
result = md.get_object_reference(cr, uid, 'product', 'product_consultant')
return result[1]
except ValueError:
pass
return False
_defaults = {
'journal_id': _getAnalyticJournal,
'product_id': _getEmployeeProduct
}
hr_employee()
class hr_analytic_timesheet(osv.osv):
_name = "hr.analytic.timesheet"
_table = 'hr_analytic_timesheet'
_description = "Timesheet Line"
_inherits = {'account.analytic.line': 'line_id'}
_order = "id desc"
_columns = {
'line_id': fields.many2one('account.analytic.line', 'Analytic Line', ondelete='cascade', required=True),
'partner_id': fields.related('account_id', 'partner_id', type='many2one', string='Partner', relation='res.partner', store=True),
}
def unlink(self, cr, uid, ids, context=None):
toremove = {}
for obj in self.browse(cr, uid, ids, context=context):
toremove[obj.line_id.id] = True
self.pool.get('account.analytic.line').unlink(cr, uid, toremove.keys(), context=context)
return super(hr_analytic_timesheet, self).unlink(cr, uid, ids, context=context)
def on_change_unit_amount(self, cr, uid, id, prod_id, unit_amount, company_id, unit=False, journal_id=False, context=None):
res = {'value':{}}
if prod_id and unit_amount:
# find company
company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=context)
r = self.pool.get('account.analytic.line').on_change_unit_amount(cr, uid, id, prod_id, unit_amount, company_id, unit, journal_id, context=context)
if r:
res.update(r)
# update unit of measurement
if prod_id:
uom = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
if uom.uom_id:
res['value'].update({'product_uom_id': uom.uom_id.id})
else:
res['value'].update({'product_uom_id': False})
return res
def _getEmployeeProduct(self, cr, uid, context=None):
if context is None:
context = {}
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id', uid))], context=context)
if emp_id:
emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
if emp.product_id:
return emp.product_id.id
return False
def _getEmployeeUnit(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
if context is None:
context = {}
emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id', uid))], context=context)
if emp_id:
emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
if emp.product_id:
return emp.product_id.uom_id.id
return False
def _getGeneralAccount(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
if context is None:
context = {}
emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id', uid))], context=context)
if emp_id:
emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
            if emp.product_id:
a = emp.product_id.product_tmpl_id.property_account_expense.id
if not a:
a = emp.product_id.categ_id.property_account_expense_categ.id
if a:
return a
return False
def _getAnalyticJournal(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
if context is None:
context = {}
emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id', uid))], context=context)
if emp_id:
emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
if emp.journal_id:
return emp.journal_id.id
return False
_defaults = {
'product_uom_id': _getEmployeeUnit,
'product_id': _getEmployeeProduct,
'general_account_id': _getGeneralAccount,
'journal_id': _getAnalyticJournal,
'date': lambda self, cr, uid, ctx: ctx.get('date', fields.date.context_today(self,cr,uid,context=ctx)),
'user_id': lambda obj, cr, uid, ctx: ctx.get('user_id', uid),
}
def on_change_account_id(self, cr, uid, ids, account_id):
return {'value':{}}
def on_change_date(self, cr, uid, ids, date):
if ids:
new_date = self.read(cr, uid, ids[0], ['date'])['date']
if date != new_date:
warning = {'title':'User Alert!','message':'Changing the date will let this entry appear in the timesheet of the new date.'}
return {'value':{},'warning':warning}
return {'value':{}}
def create(self, cr, uid, vals, context=None):
if context is None:
contex | t = {}
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id', uid))], context=context)
ename = ''
if emp_id:
ename = emp_obj.browse(cr, uid, emp_id[0], context=context).name
if not vals.get('journal_id',False):
raise osv.except_osv(_('Warning !'), _('Analytic journal is not defined for employee %s \nDefine an employee for the selected user and assign an analytic jo | urnal!')%(ename,))
if not vals.get('account_id',False):
raise osv.except_osv(_('Warning !'), _('No analytic account defined on the project.\nPlease set one or we can not automatically fill the timesheet.'))
return super(hr_analytic_timesheet, self).create(cr, uid, vals, context=context)
def on_change_user_id(self, cr, uid, ids, user_id):
if not user_id:
return {}
context = {'user_id': user_id}
return {'value': {
            'product_id': self._getEmployeeProduct(cr, uid, context),
'product_uom_id': self._getEmployeeUnit(cr, uid, context),
'general_account_id': self._getGeneralAccount(cr, uid, context),
|
slightstone/SickRage | sickbeard/postProcessor.py | Python | gpl-3.0 | 45,995 | 0.004805 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import glob
import fnmatch
import os
import re
import subprocess
import stat
import sickbeard
from sickbeard import db
from sickbeard import common
from sickbeard import exceptions
from sickbeard import helpers
from sickbeard import history
from sickbeard import logger
from sickbeard import notifiers
from sickbeard import show_name_helpers
from sickbeard import failed_history
from sickbeard import name_cache
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from lib import adba
class PostProcessor(object):
"""
A class which will process a media file according to the post processing settings in the config.
"""
EXISTS_LARGER = 1
EXISTS_SAME = 2
EXISTS_SMALLER = 3
DOESNT_EXIST = 4
IGNORED_FILESTRINGS = ["/.AppleDouble/", ".DS_Store"]
def __init__(self, file_path, nzb_name=None, process_method=None, is_priority=None):
"""
Creates a new post processor with the given file path and optionally an NZB name.
file_path: The path to the file to be processed
nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
"""
# absolute path to the folder that is being processed
self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))
# full path to file
self.file_path = file_path
# file name only
self.file_name = ek.ek(os.path.basename, file_path)
# the name of the folder only
self.folder_name = ek.ek(os.path.basename, self.folder_path)
# name of the NZB that resulted in this folder
self.nzb_name = nzb_name
self.process_method = process_method if process_method else sickbeard.PROCESS_METHOD
self.in_history = False
self.release_group = None
self.release_name = None
self.is_proper = False
self.is_priority = is_priority
self.log = ''
self.version = None
def _log(self, message, level=logger.INFO):
"""
A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.
message: The string to log (unicode)
level: The log level to use (optional)
"""
logger.log(message, level)
self.log += message + '\n'
def _checkForExistingFile(self, existing_file):
"""
        Checks if a file already exists and, if it does, whether it's bigger or smaller than
the file we are post processing
existing_file: The file to compare to
Returns:
DOESNT_EXIST if th | e file doesn't exist
EXISTS_LARGER if the file exists and is larger than the file we are post processing
EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
EXISTS_SAME if the file exists and is the same size as the file we are post processing
"""
if not existing_file:
self._log(u"There is no existing file so there's no worries about replacing it", | logger.DEBUG)
return PostProcessor.DOESNT_EXIST
        # if the existing file is present, return the appropriate code depending on its size
if ek.ek(os.path.isfile, existing_file):
# see if it's bigger than our old file
if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path):
self._log(u"File " + existing_file + " is larger than " + self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_LARGER
elif ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path):
self._log(u"File " + existing_file + " is the same size as " + self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_SAME
else:
self._log(u"File " + existing_file + " is smaller than " + self.file_path, logger.DEBUG)
return PostProcessor.EXISTS_SMALLER
else:
self._log(u"File " + existing_file + " doesn't exist so there's no worries about replacing it",
logger.DEBUG)
return PostProcessor.DOESNT_EXIST
def list_associated_files(self, file_path, base_name_only=False, subtitles_only=False, subfolders=False):
"""
        For a given file path, searches for files with the same name but a different extension and returns their absolute paths
        file_path: The file to check for associated files
        base_name_only: If False, an extra '.' is appended to file_path (minus its extension) for a conservative search
Returns: A list containing all files which are associated to the given file
"""
def recursive_glob(treeroot, pattern):
results = []
for base, dirs, files in os.walk(treeroot):
goodfiles = fnmatch.filter(files, pattern)
results.extend(os.path.join(base, f) for f in goodfiles)
return results
if not file_path:
return []
file_path_list = []
if subfolders:
base_name = ek.ek(os.path.basename, file_path).rpartition('.')[0]
else:
base_name = file_path.rpartition('.')[0]
if not base_name_only:
base_name = base_name + '.'
# don't strip it all and use cwd by accident
if not base_name:
return []
# don't confuse glob with chars we didn't mean to use
base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)
if subfolders:
filelist = ek.ek(recursive_glob, ek.ek(os.path.dirname, file_path), base_name + '*')
else:
filelist = ek.ek(glob.glob, base_name + '*')
for associated_file_path in filelist:
            # skip the file itself; only add associated files to the list
if associated_file_path == file_path:
continue
# only list it if the only non-shared part is the extension or if it is a subtitle
            if subtitles_only and associated_file_path[len(associated_file_path) - 3:] not in common.subtitleExtensions:
continue
# Exclude .rar files from associated list
            if re.search(r'(^.+\.(rar|r\d+)$)', associated_file_path):
continue
if ek.ek(os.path.isfile, associated_file_path):
file_path_list.append(associated_file_path)
return file_path_list
def _delete(self, file_path, associated_files=False):
"""
Deletes the file and optionally all associated files.
file_path: The file to delete
associated_files: True to delete all files which differ only by extension, False to leave them
"""
if not file_path:
return
# figure out which files we want to delete
file_list = [file_path]
if associated_files:
file_list = file_list + self.list_associated_files(file_path, base_name_only=True, subfolders=True)
if not file_list:
self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
return
# delete the file and any other files which we want to delete
for cur_file in file_list:
if ek.ek |
smallyear/linuxLearn | salt/salt/modules/system.py | Python | apache-2.0 | 1,800 | 0 | # -*- coding: utf-8 -*-
'''
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import
import salt.utils
def __virtual__():
'''
Only supported on POSIX-like systems
'''
if salt.utils.is_windows() or not salt.utils.which('shutdown'):
return False
return True
def halt():
'''
Halt a running system
CLI Example:
.. code-block:: bash
salt '*' system.halt
'''
cmd = ['halt']
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret
def init(runlevel):
'''
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
'''
cmd = ['init', '{0}'.format(runlevel)]
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret
def poweroff():
'''
Poweroff a running system
CLI Example:
.. code-block:: bash
salt '*' system.poweroff
'''
cmd = ['poweroff']
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret
def reboot(at_time=None):
'''
Reboot the system
at_time
        The wait time in minutes before the system is rebooted.
CLI Example:
.. code-block:: bash
salt '*' system.reboot
'''
cmd = ['shutdown', '-r', ('{0}'.format(at_time) if at_time else 'now')]
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret
def shutdown(at_time=None):
'''
Shutdown a running system
at_time
        The wait time in minutes before the system w | ill be shut down.
CLI Example:
.. code-block:: bash
salt '*' system.shutdown 5
'''
cmd = ['shutdown', '-h', ('{0}'.format(at_time) if at_time e | lse 'now')]
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret
|
Open511/roadcast | travis-ci/travis511/settings.py | Python | agpl-3.0 | 5,239 | 0.002291 | #coding: utf-8
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'open511', # Or path to database file if using sqlite3.
'USER': 'postgres', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
OPEN511_BASE_URL = 'http://open511.example.com'
OPEN511_UI_TEST_BROWSER = 'phantomjs'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
LANGUAGES = [
('en', 'English'),
('fr', u'Français')
]
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
US | E_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
PROJ_ROOT = os.path.dirname(os.path.realpath(__file__))
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/ho | me/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', 'media'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', 'collected_static'))
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'x_cuca9f9f-r!!3lcm^x1&uwzy_v85rq00vg(-a1k*w3dzo0%u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# FIXME 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'django.middleware.locale.LocaleMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'travis511.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'travis511.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJ_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGIN_REDIRECT_URL = 'o5ui_home'
LOGIN_URL = 'login'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'open511_server',
'django_open511_ui',
'django.contrib.admin',
'django.contrib.gis',
'django_extensions'
)
|
tuaris/suncoin | contrib/spendfrom/spendfrom.py | Python | mit | 10,053 | 0.005968 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting SUN values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
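    # SafeConfigParser insists on section headers, but bitcoin.conf has none;
    # FakeSecHead wraps the file, injecting a fake "[all]" section first and
    # stripping trailing '#' comments from each subsequent line.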
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f SUN available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, lar | ger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
impo | rt optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount |
licko/vpp-1701-licko | test/test_vxlan.py | Python | apache-2.0 | 7,549 | 0 | #!/usr/bin/env python
import socket
import unittest
from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
class TestVxlan(BridgeDomain, VppTestCase):
""" VXLAN Test Case """
def __init__(self, *args):
BridgeDomain.__init__(self)
VppTestCase.__init__(self, *args)
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding VXLAN header with its
UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def encap_mcast(self, pkt, src_ip, src_mac, vni):
"""
Encapsulate the original payload frame by adding VXLAN header with its
UDP, IP and Ethernet fields
"""
return (Ether(src=src_mac, dst=self.mcast_mac4) /
IP(src=src_ip, dst=self.mcast_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing VXLAN header
"""
# check if is set I flag
self.assertEqual(pkt[VXLAN].flags, int('0x8', 16))
return pkt[VXLAN].payload
# Method for checking VXLAN encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False):
# TODO: add error messages
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
# Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
# Verify UDP destination port is VXLAN 4789, source UDP port could be
# a | rbitrary.
self.assertEqual(pkt[UDP].dpor | t, type(self).dport)
# TODO: checksum check
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
@staticmethod
def ip4_range(ip4n, s=10, e=20):
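        # Keep the first three octets of the packed address ip4n and vary the
        # last octet over [s, e), yielding packed (Python 2 byte string)
        # addresses.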
base = str(bytearray(ip4n)[:3])
return ((base + ip) for ip in str(bytearray(range(s, e))))
@classmethod
def create_vxlan_flood_test_bd(cls, vni):
# Create 10 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = 20
next_hop_address = cls.pg0.remote_ip4n
for dest_addr in cls.ip4_range(next_hop_address, ip_range_start,
ip_range_end):
# add host route so dest_addr will not be resolved
cls.vapi.ip_add_del_route(dest_addr, 32, next_hop_address)
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_addr,
vni=vni)
cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
@classmethod
def add_del_mcast_load(cls, is_add):
ip_range_start = 10
ip_range_end = 210
for dest_addr in cls.ip4_range(cls.mcast_ip4n, ip_range_start,
ip_range_end):
vni = bytearray(dest_addr)[3]
cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_addr,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
@classmethod
def add_mcast_load(cls):
cls.add_del_mcast_load(is_add=1)
@classmethod
def del_mcast_load(cls):
cls.add_del_mcast_load(is_add=0)
# Class method to start the VXLAN test case.
# Overrides setUpClass method in VppTestCase class.
# Python try..except statement is used to ensure that the tear down of
# the class will be executed even if exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestVxlan, cls).setUpClass()
try:
cls.dport = 4789
cls.flags = 0x8
# Create 2 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Our Multicast address
cls.mcast_ip4 = '239.1.1.1'
cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4)
iplong = atol(cls.mcast_ip4)
cls.mcast_mac4 = "01:00:5e:%02x:%02x:%02x" % (
(iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
# Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1
# into BD.
cls.single_tunnel_bd = 1
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.pg0.remote_ip4n,
vni=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(cls.pg1.sw_if_index,
bd_id=cls.single_tunnel_bd)
# Setup vni 2 to test multicast flooding
cls.mcast_flood_bd = 2
cls.create_vxlan_flood_test_bd(cls.mcast_flood_bd)
r = cls.vapi.vxlan_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
bd_id=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(cls.pg2.sw_if_index,
bd_id=cls.mcast_flood_bd)
# Add and delete mcast tunnels to check stability
cls.add_mcast_load()
cls.del_mcast_load()
# Setup vni 3 to test unicast flooding
cls.ucast_flood_bd = 3
cls.create_vxlan_flood_test_bd(cls.ucast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(cls.pg3.sw_if_index,
bd_id=cls.ucast_flood_bd)
except Exception:
super(TestVxlan, cls).tearDownClass()
raise
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestVxlan, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
self.logger.info(self.vapi.cli("show vxlan tunnel"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
shahbaz17/zamboni | mkt/comm/tests/test_utils_mail.py | Python | bsd-3-clause | 10,981 | 0 | import base64
import os.path
from django.conf import settings
from django.core import mail
import mock
from nose import SkipTest
from nose.tools import eq_, ok_
import mkt
from mkt.comm.models import CommunicationThread, CommunicationThreadToken
from mkt.comm.tests.test_views import CommTestMixin
from mkt.comm.utils import create_comm_note
from mkt.comm.utils_mail import CommEmailParser, save_from_email_reply
from mkt.constants import comm
from mkt.site.fixtures import fixture
from mkt.site.tests import TestCase, user_factory
from mkt.site.utils import app_factory
from mkt.users.models import UserProfile
sample_email = os.path.join(settings.ROOT, 'mkt', 'comm', | 'tests', 'emails',
'email.txt')
multi_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_multipart.txt')
quopri_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_quoted_printable.txt')
attach_email = | os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_attachment.txt')
attach_email2 = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests', 'emails',
'email_attachment2.txt')
class TestSendMailComm(TestCase, CommTestMixin):
def setUp(self):
self.developer = user_factory()
self.mozilla_contact = user_factory()
self.reviewer = user_factory()
self.senior_reviewer = user_factory()
self.grant_permission(self.senior_reviewer, '*:*',
'Senior App Reviewers')
self.app = app_factory()
self.app.webappuser_set.create(user=self.developer)
self.app.update(mozilla_contact=self.mozilla_contact.email)
def _create(self, note_type, author=None):
author = author or self.reviewer
return create_comm_note(self.app, self.app.current_version, author,
'Test Comment', note_type=note_type)
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
return recipients
def _check_template(self, call, template):
eq_(call[0][1], 'comm/emails/%s.html' % template)
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_approval(self, email):
self._create(comm.APPROVAL)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.mozilla_contact.email in recipients
self._check_template(email.call_args, 'approval')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation(self, email):
self._create(comm.ESCALATION)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args_list[0],
'escalation_senior_reviewer')
self._check_template(email.call_args_list[1],
'escalation_developer')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation_vip_app(self, email):
self._create(comm.ESCALATION_VIP_APP)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args,
'escalation_vip')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation_prerelease_app(self, email):
self._create(comm.ESCALATION_PRERELEASE_APP)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args,
'escalation_prerelease_app')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reviewer_comment(self, email):
another_reviewer = user_factory()
self._create(comm.REVIEWER_COMMENT, author=self.reviewer)
self._create(comm.REVIEWER_COMMENT, author=another_reviewer)
eq_(email.call_count, 3)
recipients = self._recipients(email)
assert self.reviewer.email in recipients
assert self.mozilla_contact.email in recipients
assert self.developer.email not in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_developer_comment(self, email):
self._create(comm.REVIEWER_COMMENT)
self._create(comm.DEVELOPER_COMMENT, author=self.developer)
eq_(email.call_count, 4)
recipients = self._recipients(email)
assert self.mozilla_contact.email in recipients
assert self.reviewer.email in recipients
assert self.developer.email not in recipients
assert settings.MKT_REVIEWS_EMAIL in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_additional_review(self, email):
self._create(comm.ADDITIONAL_REVIEW_PASSED)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.mozilla_contact.email in recipients
assert self.developer.email in recipients
self._check_template(email.call_args, 'tarako')
def test_mail_templates_exist(self):
for note_type in comm.COMM_MAIL_MAP:
self._create(note_type)
for note_type in comm.EMAIL_SENIOR_REVIEWERS_AND_DEV:
self._create(note_type)
self._create(comm.NO_ACTION)
def test_email_formatting(self):
"""
        Run this test manually when you want to spot-check that every email is
        formatted nicely and consistently. Prints each note type's email
        once.
"""
raise SkipTest
for note_type in comm.COMM_MAIL_MAP:
self._create(note_type)
email_subjects = []
for email in mail.outbox:
if email.subject in email_subjects:
continue
            email_subjects.append(email.subject)
print '##### %s #####' % email.subject
print email.body
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reply_to(self, email):
note, thread = self._create(comm.APPROVAL)
reply_to = email.call_args_list[1][1]['headers']['Reply-To']
ok_(reply_to.startswith('commreply+'))
ok_(reply_to.endswith('marketplace.firefox.com'))
class TestEmailReplySaving(TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.app = app_factory(name='Antelope', status=mkt.STATUS_PENDING)
self.profile = UserProfile.objects.get(pk=999)
t = CommunicationThread.objects.create(
_webapp=self.app, _version=self.app.current_version,
read_permission_reviewer=True)
self.token = CommunicationThreadToken.objects.create(
thread=t, user=self.profile)
self.token.update(uuid='5a0b8a83d501412589cc5d562334b46b')
self.email_base64 = open(sample_email).read()
self.grant_permission(self.profile, 'Apps:Review')
def test_successful_save(self):
note = save_from_email_reply(self.email_base64)
eq_(note.body, 'test note 5\n')
def test_developer_comment(self):
self.profile.webappuser_set.create(webapp=self.app)
note = save_from_email_reply(self.email_base64)
eq_(note.note_type, comm.DEVELOPER_COMMENT)
def test_reviewer_comment(self):
self.grant_permission(self.profile, 'Apps:Review')
note = save_from_email_reply(self.email_base64)
eq_(note.note_type, comm.REVIEWER_COMMENT)
def test_with_max_count_token(self):
# Test with an invalid token.
self.token.update(use_count=comm.MAX_TOKEN_USE_COUNT + 1)
assert not save_from_email_reply(self.email_base64)
def test_with_unpermitted_token(self):
"""Test whe |
Distrotech/yum-utils | plugins/refresh-updatesd/refresh-updatesd.py | Python | gpl-2.0 | 1,858 | 0.002691 | # A plugin for yum which notifies yum-updatesd to refresh its data
#
# Written by James Bowes <jbowes@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundati | on; either version 2 of the License, or
# (at your option) any lat | er version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# version 0.0.5
import os
import dbus
from yum.plugins import TYPE_CORE
requires_api_version = '2.5'
plugin_type = TYPE_CORE
def posttrans_hook(conduit):
"""
Tell yum-updatesd to refresh its state. Run only after an rpm transaction.
"""
if os.geteuid(): # If we aren't root, we _can't_ have updated anything
return
try:
bus = dbus.SystemBus()
except dbus.DBusException, e:
conduit.info(2, "Unable to connect to dbus")
conduit.info(6, "%s" %(e,))
return
try:
o = bus.get_object('org.freedesktop.DBus', '/')
o = dbus.Interface(o, "org.freedesktop.DBus")
if not o.NameHasOwner("edu.duke.linux.yum"):
conduit.info(2, "yum-updatesd not on the bus")
return
except dbus.DBusException, e:
conduit.info(2, "Unable to look at what's on dbus")
conduit.info(6, "%s" %(e,))
return
try:
updatesd_proxy = bus.get_object('edu.duke.linux.yum', '/Updatesd')
updatesd_iface = dbus.Interface(updatesd_proxy, 'edu.duke.linux.yum')
updatesd_iface.CheckNow()
except dbus.DBusException, e:
conduit.info(2, "Unable to send message to yum-updatesd")
conduit.info(6, "%s" %(e,))
|
MauricioAcosta/Aprendiendo_Python | encriptar/criptografia.py | Python | gpl-3.0 | 2,469 | 0.001623 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
KEYS = {
'a': 'w',
'b': 'E',
'c': 'x',
'd': '1',
'e': 'a',
'f': 't',
'g': '0',
'h': 'C',
'i': 'b',
'j': '!',
'k': 'z',
'l': '8',
'm': 'M',
'n': 'I',
'o': 'd',
'p': '.',
'q': 'U',
'r': 'Y',
's': 'i',
't': '3',
'u': ',',
'v': 'J',
'w': 'N',
'x': 'f',
'y': 'm',
'z': 'W',
'A': 'G',
'B': 'S',
'C': 'j',
'D': 'n',
'E': 's',
'F': 'Q',
'G': 'o',
'H': 'e',
'I': 'u',
'J': 'g',
'K': '2',
'L': '9',
'M': 'A',
'N': '5',
'O': '4',
'P': '?',
'Q': 'c',
'R': 'r',
'S': 'O',
'T': 'P',
'U': 'h',
'V': '6',
'W': 'q',
'X': 'H',
'Y': 'R',
'Z': 'l',
'0': 'k',
'1': '7',
'2': 'X',
'3': 'L',
'4': 'P',
'5': 'v',
'6': 'T',
'7': 'V',
'8': 'y',
'9': 'k',
'.': 'z',
',': 'D',
'?': 'F',
'!': 'B',
}
def cypher(message):
words = message.split(' ')
cypher_messager = []
for word in words:
cypher_word = ''
for letter in word:
cypher_word += KEYS[letter]
cypher_messager.append(cypher_word)
return ' '.join(cypher_messager)
def decipher(message):
words = message.split(' ')
decipher_message = []
for word in words:
decipher_word = ''
for letter in word:
for key,value in KEYS.iteritems():
if value == letter:
decipher_word += key
decipher_message.append(decipher_word)
return ' '.join(decipher_message)
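# Example round trip (follows from the KEYS table above):
#   cypher('hola')   -> 'Cd8w'
#   decipher('Cd8w') -> 'hola'
# Note: a few values in KEYS are duplicated ('z', 'k' and 'P' each appear
# twice), so decipher is ambiguous for characters enciphered to those values.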
def run():
while True:
command = str(raw_input('''--- * --- * --- * --- * --- * ---* --- * ---
        Welcome to criptografia. What do you want to do?
| 		[c]ipher message
		[d]ecipher message
		[s]top
'''))
if command == 'c':
            message = str(raw_input('Type your message: '))
cypher_messager = cypher(message)
print(cypher_messager)
elif command == 'd':
            message = str(raw_input('Type your ciphered message: '))
decypher_message = decipher(message)
print(decypher_message)
elif command == 's':
            print('Goodbye!')
| break
else:
            print('Command not found!')
if __name__ == '__main__':
print('M E N S A J E S C I F R A D O S')
run()
|
pauljohnleonard/pod-world | pod/gui.py | Python | gpl-2.0 | 2,156 | 0.038497 | from math import *
import gui_base
keys=gui_base.keys
display_str=None
display_font_size=14
class SimpleGui:
""" The simulation class is responsible for running the Pod World.
"""
def __init__(self,world,pods,frames_per_sec,title="BraveNewWorld",back_ground=(0,0,0),log_file=None):
#: world a World
#: agents list of agents
self.back_ground=back_ground
self.pods=pods
self.world = world
if self.world != None:
w,h=(self.world.dimensions())
else:
w,h=(1024-20 , 768-20)
|
dim_world = (w+20,h+20)
self.screen = gui_base.init_surface(dim_world,title)
        if log_file is not None:
self.log_file=open(log_file,"w")
self.run_name=title
| self.frames_per_sec=frames_per_sec
def check_for_quit(self):
return gui_base.check_for_quit()
def _display_mess(self,pos=(20,20),col=(255,255,0)):
gui_base.draw_string(self.screen,display_str,pos,col,display_font_size)
def clear(self):
self.screen.fill(self.back_ground)
def display(self,clear=True,fps=None):
if clear:
self.clear()
        if self.world is None:
return
self.world.draw(self.screen)
for pod in self.pods:
gui_base.draw_pod(pod,self.screen)
gui_base.draw_sensors(pod,self.screen)
        if display_str is not None:
self._display_mess()
gui_base.blit(self.screen)
        if fps is not None:
self.frames_per_sec=fps
if self.frames_per_sec > 0:
gui_base.tick(self.frames_per_sec)
def setWorld(self,world):
self.world=world
def get_pressed(self):
return gui_base.get_pressed()
def set_message(self,mess,font_size=12):
global display_str,display_font_size
display_str=mess
display_font_size=font_size
|
srluge/SickRage | tests/encoding_tests.py | Python | gpl-3.0 | 1,487 | 0.008754 | # coding=utf-8
import sys, os.path
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import locale
import unittest
import test_lib as test
import sickbeard
from sickbeard.helpers import sanitizeFileName
from sickrage.helper.encoding import ek, ss, uu
class EncodingTests(test.SiCKRAGETestCase):
def test_encoding(self):
rootDir = 'C:\\Temp\\TV'
        strings = [u'Les Enfants De La T\xe9l\xe9', u'RT\xc9 One']
sickbeard.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
sickbeard.SYS_ENCODING = 'UTF-8'
for s in strings:
show_dir = ek(os.path.join, rootDir, sa | nitizeFileName(s))
self.assertIsInstance(show_dir, unicode)
if __name__ == "__main__":
print "=================="
print "STARTING - ENCODING TESTS"
print "=================="
print "################################################################# | #####"
suite = unittest.TestLoader().loadTestsFromTestCase(EncodingTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
grodrigo/django_general | persons/migrations/0003_auto_20161108_1710.py | Python | gpl-3.0 | 550 | 0.001818 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-0 | 8 20:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('persons', '0002_person_is_staff'),
]
operations = [
migrations. | AlterField(
model_name='person',
name='staff_member',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='persons.Person'),
),
]
|
adamalton/django-href-field | hreffield/fields.py | Python | mit | 657 | 0.004566 | from django.db.models import CharField
DEFAULT_PROTOCOLS = ('http', 'https', 'mailto', 'tel')
class HrefField(CharField):
def __init__(
self,
        protocols=DEFAULT_PROTOCOLS,
allow_paths=True,
| allow_fragments=T | rue,
allow_query_strings=True,
max_length=255,
**kwargs):
self.protocols = protocols
self.allow_paths = allow_paths
self.allow_fragments = allow_fragments
self.allow_query_strings = allow_query_strings
kwargs['max_length'] = max_length
super(HrefField, self).__init__(**kwargs)
#TODO - FUNCTIONALITY!
|
xuru/pyvisdk | pyvisdk/enums/datastore_summary_maintenance_mode_state.py | Python | mit | 272 | 0 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
DatastoreSummaryMainten | anceModeState = Enum(
'enteringMaintenance',
'inMainten | ance',
'normal',
)
|
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/template/debug.py | Python | bsd-3-clause | 3,732 | 0.004019 | from django.template import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
class DebugLexer(Lexer):
def __init__(self, template_string, origin):
super(DebugLexer, self).__init__(template_string, ori | gin)
def tokenize(self):
"Return a list of tokens from a given template_string"
result, upto = [], 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
upto = start
result.append(self.create_token(self.template_string[start:end], (start, end), True))
upto = end
| last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
return result
def create_token(self, token_string, source, in_tag):
token = super(DebugLexer, self).create_token(token_string, in_tag)
token.source = self.origin, source
return token
class DebugParser(Parser):
def __init__(self, lexer):
super(DebugParser, self).__init__(lexer)
self.command_stack = []
def enter_command(self, command, token):
self.command_stack.append( (command, token.source) )
def exit_command(self):
self.command_stack.pop()
def error(self, token, msg):
return self.source_error(token.source, msg)
def source_error(self, source,msg):
e = TemplateSyntaxError(msg)
e.source = source
return e
def create_nodelist(self):
return DebugNodeList()
def create_variable_node(self, contents):
return DebugVariableNode(contents)
def extend_nodelist(self, nodelist, node, token):
node.source = token.source
super(DebugParser, self).extend_nodelist(nodelist, node, token)
def unclosed_block_tag(self, parse_until):
command, source = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
raise self.source_error(source, msg)
def compile_function_error(self, token, e):
if not hasattr(e, 'source'):
e.source = token.source
class DebugNodeList(NodeList):
def render_node(self, node, context):
try:
result = node.render(context)
except TemplateSyntaxError, e:
if not hasattr(e, 'source'):
e.source = node.source
raise
except Exception, e:
from sys import exc_info
wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' %
(e.__class__.__name__, force_unicode(e, errors='replace')))
wrapped.source = node.source
wrapped.exc_info = exc_info()
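            # Re-raise with the original traceback (Python 2 three-argument
            # raise) so the debug page points at the real failure site.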
raise wrapped, None, wrapped.exc_info[2]
return result
class DebugVariableNode(VariableNode):
def render(self, context):
try:
output = self.filter_expression.resolve(context)
output = localize(output)
output = force_unicode(output)
except TemplateSyntaxError, e:
if not hasattr(e, 'source'):
e.source = self.source
raise
except UnicodeDecodeError:
return ''
if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
return escape(output)
else:
return output
|
zepheira/open-science | etc/sitecustomize.py | Python | apache-2.0 | 84 | 0 | import site
site | .addsitedir('/open-science/projectdir/lib/python2.5/site-packa | ges')
|
internetfett/grocerylist | backend/recipes/admin.py | Python | mit | 2,138 | 0.008419 | from django.contrib import admin
from django.db.models import Sum
from recipes.models import Category, Recipe, Ingredient, RecipeIngredient
class RecipeIngredientInline(admin.TabularInline):
model = RecipeIngredient
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ['name']
ordering = ('name',)
class RecipeAdmin(admin.ModelAdmin):
list_display = ('name','calories_per_serving')
search_fields = ['name']
inlines = [RecipeIngredientInline]
actions = ['generate_checklist']
def generate_checklist(self, request, queryset):
checklist_id = 1
from checklist.models import Chec | klist, ChecklistIngredient, Exclusion, Repeatable
ids = queryset.values_list('id', flat=True)
exclusions = Exclusion.objects.values_list('ingredient', flat=True)
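        # Group by (ingredient, unit) and sum the amounts, so an ingredient
        # shared by several selected recipes appears only once on the list.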
recipe_ingredients = RecipeIngredient.objects.filter(recipe__in=ids) \
.values('ingredient', 'unit').annotate(amount=Sum('amount'))
for recipe_ingredient in recipe_ingredients:
if recipe_ingredient['ingredient'] not in exclusions:
| checklist_ingredient = ChecklistIngredient.objects.create(
checklist = Checklist.objects.get(id=checklist_id),
amount = recipe_ingredient['amount'],
unit = recipe_ingredient['unit'],
ingredient = Ingredient.objects.get(id=recipe_ingredient['ingredient'])
)
for repeatable in Repeatable.objects.all():
if repeatable.ingredient:
repeatable.ingredient.status = False
repeatable.ingredient.save()
if repeatable.item:
repeatable.item.status = False
repeatable.item.save()
generate_checklist.short_description = "Generate a checklist from selected recipes"
class IngredientAdmin(admin.ModelAdmin):
list_display = ('name', 'category')
search_fields = ['name']
ordering = ('name',)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(Ingredient, IngredientAdmin)
|
Contraz/demosys-py | demosys/view/screenshot.py | Python | isc | 1,308 | 0.003058 | import os
from datetime import datetime
from PIL import Image
from demosys import context
from demosys.conf import settings
class Config:
"""Container for screenshot target"""
target = None
alignment = 1
def create(file_format='png', name=None):
"""
Create a scr | eenshot
:param file_format: formats supported by PIL (png, jpeg etc)
"""
dest = ""
if settings.SCREENSHOT_PATH:
if not os. | path.exists(settings.SCREENSHOT_PATH):
print("SCREENSHOT_PATH does not exist. creating: {}".format(settings.SCREENSHOT_PATH))
os.makedirs(settings.SCREENSHOT_PATH)
dest = settings.SCREENSHOT_PATH
else:
print("SCREENSHOT_PATH not defined in settings. Using cwd as fallback.")
if not Config.target:
Config.target = context.window().fbo
image = Image.frombytes(
"RGB",
(Config.target.viewport[2], Config.target.viewport[3]),
Config.target.read(viewport=Config.target.viewport, alignment=Config.alignment),
)
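    # Framebuffer reads come back bottom-up (OpenGL convention), so flip the
    # image vertically before saving.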
image = image.transpose(Image.FLIP_TOP_BOTTOM)
if not name:
name = "{}.{}".format(datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f"), file_format)
dest = os.path.join(dest, name)
print("Creating screenshot:", dest)
image.save(dest, format=file_format)
|
maurov/xraysloth | sloth/math/deglitch.py | Python | bsd-3-clause | 5,178 | 0.001738 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Deglitch utilities
=====================
"""
import numpy as np
import logging
_logger = logging.getLogger("sloth.math.deglitch")
def remove_spikes_medfilt1d(y_spiky, backend="silx", kernel_size=3, threshold=0.1):
"""Remove spikes in a 1D array using medfilt from silx.math
Parameters
----------
y_spiky : array
spiky data
backend : str, optional
library to use as backend
- 'silx' -> from silx.math.medianfilter import medfilt1d
- 'pymca' -> from PyMca5.PyMcaMath.PyMcaSciPy.signal import medfilt1d
        - 'pandas' -> rolling median via pandas (see remove_spikes_pandas)
    kernel_size : int, optional
        size of the window used to compute the median, must be odd [3]
threshold : float, optional
relative difference between filtered and spiky data [0.1]
Returns
-------
array
filtered array
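    Examples
    --------
    A minimal sketch, assuming the silx backend is available:
    >>> import numpy as np
    >>> y = np.ones(20)
    >>> y[10] = 100.0  # inject a spike
    >>> y_clean = remove_spikes_medfilt1d(y, backend="silx")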
"""
ynew = np.zeros_like(y_spiky)
if not (kernel_size % 2):
kernel_size += 1
_logger.warning("'kernel_size' must be odd -> adjusted to %d", kernel_size)
if backend == "silx":
return remove_spikes_silx(y_spiky, kernel_size=kernel_size, threshold=threshold)
elif backend == "pymca":
        return remove_spikes_pymca(y_spiky, kernel_size=kernel_size, threshold=threshold)
elif backend == "pandas":
return remove_spikes_pandas(y_spiky, window=kernel_size, threshold=threshold)
else:
_logger.warning("backend for medfilt1d not found! -> returning zeros")
return ynew
def remove_spikes_silx(y_spiky, kernel_size=3, threshold=0.1):
"""Remove spikes in a 1D array using medfilt from silx.math
Parameters
----------
y_spiky : array
spiky data
kernel_size : int, optional
        size of the window used to compute the median, must be odd [3]
    threshold : float, optional
        relative difference between filtered and spiky data [0.1]
Returns
-------
array
filtered array
"""
ynew = np.zeros_like(y_spiky)
try:
from silx.math.medianfilter import medfilt1d
except ImportError:
_logger.warning("medfilt1d (from SILX) not found! -> returning zeros")
return ynew
y_filtered = medfilt1d(
y_spiky, kernel_size=kernel_size, conditional=True, mode="nearest", cval=0
)
diff = y_filtered - y_spiky
rel_diff = diff / y_filtered
ynew = np.where(abs(rel_diff) > threshold, y_filtered, y_spiky)
return ynew
def remove_spikes_pymca(y_spiky, kernel_size=9, threshold=0.66):
"""Remove spikes in a 1D array using medfilt from PyMca5.PyMcaMath.PyMcaScipy.signal
Parameters
----------
y_spiky : array
spiky data
kernel_size : int, optional
        size of the window used to compute the median, should be odd [9]
threshold : float, optional
difference between filtered and spiky data in sigma units [0.66]
Returns
-------
array
filtered array
"""
ynew = np.zeros_like(y_spiky)
try:
from PyMca5.PyMcaMath.PyMcaSciPy.signal import medfilt1d
except ImportError:
_logger.warning("medfilt1d (from PyMca5) not found! -> returning zeros")
return ynew
y_filtered = medfilt1d(y_spiky, kernel_size)
diff = y_filtered - y_spiky
mean = diff.mean()
sigma = (y_spiky - mean) ** 2
sigma = np.sqrt(sigma.sum() / float(len(sigma)))
ynew = np.where(abs(diff) > threshold * sigma, y_filtered, y_spiky)
return ynew
def remove_spikes_pandas(y, window=3, threshold=3):
"""remove spikes using pandas
Taken from `https://ocefpaf.github.io/python4oceanographers/blog/2015/03/16/outlier_detection/`_
.. note:: this will not work in pandas > 0.17 one could simply do
`df.rolling(3, center=True).median()`; also
df.as_matrix() is deprecated, use df.values instead
Parameters
----------
y : array 1D
window : int (optional)
window in rolling median [3]
threshold : int (optional)
number of sigma difference with original data
Return
------
ynew : array like x/y
"""
ynew = np.zeros_like(y)
try:
import pandas as pd
except ImportError:
_logger.error("pandas not found! -> returning zeros")
return ynew
df = pd.DataFrame(y)
try:
yf = (
pd.rolling_median(df, window=window, center=True)
.fillna(method="bfill")
.fillna(method="ffill")
)
diff = yf.as_matrix() - y
mean = diff.mean()
sigma = (y - mean) ** 2
sigma = np.sqrt(sigma.sum() / float(len(sigma)))
ynew = np.where(abs(diff) > threshold * sigma, yf.as_matrix(), y)
except Exception:
yf = (
df.rolling(window, center=True)
.median()
.fillna(method="bfill")
.fillna( | method="ffill")
)
diff = yf.values - y
mean = diff.mean()
sigma = (y - mean) ** 2
| sigma = np.sqrt(sigma.sum() / float(len(sigma)))
ynew = np.where(abs(diff) > threshold * sigma, yf.values, y)
# ynew = np.array(yf.values).reshape(len(x))
return ynew
|
yshlin/tildeslash | tildeslash/blog/utils.py | Python | bsd-3-clause | 787 | 0.003812 | # -*- coding: utf-8 -*-
import re
import unicodedata
# FIXME: these patterns works for English/Chinese mixed content, but not tested among other languages
WORD_COUNT_SPLIT_PATTERN = re.compile(u'[\s\u4e00-\u9fff]')
ASIAN_CHAR_PATTERN = re.compile(u'[\u | 4e00-\u9fff]')
ENDING_CHAR_CATEGORIES = ('Po', 'Cc', 'Zs')
def word_count(content):
return len(re.split(WORD_COUNT_SPLIT_PATTERN, content))
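# Example: word_count(u'hello 世界') == 4; the pattern splits on whitespace and
# on every CJK character, and the empty strings produced between adjacent CJK
# separators are counted as well.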
def strip_content(content, length=100):
inde | x = 0
count = 0
for c in content:
if count >= length:
if unicodedata.category(c) in ENDING_CHAR_CATEGORIES:
break
else:
if re.match(ASIAN_CHAR_PATTERN, c):
count += 2
else:
count += 1
index += 1
return content[:index] |
wojab/python_training | check_db_connection.py | Python | apache-2.0 | 276 | 0.01087 | from fixture.orm impo | rt ORMFixture
from model.group import Group
db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
try:
l = db. | get_contacts_in_group(Group(id="69"))
for item in l:
print(item)
print(len(l))
finally:
pass |
onponomarev/ganeti | lib/query.py | Python | bsd-2-clause | 92,353 | 0.00602 | #
#
# Copyright (C) 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for query operations
How it works:
- Add field definitions
- See how L{NODE_FIELDS} is built
- Each field gets:
- Query field definition (L{objects.QueryFieldDefinition}, use
L{_MakeField} for creating), containing:
- Name, must be lowercase and match L{FIELD_NAME_RE}
- Title for tables, must not contain whitespace and match
L{TITLE_RE}
- Value data type, e.g. L{constants.QFT_NUMBER}
- Human-readable description, must not end with punctuation or
contain newlines
- Data request type, see e.g. C{NQ_*}
- OR-ed flags, see C{QFF_*}
- A retrieval function, see L{Query.__init__} for description
- Pass list of fields through L{_PrepareFieldList} for preparation and
checks
- Instantiate L{Query} with prepared field list definition and selected fields
- Call L{Query.RequestedData} to determine what data to collect/compute
- Call L{Query.Query} or L{Query.OldStyleQuery} with collected data and use
result
- Data container must support iteration using C{__iter__}
- Items are passed to retrieval functions and can have any format
- Call L{Query.GetFields} to get list of definitions for selected fields
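A minimal sketch of this flow (the single field shown is hypothetical; real
definitions live in lists such as L{NODE_FIELDS})::
  fields = _PrepareFieldList([
    (_MakeField("name", "Name", QFT_TEXT, "Node name"),
     NQ_CONFIG, 0, lambda ctx, node: node.name),
    ], [])
  query = Query(fields, ["name"])
  data = ...  # compute only the groups listed by query.RequestedData()
  result = query.Query(data)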
@attention: Retrieval functions must be idempotent. They can be called multiple
times, in any order and any number of times.
"""
import logging
import operator
import re
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import compat
from ganeti import objects
from ganeti import ht
from ganeti import runtime
from ganeti import qlang
from ganeti import jstore
from ganeti.hypervisor import hv_base
from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER,
QFT_NUMBER_FLOAT, QFT_UNIT, QFT_TIMESTAMP,
QFT_OTHER, RS_NORMAL, RS_UNKNOWN, RS_NODATA,
RS_UNAVAIL, RS_OFFLINE)
(NETQ_CONFIG,
NETQ_GROUP,
NETQ_STATS,
NETQ_INST) = range(300, 304)
# Constants for requesting data from the caller/data provider. Each property
# collected/computed separately by the data provider should have its own to
# only collect the requested data and not more.
(NQ_CONFIG,
NQ_INST,
NQ_LIVE,
NQ_GROUP,
NQ_OOB) = range(1, 6)
(IQ_CONFIG,
IQ_LIVE,
IQ_DISKUSAGE,
IQ_CONSOLE,
IQ_NODES,
IQ_NETWORKS) = range(100, 106)
(LQ_MODE,
LQ_OWNER,
LQ_PENDING) = range(10, 13)
(GQ_CONFIG,
GQ_NODE,
GQ_INST,
GQ_DISKPARAMS) = range(200, 204)
(CQ_CONFIG,
CQ_QUEUE_DRAINED,
CQ_WATCHER_PAUSE) = range(300, 303)
(JQ_ARCHIVED, ) = range(400, 401)
# Query field flags
QFF_HOSTNAME = 0x01
QFF_IP_ADDRESS = 0x02
QFF_JOB_ID = 0x04
QFF_SPLIT_TIMESTAMP = 0x08
# Next values: 0x10, 0x20, 0x40, 0x80, 0x100, 0x200
QFF_ALL = (QFF_HOSTNAME | QFF_IP_ADDRESS | QFF_JOB_ID | QFF_SPLIT_TIMESTAMP)
FIELD_NAME_RE = re.compile(r"^[a-z0-9/._]+$")
TITLE_RE = re.compile(r"^[^\s]+$")
DOC_RE = re.compile(r"^[A-Z].*[^.,?!]$")
#: Verification function for each field type
_VERIFY_FN = {
QFT_UNKNOWN: ht.TNone,
QFT_TEXT: ht.TString,
QFT_BOOL: ht.TBool,
QFT_NUMBER: ht.TInt,
QFT_NUMBER_FLOAT: ht.TFloat,
QFT_UNIT: ht.TInt,
QFT_TIMESTAMP: ht.TNumber,
QFT_OTHER: lambda _: True,
}
# Unique objects for special field statuses
_FS_UNKNOWN = object()
_FS_NODATA = object()
_FS_UNAVAIL = object()
_FS_OFFLINE = object()
#: List of all special status
_FS_ALL = compat.UniqueFrozenset([
_FS_UNKNOWN,
_FS_NODATA,
_FS_UNAVAIL,
_FS_OFFLINE,
])
#: VType to QFT mapping
_VTToQFT = {
# TODO: fix validation of empty strings
constants.VTYPE_STRING: QFT_OTHER, # since VTYPE_STRINGs can be empty
constants.VTYPE_MAYBE_STRING: QFT_OTHER,
constants.VTYPE_BOOL: QFT_BOOL,
constants.VTYPE_SIZE: QFT_UNIT,
constants.VTYPE_INT: QFT_NUMBER,
constants.VTYPE_FLOAT: QFT_NUMBER_FLOAT,
}
_SERIAL_NO_DOC = "%s object serial number, incremented on each modification"
def _GetUnknownField(ctx, item): # pylint: disable=W0613
"""Gets the contents of an unknown field.
"""
return _FS_UNKNOWN
def _GetQueryFields(fielddefs, selected):
"""Calculates the internal list of selected fields.
Unknown fields are returned as L{constants.QFT_UNKNOWN}.
@type fielddefs: dict
@p | aram fielddefs: Field definitions
@type selected: list of strings
@param selected: List of selected fields
"""
result = []
for name in selected:
try:
fdef = fielddefs[name]
except KeyError:
fdef = (_MakeField(name, name, QFT_UNKNOWN, "Unknown field '%s'" % name),
None, 0, _GetUnknownField)
assert len(fdef) == 4
result.append(fdef)
return result
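# For example (hypothetical field table): selecting ["name", "bogus"] against
# a fielddefs dict that only knows "name" yields a normal entry for "name"
# plus a QFT_UNKNOWN placeholder entry for "bogus".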
def GetAllFields(fielddefs):
"""Extract L{objects.QueryFieldDefinition} from field d | efinitions.
@rtype: list of L{objects.QueryFieldDefinition}
"""
return [fdef for (fdef, _, _, _) in fielddefs]
class _FilterHints(object):
"""Class for filter analytics.
When filters are used, the user of the L{Query} class usually doesn't know
exactly which items will be necessary for building the result. It therefore
has to prepare and compute the input data for potentially returning
everything.
There are two ways to optimize this. The first, and simpler, is to assign
each field a group of data, so that the caller can determine which
computations are necessary depending on the data groups requested. The list
of referenced groups must also be computed for fields referenced in the
filter.
The second is restricting the items based on a primary key. The primary key
is usually a unique name (e.g. a node name). This class extracts all
referenced names from a filter. If it encounters any filter condition which
disallows such a list to be determined (e.g. a non-equality filter), all
names will be requested.
The end-effect is that any operation other than L{qlang.OP_OR} and
L{qlang.OP_EQUAL} will make the query more expensive.
"""
def __init__(self, namefield):
"""Initializes this class.
@type namefield: string
@param namefield: Field caller is interested in
"""
self._namefield = namefield
#: Whether all names need to be requested (e.g. if a non-equality operator
#: has been used)
self._allnames = False
#: Which names to request
self._names = None
#: Data kinds referenced by the filter (used by L{Query.RequestedData})
self._datakinds = set()
def RequestedNames(self):
"""Returns all requested values.
Returns C{None} if list of values can't be determined (e.g. encountered
non-equality operators).
@rtype: list
"""
if self._allnames or self._names is None:
return None
return utils.UniqueSequence(self._names)
def ReferencedData(self):
"""Returns all kinds of data referenced by t |
fdibaldassarre/photo-mosaic | lib/CollageImage.py | Python | gpl-3.0 | 9,160 | 0.016594 | #!/usr/bin/env python3
import os
import sys
from json import JSONEncoder
from json import JSONDecoder
from PIL import Image
from lib import ImageManager
JSON_encoder = JSONEncoder()
JSON_decoder = JSONDecoder()
MAX_IMAGES_MEM = 1 # DO NOT SET LOWER THAN 1
MAX_COLOURS_MEM = 200
DATA_IMAGE_SIZE = 'ImageSize'
DATA_COLOURS = 'Colours'
class CollageColour():
def __init__(self, base_img, sector_size):
self.base_img = base_img
self.sector_size = sector_size
self.images = {}
def getBaseImage(self):
return self.base_img
def reload(self):
self.images = {}
imgpath = self.base_img.getFilepath()
self.base_img = ImageManager.newFromPath(imgpath)
def getResizedImage(self, size, antialias=False, partition=True, memorize=True):
width, height = size
if size in self.images:
img = self.images[width, height]
img.setSectorSize(self.sector_size)
if partition and not img.isPartitioned():
img.partition()
else:
img_raw = self.base_img.getResizedCopy(size, antialias)
img = ImageManager.newFromData(img_raw)
img.setSectorSize(self.sector_size)
if partition:
img.partition()
if memorize and MAX_IMAGES_MEM > 0:
if len(self.images) > MAX_IMAGES_MEM:
# delete one element
del_key = list(self.images.keys())[0]
del self.images[del_key]
# Add this image
self.images[width, height] = img
return img
class CollageImage():
def __init__(self, size):
w | idth, height = | size
self.width = width
self.height = height
self.size = size
self.img = Image.new("RGBA", self.size, color=(255, 255, 255, 0))
self.pixelManager = self.img.load()
self.colours = {}
self.colours_size = {}
self.colours_usage = {}
def isPositionEmpty(self, position):
x, y = position
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return False
else:
return self.pixelManager[position][3] == 0
def addColourAtPosition(self, colour, position, size):
self.colours[position] = colour
self.colours_size[position] = size
if colour in self.colours_usage:
self.colours_usage[colour] += 1
else:
self.colours_usage[colour] = 1
image = colour.getResizedImage(size, partition=False)
self.img.paste(image.getImage(), position)
def getColourIndexAtPosition(self, position):
x, y = position
pos = None
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return pos
for position in self.colours:
i, j = position
w, h = self.colours_size[position]
if i <= x and x < i + w and j <= y and y < j + h:
pos = (i,j)
break
return pos
def getImageSizeAt(self, position):
return self.colours_size[position]
def removeImageAtPosition(self, position):
img_size = self.colours_size[position]
blank_img = Image.new("RGBA", img_size, color=(255, 255, 255, 0))
self.img.paste(blank_img, position)
colour = self.colours[position]
self.colours_usage[colour] -= 1
del self.colours[position]
del self.colours_size[position]
def getMaxSpaceAt(self, position, max_width, max_height):
return self.getMaxSpaceRight(position, max_width), self.getMaxSpaceBottom(position, max_height)
def getMaxSpaceRight(self, position, max_width):
x, y = position
width = 0
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return width
for i in range(max_width):
if not self.isPositionEmpty((x+i, y)):
break
else:
width += 1
return width
def getMaxSpaceBottom(self, position, max_height):
x, y = position
height = 0
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return height
for j in range(max_height):
if not self.isPositionEmpty((x, y+j)):
break
else:
height += 1
return height
def isSectionFree(self, x, y, width, height):
for i in range(width):
if self.pixelManager[x+i, y][3] != 0:
return False
for j in range(height):
if self.pixelManager[x, y+j][3] != 0:
return False
return True
def getColourUsage(self, colour):
if colour in self.colours_usage:
return self.colours_usage[colour]
else:
return 0
def save(self, savepath):
self.img.save(savepath)
def debugFillEmptySpace(self, position, colour):
width, height = self.getMaxSpaceAt(position, self.width, self.height)
self.debugFillSpace(position, (width, height), colour)
def debugFillImageAt(self, position, colour):
width, height = self.colours_size[position]
self.debugFillSpace(position, (width, height), colour)
def debugFillSpace(self, position, size, colour):
img = Image.new("RGBA", size, color=colour)
self.img.paste(img, position)
def saveResized(self, savepath, scale=1):
# Create new image with rescaled size
width, height = self.size
new_width, new_height = width*scale, height*scale
img = Image.new("RGBA", (new_width, new_height), color=(255, 255, 255, 0))
# Paint new image
reloaded_colours = []
tot_colours = len(self.colours)
actual_pos = 0
for pos in self.colours:
# Show progression
perc = round(100.0 * actual_pos / tot_colours)
sys.stdout.write('\r Progression: ' + str(perc) + ' % ')
sys.stdout.flush()
actual_pos += 1
# Resize and paste the image
x, y = pos
new_x, new_y = x*scale, y*scale
width, height = self.colours_size[x,y]
new_width, new_height = width*scale, height*scale
colour = self.colours[x,y]
if not colour in reloaded_colours:
colour.reload()
reloaded_colours.append(colour)
image = colour.getResizedImage((new_width, new_height), antialias=True, partition=False, memorize=False)
img.paste(image.getImage(), (new_x, new_y))
if len(self.colours) > MAX_COLOURS_MEM :
colour.reload() # Free the memory of the base image (otherwise I use too much RAM)
sys.stdout.write('\r Progression: 100 % \n')
sys.stdout.flush()
print("Saving")
# save
img.save(savepath)
def saveSchema(self, savepath):
data = {}
data[DATA_IMAGE_SIZE] = self.size
colours = []
for pos in self.colours:
colour = self.colours[pos]
imgname = os.path.basename(colour.getBaseImage().getFilepath())
size = self.colours_size[pos]
colour_data = (imgname, pos, size)
colours.append(colour_data)
data[DATA_COLOURS] = colours
# encode to json and save
hand = open(savepath, 'w')
# Size
size_string = JSON_encoder.encode(data[DATA_IMAGE_SIZE])
hand.write(size_string + '\n')
for colour in data[DATA_COLOURS]:
colour_string = JSON_encoder.encode(colour)
hand.write(colour_string + '\n')
hand.close()
def applyMask(self, maskpath):
mask = Image.open(maskpath).convert('RGBA')
mask = mask.resize((self.img.size), Image.ANTIALIAS)
mask_width, mask_height = mask.size
maskPixelManager = mask.load()
delete_list = []
for pos in self.colours:
x, y = pos
colour = self.colours[x,y]
colour_width, colour_height = self.colours_size[x,y]
# check if delete
colour_delete = True
for i in range(x, x+colour_width):
if i < mask_width:
                    for j in range(y, y+colour_height):  # j spans rows, so height, not width
if j < mask_height:
if maskPixelManager[i, j][3] != 0:
colour_delete = False
break
if colour_delete:
delete_list.append(pos)
for pos in delete_list:
            # NOTE/WARNING: this invalidates colours_usage, which should no
            # longer be needed at this point
colour = self.colours[pos]
self.colours_usage[colour] = 2
self.removeImageAtPosition(pos)
def new(*args, **kwargs):
coll = CollageImage(*args, **kwargs)
return coll
def newColour(*args, **kwargs):
colour = CollageColour(*args, **kwargs)
return colour
def newColourFromPath(imgpath, sector_size=None):
img = ImageManager.newFromPath(imgpath)
colour = CollageColour(img, sec |
bplower/cssef | WebInterface/WebInterface/modules/competition/views/blueteam.py | Python | gpl-3.0 | 17,106 | 0.032562 | from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.http import HttpResponseBadRequest
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.contrib import auth
from django.core.context_processors import csrf
#from django.core.files.uploadedfile import UploadedFile
from django.utils import timezone
import re
from models import Competition
from models import InjectResponse
from models import IncidentResponse
from models import Document
from models import Service
from models import Inject
from models import Score
from models import Team
from forms import TeamLoginForm
from forms import InjectResponseForm
from forms import IncidentResponseForm
from forms import IncidentResponseReplyForm
from forms import ServiceSelectionForm
from utils import get_inject_display_state
from utils import UserMessages
from utils import getAuthValues
from utils import save_document
#from hashlib import md5
#from cssef import LoadServs
import settings
templatePathPrefix = "organization/templates/"
def login(request):
"""
Page for teams to login to for a competition
"""
c = {}
c["messages"] = UserMessages()
c = getAuthValues(request, c)
c.update(csrf(request))
# Checks if the user is submitting the form, or requesting the form
if request.method != "POST":
c["form"] = {'login': TeamLoginForm()}
return render_to_response('Comp/login.html', c)
username = request.POST.get('username')
password = request.POST.get('password')
compid = request.POST.get('compid')
team = auth.authenticate(username = username, password = password, compid = compid)
if team == None:
c["messages"].new_info("Incorrect team credentials.", 4321)
c["form"] = {'login': TeamLoginForm()}
return HttpResponseBadRequest(render_to_string(templatePathPrefix + 'login.html', c))
auth.login(request, team)
competition = Competition.objects.get(compid = compid)
return HttpResponseRedirect("/competitions/%s/summary/" % competition.compurl)
def logout(request, competition = None):
"""
Page for teams to logout of a competition
"""
c = getAuthValues(request, {})
if c["auth_name"] == "auth_team_blue":
auth.logout(request)
return HttpResponseRedirect("/")
def summary(request, competition = None):
"""
Display summary information for selected competition
"""
current_url = request.build_absolute_uri()
if request.build_absolute_uri()[-8:] != "summary/":
return HttpResponseRedirect(current_url + "summary/")
c = getAuthValues(request, {})
c["comp_obj"] = Competition.objects.get(compurl = competition)
c["services"] = Service.objects.filter(compid = c["comp_obj"].compid)
c["teams"] = Team.objects.filter(compid = c["comp_obj"].compid)
return render_to_response(templatePathPrefix + 'summary.html', c)
def details(request, competition = None):
"""
Display details about the selected competition
"""
c = getAuthValues(request, {})
c["comp_obj"] = Competition.objects.get(compurl = competition)
c["services"] = Service.objects.filter(compid = c["comp_obj"].compid)
c["teams"] = Team.objects.filter(compi | d = c["comp_obj"].compid)
return render_to_response(templatePathPrefix + 'details.html', c)
def ranking(request, competition = None):
"""
Display team rankings for selected competition
"""
c = getAuthValues(request, {})
c["comp_obj"] = Competition.objects.get(compurl = competition)
# If the view is disabled
if not c["comp_obj"].teams_view_ranking_enabled:
c["message"] = "This feature is disabled for this competition."
return HttpResponseForbidden(render_to_string('status | _400.html', c))
c["ranks"] = []
team_objs = Team.objects.filter(compid = c["comp_obj"].compid)
for i in team_objs:
scores_objs = Score.objects.filter(compid = c["comp_obj"].compid, teamid = i.teamid)
total = 0
for k in scores_objs:
total += k.value
c["ranks"].append({"team": i.teamname, "score": total, "place":0})
return render_to_response(templatePathPrefix + 'rankings.html', c)
def injects(request, competition = None):
"""
Display inject list for selected competition
"""
c = getAuthValues(request, {})
# If the user isn't authed as a Blue Team
if c["auth_name"] != "auth_team_blue":
c["message"] = "You must log in as a Blue Team to view this page."
return HttpResponseForbidden(render_to_string('status_400.html', c))
c["comp_obj"] = Competition.objects.get(compurl = competition)
# If the view is disabled
if not c["comp_obj"].teams_view_injects_enabled:
c["message"] = "This feature is disabled for this competition."
return HttpResponseForbidden(render_to_string('status_400.html', c))
c["inject_list"] = []
for i in Inject.objects.filter(compid = request.user.compid, dt_delivery__lte = timezone.now()):
c["inject_list"].append({
"inject": i,
"files": Document.objects.filter(inject = i),
"display_state": get_inject_display_state(request.user, i)
})
return render_to_response(templatePathPrefix + 'injects.html', c)
def injects_respond(request, competition = None, ijctid = None):
"""
Displays a specific inject and provides either upload or text entry for inject response
"""
c = getAuthValues(request, {})
# If the user isn't authed as a Blue Team
if c["auth_name"] != "auth_team_blue":
c["message"] = "You must log in as a Blue Team to view this page."
return HttpResponseForbidden(render_to_string('status_400.html', c))
c["comp_obj"] = Competition.objects.get(compurl = competition)
# If the view is disabled
if not c["comp_obj"].teams_view_injects_enabled:
c["message"] = "This feature is disabled for this competition."
return HttpResponseForbidden(render_to_string('status_400.html', c))
c.update(csrf(request))
# If we're not getting POST data, serve the page normally
if request.method != "POST":
ijct_obj = Inject.objects.get(compid = c["comp_obj"].compid, ijctid = ijctid)
if not ijct_obj.require_response:
return HttpResponseRedirect('/competitions/%s/injects/' % (competition))
c["inject"] = {
"ijct_obj": ijct_obj,
"files": Document.objects.filter(inject = ijctid),
"display_state": get_inject_display_state(request.user, ijct_obj)
}
c["response_list"] = []
for i in InjectResponse.objects.filter(compid = c["comp_obj"].compid, teamid = request.user.teamid, ijctid = ijctid):
c["response_list"].append({
"response": i,
"files": Document.objects.filter(injectresponse = i)
})
if c["inject"]["ijct_obj"].dt_response_close <= timezone.now():
c["response_locked"] = True
else:
c["response_locked"] = False
c["responseform"] = InjectResponseForm()
return render_to_response(templatePathPrefix + 'injects_view_respond.html', c)
# Check if we're allowed to take the submission (time restrictions)
ijct_obj = Inject.objects.get(compid = c["comp_obj"].compid, ijctid = ijctid)
if not ijct_obj.require_response:
return HttpResponseRedirect('/competitions/%s/injects/' % (competition))
if ijct_obj.dt_response_close <= timezone.now():
# Very clever person - submission form was closed, but they're attempting to POST anyway
return HttpResponseRedirect('/competitions/%s/injects/%s/' % (competition, ijctid))
# Determine if we're handling text entry or file upload
tmp_dict = request.POST.copy().dict()
tmp_dict.pop('csrfmiddlewaretoken', None)
tmp_dict.pop('docfile', None)
tmp_dict['compid'] = request.user.compid
tmp_dict['teamid'] = request.user.teamid
tmp_dict['ijctid'] = int(ijctid)
ijct_resp_obj = InjectResponse(**tmp_dict)
ijct_resp_obj.save()
# Checks if we were given a file
if 'docfile' in request.FILES:
save_document(request.FILES['docfile'], settings.CONTENT_INJECT_REPONSE_PATH, ijct_resp_obj)
return HttpResponseRedirect('/competitions/%s/injects/%s/' % (competition, ijctid))
def servicestatus(request, competition = None):
"""
Display current service status for selected team in selected competition
"""
c = getAuthValues(request, {})
# If the user isn't authed as a Blue Team
if c["auth_name"] != "auth_team_blue":
c["message"] = "You must log in as a Blue Team to view this page."
return HttpResponseForbidden(render_to_string('status_400.html |
pcmoritz/ray-1 | python/ray/client_builder.py | Python | apache-2.0 | 6,749 | 0 | import os
import importlib
import logging
from dataclasses import dataclass
import sys
from typing import Any, Dict, Optional, Tuple
from ray.ray_constants import RAY_ADDRESS_ENVIRONMENT_VARIABLE
from ray.job_config import JobConfig
import ray.util.client_connect
logger = logging.getLogger(__name__)
@dataclass
class ClientContext:
"""
Basic context manager for a ClientBuilder connection.
"""
dashboard_url: Optional[str]
python_version: str
ray_version: str
ray_commit: str
protocol_version: Optional[str]
_num_clients: int
def __enter__(self) -> "ClientContext":
return self
def __exit__(self, *exc) -> None:
self.disconnect()
def disconnect(self) -> None:
"""
Disconnect Ray. This either disconnects from the remote Client Server
or shuts the current driver down.
"""
if ray.util.client.ray.is_connected():
# This is only a client connected to a server.
ray.util.client_connect.disconnect()
ray._private.client_mode_hook._explicitly_disable_client_mode()
elif ray.worker.global_worker.node is None:
# Already disconnected.
return
elif ray.worker.global_worker.node.is_head():
logger.debug(
"The current Ray Cluster is scoped to this process. "
"Disconnecting is not po | ssible as it will shutdown the "
"cluster.")
else:
# This is only a driver connected to an existing cluster.
ray.shutdown()
class ClientBuilder:
"""
Builder for a Ray Client connection. This class can be subclassed by
custom builder classes to modify connection behavior to include additional
features or altered semantics. One example is the ``_LocalClientBuilder``.
"""
|
def __init__(self, address: Optional[str]) -> None:
self.address = address
self._job_config = JobConfig()
def env(self, env: Dict[str, Any]) -> "ClientBuilder":
"""
Set an environment for the session.
Args:
            env (Dict[str, Any]): A runtime environment to use for this
connection. See :ref:`runtime-environments` for what values are
accepted in this dict.
"""
self._job_config.set_runtime_env(env)
return self
def namespace(self, namespace: str) -> "ClientBuilder":
"""
Sets the namespace for the session.
Args:
namespace (str): Namespace to use.
"""
self._job_config.set_ray_namespace(namespace)
return self
def connect(self) -> ClientContext:
"""
Begin a connection to the address passed in via ray.client(...).
Returns:
            ClientContext: Dataclass with information about the connection. This
includes the server's version of Python & Ray as well as the
dashboard_url.
"""
client_info_dict = ray.util.client_connect.connect(
self.address, job_config=self._job_config)
dashboard_url = ray.get(
ray.remote(ray.worker.get_dashboard_url).remote())
return ClientContext(
dashboard_url=dashboard_url,
python_version=client_info_dict["python_version"],
ray_version=client_info_dict["ray_version"],
ray_commit=client_info_dict["ray_commit"],
protocol_version=client_info_dict["protocol_version"],
_num_clients=client_info_dict["num_clients"])
class _LocalClientBuilder(ClientBuilder):
def connect(self) -> ClientContext:
"""
Begin a connection to the address passed in via ray.client(...).
"""
connection_dict = ray.init(
address=self.address, job_config=self._job_config)
return ClientContext(
dashboard_url=connection_dict["webui_url"],
python_version="{}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]),
ray_version=ray.__version__,
ray_commit=ray.__commit__,
protocol_version=None,
_num_clients=1)
def _split_address(address: str) -> Tuple[str, str]:
"""
Splits address into a module string (scheme) and an inner_address.
"""
if "://" not in address:
address = "ray://" + address
# NOTE: We use a custom splitting function instead of urllib because
    # Python allows underscores in module names, while URL schemes do not
# allow them.
module_string, inner_address = address.split("://", maxsplit=1)
return (module_string, inner_address)
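# Example behaviour, derived from the code above:
#   _split_address("ray://1.2.3.4:10001")      -> ("ray", "1.2.3.4:10001")
#   _split_address("1.2.3.4:10001")            -> ("ray", "1.2.3.4:10001")
#   _split_address("my_module://cluster-name") -> ("my_module", "cluster-name")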
def _get_builder_from_address(address: Optional[str]) -> ClientBuilder:
if address == "local":
return _LocalClientBuilder(None)
if address is None:
try:
# NOTE: This is not placed in `Node::get_temp_dir_path`, because
# this file is accessed before the `Node` object is created.
cluster_file = os.path.join(ray._private.utils.get_user_temp_dir(),
"ray_current_cluster")
with open(cluster_file, "r") as f:
address = f.read().strip()
except FileNotFoundError:
# `address` won't be set and we'll create a new cluster.
pass
return _LocalClientBuilder(address)
module_string, inner_address = _split_address(address)
try:
module = importlib.import_module(module_string)
except Exception:
raise RuntimeError(
f"Module: {module_string} does not exist.\n"
f"This module was parsed from Address: {address}") from None
assert "ClientBuilder" in dir(module), (f"Module: {module_string} does "
"not have ClientBuilder.")
return module.ClientBuilder(inner_address)
def client(address: Optional[str] = None) -> ClientBuilder:
"""
Creates a ClientBuilder based on the provided address. The address can be
of the following forms:
* None: Connects to or creates a local cluster and connects to it.
* ``"local"``: Creates a new cluster locally and connects to it.
* ``"IP:Port"``: Connects to a Ray Client Server at the given address.
* ``"module://inner_address"``: load module.ClientBuilder & pass
inner_address
"""
env_address = os.environ.get(RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if env_address and address is None:
logger.debug(
f"Using address ({env_address}) instead of auto-detection "
f"because {RAY_ADDRESS_ENVIRONMENT_VARIABLE} is set.")
address = env_address
return _get_builder_from_address(address)
|
Parsl/parsl | parsl/tests/test_python_apps/test_garbage_collect.py | Python | apache-2.0 | 1,044 | 0.002874 | import parsl
import time
from parsl.app.app import python_app
@python_app
def slow_double(x):
import time
time.sleep(0.1)
return x * 2
def test_garbage_coll | ect():
""" Launches an app with a dependency and waits till it's done and asserts that
the internal refs were wiped
"""
x = slow_double(slow_double(10))
if x.done() is False:
assert parsl.dfk().tasks[x.tid]['app_fu'] == x, "Tasks table should have app_fu ref before done"
x.result()
if parsl.dfk().checkpoint_mode is | not None:
# We explicit call checkpoint if checkpoint_mode is enabled covering
# cases like manual/periodic where checkpointing may be deferred.
parsl.dfk().checkpoint()
time.sleep(0.2) # Give enough time for task wipes to work
assert x.tid not in parsl.dfk().tasks, "Task record should be wiped after task completion"
if __name__ == '__main__':
from parsl.tests.configs.htex_local_alternate import config
parsl.load(config)
# parsl.load()
test_garbage_collect()
|
rakanalh/django-pushy | pushy/tasks.py | Python | mit | 4,276 | 0 | import datetime
import logging
import celery
from django.conf import settings
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils import timezone
from .models import (
PushNotification,
Device,
get_filtered_devices_queryset
)
from .exceptions import (
PushInvalidTokenException,
PushException
)
from .dispatchers import get_dispatcher
logger = logging.getLogger(__name__)
@celery.shared_task(
queue=getattr(settings, 'PUSHY_QUEUE_DEFAULT_NAME', None)
)
def check_pending_push_notifications():
pending_notifications = PushNotification.objects.filter(
sent=PushNotification.PUSH_NOT_SENT
)
for pending_notification in pending_notifications:
create_push_notification_groups.apply_async(kwargs={
'notification': pending_notification.to_dict()
})
@celery.shared_task(
queue=getattr(settings, 'PUSHY_QUEUE_DEFAULT_NAME', None)
)
def create_push_notification_groups(notification):
devices = get_filtered_devices_queryset(notification)
date_started = timezone.now()
if devices.count() > 0:
count = devices.count()
limit = getattr(settings, 'PUSHY_DEVICE_KEY_LIMIT', 1000)
celery.chord(
send_push_notification_group.s(notification, offset, limit)
for offset in range(0, count, limit)
)(notify_push_notification_sent.si(notification))
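        # Chunking example: 2,500 matching devices with the default
        # PUSHY_DEVICE_KEY_LIMIT of 1000 spawn three group tasks with
        # (offset, limit) = (0, 1000), (1000, 1000), (2000, 1000); the chord
        # callback notify_push_notification_sent fires once all have finished.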
if not notification['id']:
return
try:
notification = PushNotification.objects.get(pk=notification['id'])
notification.sent = PushNotification.PUSH_IN_PROGRESS
notification.date_started = date_started
notification.save()
except PushNotification.DoesNotExist:
return
@celery.shared_task(
queue=getattr(settings, 'PUSHY_QUEUE_DEFAULT_NAME', None)
)
def send_push_notification_group(notification, offset=0, limit=1000):
devices = get_filtered_devices_queryset(notification)
devices = devices[offset:offset + limit]
for device in devices:
send_single_push_notification(device, notification['payload'])
return True
@celery.shared_task(
queue=get | attr(settings, 'PUSHY_QUEUE_DEFAULT_NAME', None)
)
def send_single_push_notification(dev | ice, payload):
# The task can be called in two ways:
# 1) from send_push_notification_group directly with a device instance
# 2) As a task using .delay or apply_async with a device id
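    # Both forms end up on the same code path, e.g. (illustrative only):
    #   send_single_push_notification(device_obj, payload)       # case 1
    #   send_single_push_notification.delay(device.pk, payload)  # case 2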
if isinstance(device, int):
try:
device = Device.objects.get(pk=device)
except Device.DoesNotExist:
return False
dispatcher = get_dispatcher(device.type)
try:
canonical_id = dispatcher.send(device.key, payload)
if not canonical_id:
return
with transaction.atomic():
device.key = canonical_id
device.save()
except IntegrityError:
device.delete()
except PushInvalidTokenException:
logger.debug('Token for device {} does not exist, skipping'.format(
device.id
))
device.delete()
except PushException:
logger.exception("An error occured while sending push notification")
return
@celery.shared_task(
    queue=getattr(settings, 'PUSHY_QUEUE_DEFAULT_NAME', None),
)
def notify_push_notification_sent(notification):
if not notification['id']:
return False
try:
notification = PushNotification.objects.get(pk=notification['id'])
notification.date_finished = timezone.now()
notification.sent = PushNotification.PUSH_SENT
notification.save()
except PushNotification.DoesNotExist:
logger.exception("Notification {} does not exist".format(notification))
return False
@celery.shared_task(
    queue=getattr(settings, 'PUSHY_QUEUE_DEFAULT_NAME', None)
)
def clean_sent_notifications():
max_age = getattr(settings, 'PUSHY_NOTIFICATION_MAX_AGE', None)
if not max_age or not isinstance(max_age, datetime.timedelta):
raise ValueError('Notification max age value is not defined.')
delete_before_date = timezone.now() - max_age
PushNotification.objects.filter(
sent=PushNotification.PUSH_SENT,
date_finished__lt=delete_before_date
).delete()
|
ionelmc/python-darkslide | docs/conf.py | Python | apache-2.0 | 1,275 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sphinx_py3doc_enhanced_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'Darkslide'
year = '2015-2021'
author = 'Ionel Cristian Mărieș'
copyright = '{0}, {1}'.format(year, author)
version = release = '6.0.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/ionelmc/python-darkslide/issues/%s', '#'),
'pr': ('https://github.com/ionelmc/python-darkslide/pull/%s', 'PR #'),
}
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': ' | https://github.com/ionelmc/ | python-darkslide/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
ciudadanointeligente/deldichoalhecho-uycheck | promises/migrations/0001_initial.py | Python | gpl-3.0 | 9,083 | 0.007266 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table(u'promises_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from='name')),
))
db.send_create_signal(u'promises', ['Category'])
# Adding model 'Promise'
db.create_table(u'promises_promise', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['popolo.Person'])),
('category', self.gf('django.db.models.fields.related.ForeignKey')(related_name='promises', null=True, to=orm['promises.Category'])),
))
db.send_create_signal(u'promises', ['Promise'])
# Adding model 'InformationSource'
db.create_table(u'promises_informationsource', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=512)),
('date', self.gf('django.db.models.fields.DateField')()),
('promise', self.gf('django.db.models.fields.related.ForeignKey')(related_name='information_sources', to=orm['promises.Promise'])),
))
db.send_create_signal(u'promises', ['InformationSource'])
# Adding model 'VerificationDocument'
db.create_table(u'promises_verificationdocument', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=512)),
('date', self.gf('django.db.models.fields.DateField')()),
('promise', self.gf('django.db.models.fields.related.ForeignKey')(related_name='verification_documents', null=True, to=orm['promises.Promise'])),
))
db.send_create_signal(u'promises', ['VerificationDocument'])
# Adding model 'Fulfillment'
db.create_table(u'promises_fulfillment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('promise', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['promises.Promise'], unique=True)),
('percentage', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('notes', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
))
db.send_create_signal(u'promises', ['Fulfillment'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table(u'promises_category')
# Deleting model 'Promise'
db.delete_table(u'promises_promise')
# Deleting model 'InformationSource'
db.delete_table(u'promises_informationsource')
# Deleting model 'VerificationDocument'
db.delete_table(u'promises_verificationdocument')
# Deleting model 'Fulfillment'
db.delete_table(u'promises_fulfillment')
models = {
u'popolo.person': {
'Meta': {'object_name': 'Person'},
'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'birth_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'created_at': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'death_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'family_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('dja | ngo.db.models.fields.CharField', [], {'max_length': '128'}),
'patronymic_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': ' | ()'}),
'sort_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'start_date': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'updated_at': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
},
u'promises.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name'"})
},
u'promises.fulfillment': {
'Meta': {'object_name': 'Fulfillment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'percentage': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'promise': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['promises.Promise']", 'unique': 'True'})
},
u'promises.informationsource': {
'Meta': {'object_name': 'InformationSource'},
'date': ('django.db.models.fields.DateField', [], {}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'promise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'information_sources'", 'to': u"orm['promises.Promise']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'promises.promise': {
'Meta': {'object_name': 'Promise'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'promises'", 'null': 'True', 'to': u"orm['promises.Category']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.field |
DefaultUser/PyTIBot | lib/channelwatcher/abstract.py | Python | gpl-3.0 | 1,797 | 0 | # -*- coding: utf-8 -*-
# PyTIBot - IRC Bot using python and the twisted library
# Copyright (C) <2017-2021> <Sebastian Schmidt>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod
# Python 3 ignores the old __metaclass__ attribute, so the metaclass must be
# passed in the class statement for @abstractmethod to be enforced.
class ChannelWatcher(object, metaclass=ABCMeta):
    """
    Abstract base class for watching activity on a channel
    """
    supported_backends = []
def __init__(self, bot, channel, config):
self.bot = bot
self.channel = channel
@abstractmethod
def topic(self, user, topic):
pass
@abstractmethod
def nick(self, oldnick, newnick):
pass
@abstractmethod
def join(self, user):
pass
@abstractmethod
def part(self, user):
pass
@abstractmethod |
def quit(self, user, quitMessage):
pass
@abstractmethod
def kick(self, kickee, kicker, message):
pass
@abstractmethod
def notice(self, user, message):
pass
@abstractmethod
def action(self, user, data):
pass
@abstractmethod
def msg(self, user, message):
pass
| @abstractmethod
def stop(self):
pass
@abstractmethod
def connectionLost(self, reason):
pass
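# A hedged sketch of a concrete watcher (names are placeholders; a real
# subclass must override every abstract method above):
#
#   class LogWatcher(ChannelWatcher):
#       def msg(self, user, message):
#           print("<{}> {}".format(user, message))
#       ...  # plus the remaining overrides (topic, nick, join, part, ...)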
|
endlessm/glib | .gitlab-ci/check-todos.py | Python | lgpl-2.1 | 2,831 | 0.00142 | #!/usr/bin/env python3
#
# Copyright © 2019 Endless Mobile, Inc.
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Original author: Philip Withnall
"""
Checks that a merge request doesn’t add any instances of the string ‘todo’
(in uppercase), or similar keywords. It may remove instances of that keyword,
or move them around, according to the logic of `git log -S`.
"""
import argparse
import re
import subprocess
import sys
# We have to specify these keywords obscurely to avoid the script matching
# itself. The keyword ‘fixme’ (in upper case) is explicitly allowed because
# that’s conventionally used as a way of marking a workaround which needs to
# be merged for now, but is to be grepped for and reverted or reworked later.
BANNED_KEYWORDS = [
'TO' + 'DO',
'X' + 'XX',
'W' + 'IP',
]
def main():
parser = argparse.ArgumentParser(
description='Check a range of commits to ensure they don’t contain '
'banned keywords.')
parser.add_argument('commits',
help='SHA to diff from, or range of commits to diff')
args = parser.parse_args()
banned_words_seen = set()
seen_in_log = False
seen_in_diff = False
# Check the log messages for banned words.
log_process = subprocess.run(
['git', 'log', '--no-color', args.commits + '..HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8',
check=True)
log_lines = log_process.stdout.strip().split('\n')
for line in log_lines:
for keyword in BANNED_KEYWORDS:
            if re.search(r'(^|\W+){}(\W+|$)'.format(keyword), line):
banned_words_seen.add(keyword)
seen_in_log = True
# Check the diff for banned words.
diff_process = subprocess.run(
['git', 'diff', '-U0', '--no-color', args.commits],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8',
check=True)
diff_lines = diff_process.stdout.strip().split('\n')
for line in diff_lines:
if not line.startswith('+ '):
continue
for keyword in BANNED_KEYWORDS:
            if re.search(r'(^|\W+){}(\W+|$)'.format(keyword), line):
banned_words_seen.add(keyword)
seen_in_diff = True
if banned_words_seen:
if seen_in_log and seen_in_diff:
where = 'commit message and diff'
elif seen_in_log:
where = 'commit message'
elif seen_in_diff:
where = 'commit d | iff'
print('Saw banned keywords in a {}: {}. '
'This | indicates the branch is a work in progress and should not '
'be merged in its current '
'form.'.format(where, ', '.join(banned_words_seen)))
sys.exit(1)
if __name__ == '__main__':
main()
|
linkedin/WhereHows | metadata-ingestion/src/datahub_provider/operators/datahub.py | Python | apache-2.0 | 1,848 | 0.001082 | from typing import List, Union
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub_provider.hooks.datahub import (
DatahubGenericHook,
DatahubKafkaHook,
DatahubRestHook,
)
class DatahubBaseOperator(BaseOpera | tor):
"""
    The DatahubBaseOperator is used as a base operator for all DataHub operators.
"""
ui_color = "#4398c8"
hook: Union[DatahubRestHook, DatahubKafkaHook]
# mypy is not a fan of this. Newer versions of Airflow support proper typing for the decorator
# using PEP 612. However, there is not yet a good way to inherit the types of the kwargs from
# the superclass.
@apply_defaults # type: ignor | e[misc]
def __init__( # type: ignore[no-untyped-def]
self,
*,
datahub_conn_id: str,
**kwargs,
):
super().__init__(**kwargs)
self.datahub_conn_id = datahub_conn_id
self.generic_hook = DatahubGenericHook(datahub_conn_id)
class DatahubEmitterOperator(DatahubBaseOperator):
"""
Emits a Metadata Change Event to DataHub using either a DataHub
Rest or Kafka connection.
:param datahub_conn_id: Reference to the DataHub Rest or Kafka Connection.
:type datahub_conn_id: str
"""
# See above for why these mypy type issues are ignored here.
@apply_defaults # type: ignore[misc]
def __init__( # type: ignore[no-untyped-def]
self,
mces: List[MetadataChangeEvent],
datahub_conn_id: str,
**kwargs,
):
super().__init__(
datahub_conn_id=datahub_conn_id,
**kwargs,
)
self.mces = mces
def execute(self, context):
self.generic_hook.get_underlying_hook().emit_mces(self.mces)
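# Hedged usage sketch (the connection id and MCE payload are placeholders):
#
#   emit = DatahubEmitterOperator(
#       task_id="emit_metadata",
#       datahub_conn_id="datahub_rest_default",
#       mces=[...],  # pre-built MetadataChangeEvent objects
#   )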
|
santoshphilip/eppy | tests/sketch.py | Python | mit | 593 | 0.008432 | import glob
import os
from eppy.iddcurrent import iddcurrent
from eppy.modeleditor import IDF
from eppy.pytest_helpers import IDD_FILES
from io import StringIO
import eppy.snippet as snippet
iddsnippet = iddcurrent.iddtxt
idfsnippet = snippet.idfsnippet
iddfhandle = StringIO(iddcurrent.iddtxt)
IDF.setiddname(iddfhandle)
idd = os.path.join(ID | D_FILES, "Energy+V8_1_0.idd")
OUTPUT_DIR = "C:\EnergyPlusV8-5-0\ExampleFiles\loopdiagrams"
idfs = glob.glob(OUTPUT_DIR + "\*.idf")
dots = glob.glob(OUTPUT_DIR + "\*.dot")
for idf in idfs:
os.remove(idf)
for dot in dots:
os.remove(dot) | |
aecay/weihnachtsgurke | wng/metadata.py | Python | gpl-3.0 | 571 | 0.001751 | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for | the project.
package = 'weihnachtsgurke'
project = "PYCCLE-TCP search tool"
project_no_spaces = project.replace(' ', '')
version = '0.3'
description = 'A program for searching for strings of text in the PYCCLE-TCP corpus'
authors = ["Aaron Ecay"]
author | s_string = ', '.join(authors)
emails = ["aaronecay@gmail.com"]
license = 'GPL v3'
copyright = '2016 University of York'
url = "https://weihnachtsgurke.readthedocs.io/en/latest/"
|
blitzagency/flowbee | setup.py | Python | mit | 1,607 | 0.002489 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
import flowbee
here = os.path.abspath(os.path.dirname(__file__))
def strip_comments(l):
return l.strip()
# ret | urn l.split('#', 1)[0].strip()
def reqs(*f):
return list(filter(None, [strip_comments(l) for l in open(
os.path.join(os.getcwd(), 'requirements', *f)).readlines()]))
install_requires = reqs('default.txt')
tests_require = []
docs_extras = reqs('docs.txt')
testing_extras = test | s_require + reqs('testing.txt')
readme = open(os.path.join(here, 'README.rst')).read()
history = open(os.path.join(here, 'HISTORY.rst')).read().replace('.. :changelog:', '')
setup(
name='flowbee',
version=flowbee.__version__,
description='',
long_description=readme + '\n\n' + history,
author='Adam Venturella <aventurella@blitzagency.com>',
author_email='aventurella@blitzagency.com',
packages=find_packages(),
package_dir={'flowbee': 'flowbee'},
include_package_data=True,
install_requires=install_requires,
extras_require={
'testing': testing_extras,
'docs': docs_extras,
},
tests_require=tests_require,
license="BSD",
zip_safe=False,
keywords='',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
]
)
|
SUSE/azure-sdk-for-python | azure-batch/azure/batch/models/account_list_node_agent_skus_options.py | Python | mit | 2,070 | 0.000483 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AccountListNodeAgentSkusOptions(Model):
"""Additional parameters for the Account_list_node_agent_skus operation.
:param filter: An OData $filter clause.
:type filter: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 results will be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in sec | onds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_req | uest_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, filter=None, max_results=1000, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
self.filter = filter
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
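# Hedged example of building these options for a list call (the OData filter
# string is hypothetical):
#
#   options = AccountListNodeAgentSkusOptions(
#       filter="osType eq 'linux'",
#       max_results=100)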
|
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Volume/Testing/Python/VolumePicker.py | Python | gpl-3.0 | 9,122 | 0.001754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class VolumePicker(vtk.test.Testing.vtkTest):
def testVolumePicker(self):
# volume render a medical data set
# renderer and interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
# read the volume
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetImageRange(1, 93)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetDataSpacing(3.2, 3.2, 1.5)
#---------------------------------------------------------
# set up the volume rendering
rayCastFunction = vtk.vtkVolumeRayCastCompositeFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetInputConnection(v16.GetOutputPort())
volumeMapper.SetVolumeRayCastFunction(rayCastFunction)
volumeColor = vtk.vtkColorTransferFunction()
volumeColor.AddRGBPoint(0, 0.0, 0.0, 0.0)
volumeColor.AddRGBPoint(180, 0.3, 0.1, 0.2)
volumeColor.AddRGBPoint(1000, 1.0, 0.7, 0.6)
volumeColor.AddRGBPoint(2000, 1.0, 1.0, 0.9)
volumeScalarOpacity = vtk.vtkPiecewiseFunction()
volumeScalarOpacity.AddPoint(0, 0.0)
volumeScalarOpacity.AddPoint(180, 0.0)
volumeScalarOpacity.AddPoint(1000, 0.2)
volumeScalarOpacity.AddPoint(2000, 0.8)
volumeGradientOpacity = vtk.vtkPiecewiseFunction()
volumeGradientOpacity.AddPoint(0, 0.0)
volumeGradientOpacity.AddPoint(90, 0.5)
volumeGradientOpacity.AddPoint(100, 1.0)
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(volumeColor)
volumeProperty.SetScalarOpacity(volumeScalarOpacity)
volumeProperty.SetGradientOpacity(volumeGradientOpacity)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(0.6)
volumeProperty.SetDiffuse(0.6)
volumeProperty.SetSpecular(0.1)
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
#---------------------------------------------------------
# Do the surface rendering
boneExtractor = vtk.vtkMarchingCubes()
boneExtractor.SetInputConnection(v16.GetOutputPort())
boneExtractor.SetValue(0, 1150)
boneNormals = vtk.vtkPolyDataNormals()
boneNormals.SetInputConnection(boneExtractor.GetOutputPort())
boneNormals.SetFeatureAngle(60.0)
boneStripper = vtk.vtkStripper()
boneStripper.SetInputConnection(boneNormals.GetOutputPort())
boneMapper = vtk.vtkPolyDataMapper()
boneMapper.SetInputConnection(boneStripper.GetOutputPort())
boneMapper.ScalarVisibilityOff()
boneProperty = vtk.vtkProperty()
boneProperty.SetColor(1.0, 1.0, 0.9)
bone = vtk.vtkActor()
bone.SetMapper(boneMapper)
bone.SetProperty(boneProperty)
#---------------------------------------------------------
# Create an image actor
table = vtk.vtkLookupTable()
table.SetRange(0, 2000)
table.SetRampToLinear()
table.SetValueRange(0, 1)
table.SetHueRange(0, 0)
table.SetSaturationRange(0, 0)
mapToColors = vtk.vtkImageMapToColors()
mapToColors.SetInputConnection(v16.GetOutputPort())
mapToColors.SetLookupTable(table)
imageActor = vtk.vtkImageActor()
imageActor.GetMapper().SetInputConnection(mapToColors.GetOutputPort())
imageActor.SetDisplayExtent(32, 32, 0, 63, 0, 92)
#---------------------------------------------------------
# make a transform and some clipping planes
transform = vtk.vtkTransform()
transform.RotateWXYZ(-20, 0.0, -0.7, 0.7)
volume.SetUserTransform(transform)
bone.SetUserTransform(transform)
imageActor.SetUserTransform(transform)
c = volume.GetCenter()
volumeClip = vtk.vtkPlane()
volumeClip.SetNormal(0, 1, 0)
volumeClip.SetOrigin(c)
boneClip = vtk.vtkPlane()
boneClip.SetNormal(0, 0, 1)
boneClip.SetOrigin(c)
volumeMapper.AddClippingPlane(volumeClip)
boneMapper.AddClippingPlane(boneClip)
#---------------------------------------------------------
ren.AddViewProp(volume)
ren.AddViewProp(bone)
ren.AddViewProp(imageActor)
camera = ren.GetActiveCamera()
camera.SetFocalPoint(c)
camera.SetPosition(c[0] + 500, c[1] - 100, c[2] - 100)
camera.SetViewUp(0, 0, -1)
ren.ResetCameraClippingRange()
renWin.Render()
#---------------------------------------------------------
# the cone should point along the Z axis
coneSource = vtk.vtkConeSource()
coneSource.CappingOn()
coneSource.SetHeight(12)
coneSource.SetRadius(5)
coneSource.SetResolution(31)
coneSource.SetCenter(6, 0, 0)
coneSource.SetDirection(-1, 0, 0)
#---------------------------------------------------------
picker = vtk.vtkVolumePicker()
picker.SetTolerance(1.0e-6)
picker.SetVolumeOpacityIsovalue(0.01)
# This should usually be left alone, but is used here to increase coverage
picker.UseVolumeGradientOpacityOn()
# A function to point an actor along a vector
def PointCone(actor, n):
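            # Rotating 180 degrees about the (unnormalized) bisector of the
            # +x axis and n maps +x onto n; when n[0] < 0 the cone is first
            # flipped about y and the bisector taken with -x instead, which
            # avoids a degenerate near-zero axis for n pointing along -x.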
if n[0] < 0.0:
actor.RotateWXYZ(180, 0, 1, 0)
actor.RotateWXYZ(180, (n[0] - 1.0) * 0.5, n[1] * 0.5, n[2] * 0.5)
else:
actor.RotateWXYZ(180, (n[0] + 1.0) * 0.5, n[1] * 0.5, n[2] * 0.5)
# Pick the actor
picker.Pick(192, 103, 0, ren)
#print picker
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor1 = vtk.vtkActor()
coneActor1.PickableOff()
coneMapper1 = vtk.vtkDataSetMapper()
coneMapper1.SetInputConnection(coneSource.GetOutputPort())
coneActor1.SetMapper(coneMapper1)
coneActor1.GetProperty().SetColor(1, 0, 0)
coneActor1.SetPosition(p)
PointCone(coneActor1, n)
| ren.AddViewProp(coneActor1)
# Pick the volume
picker.Pick(90, 180, 0, ren)
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor2 = vtk.vtkActor()
coneActor2.PickableOff | ()
coneMapper2 = vtk.vtkDataSetMapper()
coneMapper2.SetInputConnection(coneSource.GetOutputPort())
coneActor2.SetMapper(coneMapper2)
coneActor2.GetProperty().SetColor(1, 0, 0)
coneActor2.SetPosition(p)
PointCone(coneActor2, n)
ren.AddViewProp(coneActor2)
# Pick the image
picker.Pick(200, 200, 0, ren)
p = picker.GetPickPosition()
n = picker.GetPickNormal()
coneActor3 = vtk.vtkActor()
coneActor3.PickableOff()
coneMapper3 = vtk.vtkDataSetMapper()
coneMapper3.SetInputConnection(coneSource.GetOutputPort())
coneActor3.SetMapper(coneMapper3)
coneActor3.GetProperty().SetColor(1, 0, 0)
coneActor3.SetPosition(p)
PointCone(coneActor3, n)
ren.AddViewProp(coneActor3) |
forallsystems/21stbadgepathways | default_site/views.py | Python | gpl-2.0 | 4,452 | 0.010332 | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import views as django_auth_views
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.db.models import Count
from users.models import *
from organizations.models import *
from badges.models import *
from common.forms import *
def custom_login(request,
template_name='login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=CustomAuthenticationForm):
    """Displays the login form and handles the login action."""
    if request.user.is_authenticated():
        return HttpResponseRedirect("/dashboard/")
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.method == "POST":
form = authentication_form(data=request.POST)
if form.is_valid():
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Heavier security check -- redirects to http://example.com should
# not be allowed, but things like /view/?param=http://example.com
# should be allowed. This regex checks if there is a '//' *before* a
# question mark.
elif '//' in redirect_to and re.match(r'[^\?]*//', redirect_to):
redirect_to = settings.LOGIN_REDIRECT_URL
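            # e.g. "http://evil.com" is caught here ("//" precedes any "?"),
            # while "/view/?next=http://ok.com" is allowed through.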
# Okay, security checks complete. Log the user in.
django_auth_views.auth_login(request, form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
request.session.set_test_cookie()
pathway_list = Pathway.get_pathway_list()
return render_to_response(template_name, {
'form': form,
'pathway_list':pathway_list,
redirect_field_name: redirect_to,
}, context_instance=RequestContext(request))
@login_required
def init_login(request):
request.session['USER_ROLE'] = request.user.get_profile().get_role()
org = Organization.get_user_organization(request.user.id)
request.session['USER_ORGANIZATION_ID'] = org.id
request.session['USER_ORGANIZATION_TYPE'] = org.type
request.session['USER_ORGANIZATION_NAME'] = org.name
request.session['USER_NAME'] = request.user.get_full_name()
return HttpResponseRedirect("/dashboard/")
@login_required
def dashboard(request, templateName='studentDashboard.html'):
try:
user_role = request.session['USER_ROLE']
if user_role in (Role.DISTRICTADMIN, Role.SCHOOLADMIN):
return render(request,'adminDashboard.html',{'first_name':request.user.first_name,'last_name':request.user.last_name})
elif user_role == Role.STUDENT:
num_awards = Award.get_user_num_awards(request.user.id)
points_balance = PointsBalance.get_user_points_balance(request.user.id)
pathway_list = Pathway.get_user_pathway_list(request.user.id)
#determine student's age
allow_backpack = 0
sp = request.user.get_profile().get_student_profile()
if sp.get_age() >= 13 and request.user.email!='':
allow_backpack = 1
return render(request,templateName,{'first_name':request.user.first_name,
'last_name':request.user.last_name,
'num_awards':num_awards,
'allow_backpack':allow_backpack,
'points_balance':points_balance,
'pathway_list':pathway_list,
'base_url':request.get_host()})
except:
import sys
pri | nt sys.exc_info()
pass
return render(request,'error.html')
@login_required
def dashboard_browse_passport(request):
return dashboard(request,templateName='studentDashboardBrowse.html')
| |
walterbender/followme | rc_skip_last.py | Python | gpl-3.0 | 1,984 | 0 | """
Follow Me activity for Sugar
Copyright (C) 2010 Peter Hewitt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/license | s/>.
"""
class RC():
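    # Row/column arithmetic over an nr x nc grid: moves wrap at the edges
    # and always skip the bottom-right cell (hence "skip_last").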
def _ | _init__(self, nr, nc):
self.nr = nr
self.nc = nc
def inc_r(self, ind):
r, c = self.row_col(ind)
r += 1
if r == self.nr:
r = 0
if r == (self.nr - 1) and c == (self.nc - 1):
r = 0
return self.indx(r, c)
def dec_r(self, ind):
r, c = self.row_col(ind)
r -= 1
if r < 0:
r = self.nr - 1
if r == (self.nr - 1) and c == (self.nc - 1):
r = self.nr - 2
return self.indx(r, c)
def inc_c(self, ind):
r, c = self.row_col(ind)
c += 1
if c == self.nc:
c = 0
if r == (self.nr - 1) and c == (self.nc - 1):
c = 0
return self.indx(r, c)
def dec_c(self, ind):
r, c = self.row_col(ind)
c -= 1
if c < 0:
c = self.nc - 1
if r == (self.nr - 1) and c == (self.nc - 1):
c = self.nc - 2
return self.indx(r, c)
def row_col(self, ind):
i = 0
for r in range(self.nr):
for c in range(self.nc):
if i == ind:
return r, c
i += 1
def indx(self, r, c):
return r * self.nc + c
|
benosteen/RDFDatabank | docs/using_databank_api/DatabankDemo.py | Python | mit | 1,544 | 0.005181 | #Databank API demo
import urllib2
import base64
import urllib
from lib.multipartform import MultiPartForm
#===============================================================================
#Using urllib2 to create a package in Databank
url = "http://databank-vm1.oerc.ox.ac.uk/test/datasets"
req = urllib2.Request(url)
USER = "admin"
PASS = "test"
identifier = "TestSubmission"
auth = 'Basic ' + base64.urlsafe_b64encode("%s:%s" % (USER, PASS))
req.add_header('Authorization', auth)
req.add_header('Accept', 'application/JSON')
req.add_data(urllib.urlencode({'id': identifier}))
# To verify the method is POST
req.get_method()
ans = | urllib2.urlopen(req)
ans.read()
ans.msg
ans.code
#===============================================================================
#Using urllib2 to post a file in Databank
#Add a file
form = MultiPartForm()
filename = "solrconfig.xml"
filepath = "data/unicode07.xml"
form.add_file('file', filename, fileHandle=open(filepath))
# Build the request
url2 = "http://databank-vm1.oerc.ox.ac.uk/test/datasets/TestSubmission"
req2 = urllib2.Re | quest(url2)
auth = 'Basic ' + base64.urlsafe_b64encode("admin:test")
req2.add_header('Authorization', auth)
req2.add_header('Accept', 'application/JSON')
body = str(form)
req2.add_header('Content-type', form.get_content_type())
req2.add_header('Content-length', len(body))
req2.add_data(body)
print
print 'OUTGOING DATA:'
print req2.get_data()
ans2 = urllib2.urlopen(req2)
print
print 'SERVER RESPONSE:'
ans2.read()
|
teeple/pns_server | work/install/Python-2.7.4/Lib/test/pydoc_mod.py | Python | gpl-2.0 | 439 | 0.006834 | """This is a test module for test_pydoc"""
__author__ = "Ben | jamin Peterson"
__credits__ = "Nobody"
__version__ = "1.2.3.4"
class A:
"""Hello and goodbye"""
def __init__():
| """Wow, I have no function!"""
pass
class B(object):
NO_MEANING = "eggs"
pass
def doc_func():
"""
This function solves all of the world's problems:
hunger
lack of Python
war
"""
def nodoc_func():
pass
|
danielsunzhongyuan/my_leetcode_in_python | sum_of_two_integers_371.py | Python | apache-2.0 | 536 | 0.024254 | """
Calculate the sum of two integers a and b, but you are not allowed to use the operato | r + and -.
Example:
Given a = 1 and b = 2, return 3.
"""
class Solution(object):
def ge | tSum(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
while a != 0 and b != 0:
a, b = a^b, (a&b)<<1
if a > 1<<31 or b > 1<<31:
a %= 1<<31
b %= 1<<31
return a or b
if __name__ == "__main__":
a = Solution()
print a.getSum(-14, 16)
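    # expected output: 2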
|
Masood-M/yalih | req/yara-3.9.0/docs/conf.py | Python | apache-2.0 | 8,265 | 0.007018 | # -*- coding: utf-8 -*-
#
# yara documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 11:04:03 2014.
#
# This file is execfile()d with the current directory set to its
# co | ntaining dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If | extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'yara'
copyright = u'2014-2019, VirusTotal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.9'
# The full version, including alpha/beta/rc tags.
release = '3.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'yaradoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'yara.tex', u'yara Documentation',
u'Victor M. Alvarez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'yara', u'yara Documentation',
[u'Victor M. Alvarez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'yara', u'yara Documentation',
u'Victor M. Alvarez', 'yara', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do |
qingshuimonk/bhtsa | demo/electionday_analysis.py | Python | mit | 1,900 | 0.003684 | from nltk.twitter import Query, credsfromfile
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import datetime as dt
import os
import pickle
import sys
sys.path.append("../bhtsa")
from twitter_senti_analyzer import senti_score_time
# settings
oauth = credsfromfile()
client = Query(**oauth)
twtNum = 1000
startTime = [2016, 11, 8, 12, 0]
step = 30
step_num = 48
path = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'model')
f = open(os.path.join(path, 'NBClassifier.pickle'), 'rb')
NBC = pickle.load(f)
origin = dt.datetime(s | tartTime[0], startTime[1], startTime[2], startTime[3], startTime[4])
dates = []
for i in range(step_num):
next_val = origin + dt.timedelta(minutes=step*i)
dates.append(next_val)
hilary_score = senti_ | score_time('hilary clinton', client, NBC, twtNum, startTime, step, step_num, 1)
trump_score = senti_score_time('donald trump', client, NBC, twtNum, startTime, step, step_num, 1)
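# The shaded bands plotted below are mean +/- 0.1 * std (taken over axis 0).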
hilary_mean = np.mean(hilary_score, axis=0)
hilary_upper = hilary_mean + np.std(hilary_score, axis=0)*0.1
hilary_lower = hilary_mean - np.std(hilary_score, axis=0)*0.1
trump_mean = np.mean(trump_score, axis=0)
trump_upper = trump_mean + np.std(trump_score, axis=0)*0.1
trump_lower = trump_mean - np.std(trump_score, axis=0)*0.1
fig, ax = plt.subplots()
plt.xticks(rotation=70)
plt.plot(dates, np.mean(hilary_score, axis=0), color='blue', linewidth=2, label='Hilary Clinton')
plt.plot(dates, np.mean(trump_score, axis=0), color='red', linewidth=2, label='Donald Trump')
ax.fill_between(dates, hilary_lower, hilary_upper, facecolor='yellow', alpha=0.5)
ax.fill_between(dates, trump_lower, trump_upper, facecolor='yellow', alpha=0.5)
ax.xaxis.set_major_formatter(DateFormatter('%d %H:%M'))
plt.xlabel('days')
plt.ylabel('score')
plt.title('Sentiment Score of Candidates')
plt.legend(loc="lower left")
plt.grid(True)
plt.show()
|
tensorflow/tensor2tensor | tensor2tensor/data_generators/paraphrase_ms_coco_test.py | Python | apache-2.0 | 3,095 | 0.002585 | # coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in complian | ce with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# W | ITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.data_generators.paraphrase_ms_coco."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
from tensor2tensor.data_generators import paraphrase_ms_coco
import tensorflow.compat.v1 as tf
class ParaphraseGenerationProblemTest(tf.test.TestCase):
def testCombinationPairs(self):
inputs = ["A", "B", "C"]
expected_combination = [("A", "B"), ("A", "C"), ("B", "C")]
actual_combination = paraphrase_ms_coco.create_combination(inputs)
self.assertEqual(actual_combination, expected_combination)
@mock.patch("tensor2tensor.data_generators"
".paraphrase_ms_coco.ParaphraseGenerationProblem.prepare_data",
return_value=[("sentence1", "sentence2")])
@mock.patch("tensor2tensor.data_generators"
".paraphrase_ms_coco.ParaphraseGenerationProblem.bidirectional")
def testBidirectionalTrue(self, data, bidirectional):
paraphrase_problem = paraphrase_ms_coco.ParaphraseGenerationProblem()
paraphrase_problem.bidirectional = True
expected_generated_data = [{"inputs": "sentence1", "targets": "sentence2"},
{"inputs": "sentence2", "targets": "sentence1"}]
actual_generated_data = list(paraphrase_problem
.generate_samples("data_dir",
"tmp_dir",
"dataset_split"))
self.assertEqual(actual_generated_data, expected_generated_data)
@mock.patch("tensor2tensor.data_generators"
".paraphrase_ms_coco.ParaphraseGenerationProblem.prepare_data",
return_value=[("sentence1", "sentence2")])
@mock.patch("tensor2tensor.data_generators"
".paraphrase_ms_coco.ParaphraseGenerationProblem.bidirectional")
def testBidirectionalFalse(self, data, bidirectional):
paraphrase_problem = paraphrase_ms_coco.ParaphraseGenerationProblem()
paraphrase_problem.bidirectional = False
expected_generated_data = [{"inputs": "sentence1", "targets": "sentence2"}]
actual_generated_data = list(paraphrase_problem
.generate_samples("data_dir",
"tmp_dir",
"dataset_split"))
self.assertEqual(actual_generated_data, expected_generated_data)
if __name__ == "__main__":
tf.test.main()
|
hjfreyer/marry-fuck-kill | backend/html_handlers.py | Python | apache-2.0 | 8,480 | 0.007547 | #!/usr/bin/env python
#
# Copyright 2010 Hunter Freyer and Michael Kelly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import ezt
import json
import logging
import urllib2
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
import core
import ezt_util
import models
def _LogRequest(page_name, req):
"""Logs a reqest object in a a relatively human-readable way.
Args:
page_name: A string identifying this page. Used in log entry for easy
greppability.
req: A WSGI request object.
"""
arg_dict = dict([(arg, req.get(arg)) for arg in req.arguments()])
logging.info('%s page params: %s', page_name, arg_dict)
def GetUserData(url_base):
# This must be in this module to have access to the current user.
nickname = ''
user = users.get_current_user()
if user:
nickname = user.nickname()
if users.is_current_user_admin():
is_current_user_admin = True
else:
is_current_user_admin = None
return dict(nickname=nickname,
login_url=users.create_login_url(url_base),
logout_url=users.create_logout_url(url_base),
is_current_user_admin=is_current_user_admin)
class RequestHandler(webapp.RequestHandler):
def error(self, code):
super(RequestHandler, self).error(code)
if code == 404:
template_values = dict(page='')
template_values.update(GetUserData('/about'))
ezt_util.WriteTemplate('notfound.html', template_values,
self.response.out)
class MainPageHandler(RequestHandler):
def get(self):
rand = models.Triple.get_next_id(self.request, self.response)
if not rand:
self.redirect('/about')
else:
RenderVotePage(self, str(rand))
class AboutHandler(RequestHandler):
def get(self):
template_values = dict(page='about')
template_values.update(GetUserData('/about'))
ezt_util.WriteTemplate('about.html', template_values, self.response.out)
class VoteHandler(RequestHandler):
def get(self, triple_id):
RenderVotePage(self, triple_id)
def RenderVotePage(handler, triple_id):
if not triple_id.isdigit():
handler.error(404)
return
triple = models.Triple.get_by_id(long(triple_id))
if not triple:
handler.error(404)
return
prev_id = handler.request.get('prev')
display_prev_id = None
prev_names = ['', '', '']
prev_urls = ['', '', '']
logging.info('Vote page for %s. Prev = %s', triple_id, prev_id)
if prev_id:
prev_triple = models.Triple.get_by_id(int(prev_id))
if prev_triple is not None:
prev_entities = [prev_triple.one, prev_triple.two, prev_triple.three]
prev_names = [e.name for e in prev_entities]
prev_urls = core.GetStatsUrlsForTriple(prev_triple)
display_prev_id = prev_id
one = triple.one
two = triple.two
three = triple.three
template_values = dict(page='vote',
triple=triple,
triple_id=triple_id,
e1_name=one.name.encode('utf-8'),
e1_url=one.image_url,
e2_name=two.name.encode('utf-8'),
e2_url=two.image_url,
e3_name=three.name.encode('utf-8'),
e3_url=three.image_url,
prev_id=display_prev_id,
prev_e1_name=prev_names[0].encode('ut | f-8'),
prev_e2_name=prev_names[1].encode('utf-8'),
prev_e3_name=prev_names[2].encode('utf-8'),
prev_e1_stat_url=prev_urls[0],
prev_e2_stat_url=prev_urls[1],
prev_e3_st | at_url=prev_urls[2])
template_values.update(GetUserData('/vote/' + triple_id))
ezt_util.WriteTemplate('vote.html', template_values, handler.response.out)
class VoteSubmitHandler(RequestHandler):
def post(self):
_LogRequest('Vote', self.request)
action = self.request.get('action')
if action == 'submit':
core.MakeAssignment(triple_id=self.request.get('key'),
v1=self.request.get('v1'),
v2=self.request.get('v2'),
v3=self.request.get('v3'),
user=users.get_current_user(),
user_ip=self.request.remote_addr)
query_suffix = '?prev=%s' % self.request.get('key')
rand = models.Triple.get_next_id(self.request, self.response)
self.redirect('/vote/%s%s' % (str(rand), query_suffix))
class MakeSubmitHandler(RequestHandler):
def post(self):
"""Handles a request to make a new Triple.
We expect the following parameters:
n1, n2, n3: Names of the new triples
u1, u2, u3: The URLs of the thumbnails of the new triples.
    q1, q2, q3: The queries that generated the new triples.
ou1, ou2, ou3: The original URLs of the new triples.
"""
# TODO(mjkelly): When we have a new client, check the 'sig' values we get.
# That will allow us to avoid repeating the search on the server side.
_LogRequest('Make', self.request)
entities = []
for n in range(1, 4):
entities.append({'n': self.request.get('n' + str(n)),
'u': self.request.get('u' + str(n)),
'q': self.request.get('q' + str(n)),
'ou': self.request.get('ou' + str(n))})
triple = core.MakeTriple(entities,
creator=users.get_current_user(),
creator_ip=self.request.remote_addr)
self.redirect('/vote/%s?new' % triple.key().id())
class MakeHandler(RequestHandler):
def get(self):
template_values = dict(page='make')
template_values.update(GetUserData('/make'))
ezt_util.WriteTemplate('maker.html', template_values, self.response.out)
class EntityImageHandler(RequestHandler):
def get(self, entity_id):
try:
entity = models.Entity.get(urllib2.unquote(entity_id))
except db.BadKeyError:
self.error(404)
return
self.response.headers['Content-Type'] = 'image/jpg';
self.response.out.write(entity.data)
class MyMfksHandler(RequestHandler):
def get(self):
template_values = dict(page='mymfks')
user = users.get_current_user()
query = models.Triple.all().filter('creator =', user).order('time')
triples = [t for t in query]
items = []
for t in triples:
stats = core.GetStatsUrlsForTriple(t)
item = ezt_util.EztItem(key=str(t.key().id()),
triple=t,
one_stats=stats[0],
two_stats=stats[1],
three_stats=stats[2])
items.append(item)
template_values.update(GetUserData('/mymfks'))
template_values.update(dict(triples=items))
ezt_util.WriteTemplate('mymfks.html', template_values, self.response.out)
class ImageSearchHandler(RequestHandler):
def get(self):
_LogRequest('ImageSearch', self.request)
query = self.request.get('q')
images = core.ImageSearch(query, self.request.remote_addr)
images_dicts = []
for img in images:
d = img._asdict()
# The goal here is just to ensure that we can check that we once returned
# this URL as a result for a search. We're not attempting to associate
# the URL with specific search terms, or with a time period.
#
# If someone puts in enough effort to abuse this somehow, we can think
# about locking it down more.
d.update(dict(sig=core.Sign(*img)))
images_dicts.append(d)
resu |
chriskiehl/python-stix | stix/extensions/marking/tlp.py | Python | bsd-3-clause | 1,713 | 0.001751 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import stix
from stix.data_marking import MarkingStructure
import stix.bindings.extensions.marking.tlp as tlp_binding
@stix.register_extension
class TLPMarkingStructure(MarkingStructure):
_binding = tlp_binding
_binding_class = tlp_binding.TLPMarkingStructureType
_namespace = 'http://data-marking.mitre.org/extensions/MarkingStructure#TLP-1'
_XSI_TYPE = "tlpMarking:TLPMarkingStructureType"
def __init__(self, color=None):
super(TLPMarkingStructure, self).__init__()
self.color = color
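    # Usage sketch (hypothetical color value, assuming the base-class
    # serializers behave as below): TLPMarkingStructure(color="AMBER")
    # should round-trip through to_dict()/from_dict() with color preserved.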
def to_obj(self, return_obj=None, ns_info=None):
super(TLPMarkingStructure, self).to_obj(return_obj=return_obj, ns_info=ns_info)
if not return_obj:
return_obj = self._binding_class()
MarkingStructure.to_obj(self, return_obj=return_obj, ns_info=ns_info)
return_obj.color = self.color
return return_obj
def to_d | ict(self):
| d = MarkingStructure.to_dict(self)
if self.color:
d['color'] = self.color
return d
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
MarkingStructure.from_obj(obj, return_obj=return_obj)
return_obj.color = obj.color
return return_obj
@classmethod
def from_dict(cls, d, return_obj=None):
if not d:
return None
if not return_obj:
return_obj = cls()
MarkingStructure.from_dict(d, return_obj)
return_obj.color = d.get('color')
return return_obj
|
openstack/tacker | tacker/tests/unit/api/v1/test_router.py | Python | apache-2.0 | 1,832 | 0 | # Copyright (c) 2014-2018 China Mobile (SuZhou) Software Technology Co.,Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limi | tations
# under the License.
from unittest import mock
from oslo_serialization import jsonutils
from tacker.api.v1.router import APIRouter
from tacker.api.v1.router import Index
from tacker.tests import base
from tacker import wsgi
class TestIndex(base.BaseTestCase): |
def test_index(self):
request = wsgi.Request.blank(
"/test/", body=b"{'name': 'tacker'}", method='POST',
headers={'Content-Type': "application/json"})
index_cls = Index({"version": "v1"})
result = index_cls(request)
expect_body = {'resources': [
{'collection': 'v1',
'links': [
{'href': 'http://localhost/test/v1',
'rel': 'self'}],
'name': 'version'}]}
self.assertEqual(expect_body, jsonutils.loads(result.body))
self.assertEqual('application/json', result.content_type)
@mock.patch('tacker.api.v1.router.APIRouter.factory', return_value=None)
class TestAPIRouter(base.BaseTestCase):
def test_api_factory(self, factory_mock):
result = APIRouter().factory({})
factory_mock.assert_called_once_with({})
self.assertEqual(None, result)
|
lywen52/quantproject | strategy/__init__.py | Python | apache-2.0 | 109 | 0.009174 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 14:11 | :38 2017
@author: | lywen
"""
|
msettles/expHTS | expHTS/extract_unmapped_reads.py | Python | apache-2.0 | 7,685 | 0.001952 | #!/usr/bin/env python
'''
Extract reads which aren't mapped from a SAM or SAM.gz file.
Behavior for PE:
-Write out PE only if both do not map (if either of the pair maps, neither is retained)
Behavior for SE:
-Write out SE if they don't map
Iterate over a SAM or SAM.gz file, take everything where the 3rd and
4th flag bits are set to 1, and write the reads out to files.
0x1 template having multiple segments in sequencing
0x2 each segment properly aligned according to the aligner
0x4 segment unmapped
0x8 next segment in the template unmapped
0x10 SEQ being reverse complemented
0x20 SEQ of the next segment in the template being reversed
0x40 the first segment in the template
0x80 the last segment in the template
0x100 secondary alignment
0x200 not passing quality controls
0x400 PCR or optical duplicate
TODO:
1) Add support for retaining both reads if one of a pair doesn't map but the other does
2) Add support for retaining the pair (or SE) if a read maps with low mapq
Note:
It is necessary to double check that both reads of a PE pair really exist in the SAM
file just in case it somehow gets disordered. This is taken care of by keeping the PE
reads in a set of dictionaries and then deleting them once the pair is written.
In the case where a read is somehow labeled as paired, but the pair doesn't exist, the
read is NOT written.
'''
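# For example, a record with flag 77 (0x1 + 0x4 + 0x8 + 0x40) is a paired,
# unmapped, first-in-pair read whose mate is also unmapped:
#   flag = 77
#   keep_pair = (flag & 0x1) and (flag & 0x4) and (flag & 0x8)  # truthy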
import sys
import os
from optparse import OptionParser # http://docs.python.org/library/optparse.html
import gzip
usage = "usage: %prog [options] -o output_base inputfile.SAM"
parser = OptionParser(usage=usage, version="%prog 2.0.1")
parser.add_option('-u', '--uncompressed', help="leave output files uncompressed",
action="store_true", dest="uncompressed")
parser.add_option('-o', '--output_base', help="output file basename",
action="store", type="str", dest="output_base", default="screened")
parser.add_option('-t', '--tab-seperated', help="write output as tab-separated records",
action="store_true", dest="tabSeperated")
parser.add_option('-v', '--verbose', help="verbose output",
action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args() # uncomment this line for command line support
if len(args) == 1:
infile = args[0]
# Start opening input/output files:
if not os.path.exists(infile):
print >> sys.stderr, "Error, can't find input file %s" % infile
sys.exit()
if infile.split(".")[-1] == "gz":
insam = gzip.open(infile, 'rb')
else:
insam = open(infile, 'r')
else:
# reading from stdin
insam = sys.stdin
base = options.output_base
PE1 = {}
PE2 = {}
contig_map = {}
interleaved = False
def writeread(ID, r1, r2):
if interleaved:
if options.tabSeperated is True:
# read1
print ID + "\t" + r1[0] + "\t" + r1[1] + "\t" + r2[0] + "\t" + r2[1] + "\n"
else:
| print "@" + ID + "#0/1"
print r1[0]
print '+\n' + r1[1]
# read2
print "@" + ID + "#0/2"
print r2[0]
print '+\n' + r2[1]
else:
# read1
outPE1.write("@" + ID | + "#0/1" '\n')
outPE1.write(r1[0] + '\n')
outPE1.write('+\n' + r1[1] + '\n')
# read2
outPE2.write("@" + ID + "#0/2" '\n')
outPE2.write(r2[0] + '\n')
outPE2.write('+\n' + r2[1] + '\n')
i = 0
PE_written = 0
SE_written = 0
SE_open = False
PE_open = False
line2 = []
for line in insam:
# Comment/header lines start with @
if line[0] != "@" and len(line.strip().split()) > 2:
line2 = line.strip().split()
flag = int(line2[1])
if (flag & 0x100): # secondary alignment
continue
i += 1
# Handle SE:
        # unmapped SE reads have 0x1 set to 0, and 0x4 (third bit) set to 1
if (flag & 0x1 == 0) and (flag & 0x4):
ID = line2[0].split("#")[0]
if not SE_open:
if base == "stdout":
interleaved = True
elif options.uncompressed:
outSE = open(base + "_SE.fastq", 'w')
else:
outSE = gzip.open(base + "_SE.fastq.gz", 'wb')
SE_open = True
# interleaved just means to stdout in this case
if (interleaved):
if options.tabSeperated is True:
print ID + "\t" + line2[9] + "\t" + line2[10] + "\n"
else:
print "@" + ID
print line2[9]
print '+\n' + line2[10]
else:
outSE.write("@" + ID + '\n')
outSE.write(line2[9] + '\n')
outSE.write('+\n' + line2[10] + '\n')
SE_written += 1
continue
# Handle PE:
# logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped, 0x80 the last segment in the template
if ((flag & 0x1) and (flag & 0x4) and (flag & 0x8)):
if not PE_open:
if base == "stdout":
interleaved = True
elif options.uncompressed:
outPE1 = open(base + "_PE1.fastq", 'w')
outPE2 = open(base + "_PE2.fastq", 'w')
else:
outPE1 = gzip.open(base + "_PE1.fastq.gz", 'wb')
outPE2 = gzip.open(base + "_PE2.fastq.gz", 'wb')
PE_open = True
if (flag & 0x40): # is this PE1 (first segment in template)
# PE1 read, check that PE2 is in dict and write out
ID = line2[0].split("#")[0]
r1 = [line2[9], line2[10]] # sequence + qual
if ID in PE2:
writeread(ID, r1, PE2[ID])
del PE2[ID]
PE_written += 1
else:
PE1[ID] = r1
continue
elif (flag & 0x80): # is this PE2 (last segment in template)
# PE2 read, check that PE1 is in dict and write out
ID = line2[0].split("#")[0]
r2 = [line2[9], line2[10]]
if ID in PE1:
writeread(ID, PE1[ID], r2)
del PE1[ID]
PE_written += 1
else:
PE2[ID] = r2
continue
# was mapped, count it up
# if line2 != []:
# contig = line2[2]
# if contig in contig_map.keys():
# if (flag & 0x1 == 0): # SE
# contig_map[contig]["SE"] += 1
# elif (flag & 0x40): # PE, Just count the first in the pair
# contig_map[contig]["PE"] += 1
# else:
# contig_map[contig] = {}
# if (flag & 0x1 == 0): # SE
# contig_map[contig]["SE"] = 1
# contig_map[contig]["PE"] = 0
# elif (flag & 0x40): # PE, Just count the first in the pair
# contig_map[contig]["SE"] = 0
# contig_map[contig]["PE"] = 1
# for k in contig_map.keys():
# print >> sys.stderr, "\tFound %s: percent: %.2f, PE mapped: %s, SE mapped: %s" % (k, (2*PE_written+SE_written)/i, contig_map[k]["PE"], contig_map[k]["SE"])
print >> sys.stderr, "Records processed: %s | PE_written: %s | SE_written: %s | Discarded: %s " % (i, PE_written, SE_written, i-(PE_written*2+SE_written))
if base != "stdout":
if PE_open:
outPE1.close()
outPE2.close()
if SE_open:
outSE.close()
|
ozmartian/vidcutter | vidcutter/__main__.py | Python | gpl-3.0 | 17,036 | 0.002935 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
#
# VidCutter - media cutter & joiner
#
# copyright © 2018 Pete Alexandrou
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import logging
import logging.handlers
import os
import shutil
import signal
import sys
import traceback
from typing import Callable, Optional
from PyQt5.QtCore import (pyqtSlot, QCommandLineOption, QCommandLineParser, QDir, QFileInfo, QProcess,
QProcessEnvironment, QSettings, QSize, QStandardPaths, QTimerEvent, Qt)
from PyQt5.QtGui import (QCloseEvent, QContextMenuEvent, QDragEnterEvent, QDropEvent, QGuiApplication, QMouseEvent,
QResizeEvent, QSurfaceFormat, qt_set_sequence_auto_mnemonic)
from PyQt5.QtWidgets import qApp, QMainWindow, QMessageBox, QSizePolicy
from vidcutter.videoconsole import ConsoleHandler, ConsoleWidget, VideoLogger
from vidcutter.videocutter import VideoCutter
from vidcutter.libs.singleapplication import SingleApplication
from vidcutter.libs.widgets import VCMessageBox
import vidcutter
import vidcutter.libs.mpv as mpv
if sys.platform == 'win32':
from vidcutter.libs.taskbarprogress import TaskbarProgress
# noinspection PyUnresolvedReferences
from PyQt5.QtWinExtras import QWinTaskbarButton
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
class MainWindow(QMainWindow):
EXIT_CODE_REBOOT = 666
TEMP_PROJECT_FILE = 'vidcutter_reboot.vcp'
WORKING_FOLDER = os.path.join(QDir.tempPath(), 'vidcutter')
def __init__(self):
super(MainWindow, self).__init__()
self.video, self.resizeTimer = '', 0
self.parse_cmdline()
self.init_settings()
self.init_logger()
self.init_scale()
self.init_cutter()
self.setWindowTitle(qApp.applicationName())
self.setContentsMargins(0, 0, 0, 0)
self.statusBar().showMessage('Ready')
self.statusBar().setStyleSheet('border: none; padding: 0; margin: 0;')
self.setAcceptDrops(True)
self.show()
if sys.platform == 'win32' and TaskbarProgress.isValidWinVer():
self.win_taskbar_button = QWinTaskbarButton(self)
self.win_taskbar_button.setWindow(self.windowHandle())
self.win_taskbar_button.progress().setVisible(True)
self.win_taskbar_button.progress().setValue(0)
self.console.setGeometry(int(self.x() - (self.width() / 2)), self.y() + int(self.height() / 3), 750, 300)
if not self.video and os.path.isfile(os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)):
self.video = os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)
if self.video:
self.file_opener(self.video)
def init_scale(self) -> None:
screen_size = qApp.desktop().availableGeometry(-1)
self.scale = 'LOW' if screen_size.width() <= 1024 else 'NORMAL'
self.setMinimumSize(self.get_size(self.scale))
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
@pyqtSlot(str)
def file_opener(self, filename: str) -> None:
try:
if QFileInfo(filename).suffix() == 'vcp':
self.cutter.openProject(project_file=filename)
if filename == os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE):
os.remove(os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE))
else:
self.cutter.loadMedia(filename)
except (FileNotFoundError, PermissionError):
QMessageBox.critical(self, 'Error loading file', sys.exc_info()[0])
logging.exception('Error loading file')
qApp.restoreOverrideCursor()
self.restart()
@staticmethod
def get_size(mode: str='NORMAL') -> QSize:
modes = {
'LOW': QSize(800, 425),
'NORMAL': QSize(930, 680),
'HIGH': QSize(1850, 1300)
}
return modes[mode]
def init_logger(self) -> None:
try:
log_path = self.get_app_config_path()
except AttributeError:
if sys.platform == 'win32':
log_path = os.path.join(QDir.homePath(), 'AppData', 'Local', qApp.applicationName().lower())
elif sys.platform == 'darwin':
log_path = os.path.join(QDir.homePath(), 'Library', 'Preferences', qApp.applicationName().lower())
else:
log_path = os.path.join(QDir.homePath(), '.config', qApp.applicationName().lower())
os.makedirs(log_path, exist_ok=True)
self.console = ConsoleWidget(self)
self.consoleLogger = ConsoleHandler(self.console)
handlers = [logging.handlers.RotatingFileHandler(os.path.join(log_path, '%s.log'
% qApp.applicationName().lower()),
maxBytes=1000000, backupCount=1),
self.consoleLogger]
if self.parser.isSet(self.debug_option) or self.verboseLogs:
# noinspection PyTypeChecker
handlers.append(logging.StreamHandler())
logging.setLoggerClass(VideoLogger)
logging.basicConfig(handlers=handlers,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M',
level=logging.INFO)
logging.captureWarnings(capture=True)
sys.excepthook = MainWindow.log_uncaught_exceptions
if os.getenv('DEBUG', False):
logging.info('appconfig folder: {}'.format(log_path))
def init_settings(self) -> None:
try:
settings_path = self.get_a | pp_confi | g_path()
except AttributeError:
if sys.platform == 'win32':
settings_path = os.path.join(QDir.homePath(), 'AppData', 'Local', qApp.applicationName().lower())
elif sys.platform == 'darwin':
settings_path = os.path.join(QDir.homePath(), 'Library', 'Preferences',
qApp.applicationName().lower())
else:
settings_path = os.path.join(QDir.homePath(), '.config', qApp.applicationName().lower())
os.makedirs(settings_path, exist_ok=True)
settings_file = '{}.ini'.format(qApp.applicationName().lower())
self.settings = QSettings(os.path.join(settings_path, settings_file), QSettings.IniFormat)
if self.settings.value('geometry') is not None:
self.restoreGeometry(self.settings.value('geometry'))
if self.settings.value('windowState') is not None:
self.restoreState(self.settings.value('windowState'))
self.theme = self.settings.value('theme', 'light', type=str)
self.startupvol = self.settings.value('volume', 100, type=int)
self.verboseLogs = self.settings.value('verboseLogs', 'off', type=str) in {'on', 'true'}
@staticmethod
def log_uncaught_exceptions(cls, exc, tb) -> None:
logging.critical(''.join(traceback.format_tb(tb)))
logging.critical('{0}: {1}'.format(cls, exc))
def parse_cmdline(self) -> None:
self.parser = QCommandLineParser()
self.parser.setApplicationDescription('\nVidCutter - the simplest + fastest media cutter & joiner')
self.parser.addPositionalArgument('video', 'Preload video file', '[video]' |
fharenheit/template-spark-app | src/main/python/ml/sql_transformer.py | Python | apache-2.0 | 1,382 | 0 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import SQLTransformer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("SQLTransformerExample")\
.getOrCreate()
# $example on$
df = spark.createDa | taFrame([
(0, 1.0, 3.0),
(2, 2.0, 5.0)
], ["id", "v1", "v2"])
sqlTrans = SQLTransformer(
statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
sqlTrans.transform(df).show()
# $example off$
spark.stop()
|
chrisdjscott/Atoman | atoman/gui/filterSettings/cropSphereSettingsDialog.py | Python | mit | 4,143 | 0.006034 |
"""
Contains GUI forms for the crop sphere filter.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import functools
from PySide2 import QtCore, QtWidgets
from . import base
from ...filtering.filters import cropSphereFilter
################################################################################
class CropSphereSettingsDialog(base.GenericSettingsDialog):
"""
Crop sphere filter settings form.
"""
def __init__(self, mainWindow, title, parent=None):
super(CropSphereSettingsDialog, self).__init__(title, parent, "Crop sphere")
self.setMinimumWidth(300)
self._settings = cropSphereFilter.CropSphereFilterSettings()
self.xCentreSpinBox = QtWidgets.QDoubleSpinBox()
self.xCentreSpinBox.setSingleStep(0.01)
self.xCentreSpinBox.setMinimum(-9999.0)
self.xCentreSpinBox.setMaximum( 9999.0)
self.xCentreSpinBox.setValue(self._settings.getSetting("xCentre"))
self.xCentreSpinBox.setToolTip("Centre of crop region (x)")
self.xCentreSpinBox.valueChanged.connect(functools.partial(self._settings.updateSetting, "xCentre"))
self.yCentreSpinBox = QtWidgets.QDoubleSpinBox()
self.yCentreSpinBox.setSingleStep(0.01)
self.yCentreSpinBox.setMinimum(-9999.0)
self.yCentreSpinBox.setMaximum( 9999.0)
self.yCentreSpinBox.setValue(self._settings.getSetting("yCentre"))
self.yCentreSpinBox.setToolTip("Centre of crop region ( | y)")
self.yCentreSpinBox.valueChanged.connect(functools.partial(self._settings.updateSetting, "yCentre"))
self.zCentreSpinBox = QtWidgets.QDoubleSpinBox()
self.zCentreSpinBox.setSingleStep(0.01)
self.zCentreSpinBox.setMinimum(-9999.0)
self.zCentreSpinBox.setMaximum( 9999.0)
self.zCentreSpinBox.setValue(self._settings.getSetting("zCentre"))
self.zCentreSpinBox.setToolTip("Centre of | crop region (z)")
self.zCentreSpinBox.valueChanged.connect(functools.partial(self._settings.updateSetting, "zCentre"))
self.contentLayout.addRow("Centre (x)", self.xCentreSpinBox)
self.contentLayout.addRow("Centre (y)", self.yCentreSpinBox)
self.contentLayout.addRow("Centre (z)", self.zCentreSpinBox)
# radius
self.radiusSpinBox = QtWidgets.QDoubleSpinBox()
self.radiusSpinBox.setSingleStep(1)
self.radiusSpinBox.setMinimum(0.0)
self.radiusSpinBox.setMaximum(9999.0)
self.radiusSpinBox.setValue(self._settings.getSetting("radius"))
self.radiusSpinBox.setToolTip("Radius of sphere")
self.radiusSpinBox.valueChanged.connect(functools.partial(self._settings.updateSetting, "radius"))
self.contentLayout.addRow("Radius", self.radiusSpinBox)
# invert selection
self.invertCheckBox = QtWidgets.QCheckBox()
self.invertCheckBox.setChecked(self._settings.getSetting("invertSelection"))
self.invertCheckBox.setToolTip("Invert selection: Select to view atoms within the sphere")
self.invertCheckBox.stateChanged.connect(self.invertChanged)
self.contentLayout.addRow("Invert selection", self.invertCheckBox)
# set to centre
self.setToLatticeButton = QtWidgets.QPushButton('Set to lattice centre')
self.setToLatticeButton.setAutoDefault(0)
self.setToLatticeButton.setToolTip('Set to lattice centre')
self.setToLatticeButton.clicked.connect(self.setToLattice)
self.contentLayout.addRow(self.setToLatticeButton)
def invertChanged(self, state):
"""Invert setting changed."""
invert = False if state == QtCore.Qt.Unchecked else True
self._settings.updateSetting("invertSelection", invert)
def setToLattice(self):
"""Set centre to lattice centre."""
self.xCentreSpinBox.setValue(self.pipelinePage.inputState.cellDims[0] / 2.0)
self.yCentreSpinBox.setValue(self.pipelinePage.inputState.cellDims[1] / 2.0)
self.zCentreSpinBox.setValue(self.pipelinePage.inputState.cellDims[2] / 2.0)
|
gnuworldman/verschemes | tests/test_pep440.py | Python | gpl-3.0 | 12,943 | 0.000232 | # -*- coding: utf-8 -*-
"""PEP 440 verschemes tests"""
import unittest
from verschemes.pep440 import Pep440Version
class Pep440VersionTestCase(unittest.TestCase):
def test_one_segment(self):
version = Pep440Version(release1=4)
self.assertEqual("4", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(4, version.release1)
self.assertEqual(0, version.release2)
self.assertEqual(0, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(None, version.pre_release)
self.assertEqual(None, version.post_release)
self.assertEqual(None, version.development)
def test_two_segments(self):
version = Pep440Version(release1=8, release2=2)
self.assertEqual("8.2", str(version))
self.assertEqual(8, version.release1)
self.assertEqual(2, version.release2)
def test_three_segments(self):
version = Pep440Version(None, 3, 11, 8)
self.assertEqual("3.11.8", str(version))
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
def test_four_segments(self):
version = Pep440Version(release1=7, release2=1, release3=26,
release4=5)
self.assertEqual("7.1.26.5", str(version))
self.assertEqual(7, version.release1)
self.assertEqual(1, version.release2)
self.assertEqual(26, version.release3)
self.assertEqual(5, version.release4)
def test_epoch(self):
version = Pep440Version(4, 3, 11, 8)
self.assertEqual("4!3.11.8", str(version))
self.assertEqual(4, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
def test_pre_release(self):
version = Pep440Version(None, 3, 11, 8, pre_release=('a', 2))
self.assertEqual("3.11.8a2", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(None, version.post_release)
self.assertEqual(None, version.development)
    def test_post_release(self):
        version = Pep440Version(None, 3, 11, 8, post_release=4)
        self.assertEqual("3.11.8.post4", str(version))
        self.assertEqual(0, version.epoch)
        self.assertEqual(3, version.release1)
        self.assertEqual(11, version.release2)
        self.assertEqual(8, version.release3)
        self.assertEqual(0, version.release4)
        self.assertEqual(0, version.release5)
        self.assertEqual(0, version.release6)
        self.assertEqual(None, version.pre_release)
        self.assertEqual(4, version.post_release)
        self.assertEqual(None, version.development)
def test_pre_and_post_release(self):
version = Pep440Version(2, 3, 11, 8, pre_release=('a', 2),
post_release=4)
self.assertEqual("2!3.11.8a2.post4", str(version))
self.assertEqual(2, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(4, version.post_release)
self.assertEqual(None, version.development)
def test_development(self):
version = Pep440Version(release1=2112, development=90125)
self.assertEqual("2112.dev90125", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(2112, version.release1)
self.assertEqual(0, version.release2)
self.assertEqual(0, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(None, version.pre_release)
self.assertEqual(None, version.post_release)
self.assertEqual(90125, version.development)
def test_pre_release_and_development(self):
version = Pep440Version(None, 3, 11, 8, pre_release=('a', 2),
development=36)
self.assertEqual("3.11.8a2.dev36", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(None, version.post_release)
| self.assertEqual(36, version.development)
def test_pre_and_post_release_and_development(self):
version = Pep440Version(1, 3, 11, 8, pre_release=('a', 2),
post_release=5, development=74)
self.assertEqual("1!3 | .11.8a2.post5.dev74", str(version))
self.assertEqual(1, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(5, version.post_release)
self.assertEqual(74, version.development)
def test_development_only(self):
version = Pep440Version(development=666)
self.assertEqual("0.dev666", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(0, version.release1)
self.assertEqual(0, version.release2)
self.assertEqual(0, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(None, version.pre_release)
self.assertEqual(None, version.post_release)
self.assertEqual(666, version.development)
def test_init_string(self):
version = Pep440Version("6.48.2")
self.assertEqual(0, version.epoch)
self.assertEqual(6, version.release1)
self.assertEqual(48, version.release2)
self.assertEqual(2, version.release3)
self.assertEqual("6.48.2", str(version))
def test_init_string_epoch(self):
version = Pep440Version("1!6.48.2")
self.assertEqual(1, version.epoch)
self.assertEqual(6, version.release1)
self.assertEqual(48, version.release2)
self.assertEqual(2, version.release3)
self.assertEqual("1!6.48.2", str(version))
def test_init_string_alpha_separator(self):
indicators = ("a", "A", "alpha", "alpHA", "Alpha", "AlPHa", "ALPHA")
expected = "1.2a3"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}3".format(indicator))))
self.assertEqual(expected,
st |
saifuddin779/data-collector | indeed/user_agents.py | Python | mit | 3,965 | 0.017402 | user_agents = [
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37 | .0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 ( | Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.10 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.10 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.10 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/530.4 (KHTML, like Gecko) Chrome/2.0.172.0 Safari/530.4',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; eu) AppleWebKit/530.4 (KHTML, like Gecko) Chrome/2.0.172.0 Safari/530.4',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/530.4 (KHTML, like Gecko) Chrome/2.0.172.0 Safari/530.4',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.0 Safari/530.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/528.11 (KHTML, like Gecko) Chrome/2.0.157.0 Safari/528.11',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/528.9 (KHTML, like Gecko) Chrome/2.0.157.0 Safari/528.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/528.11 (KHTML, like Gecko) Chrome/2.0.157.0 Safari/528.11',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/528.10 (KHTML, like Gecko) Chrome/2.0.157.0 Safari/528.10',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/528.8 (KHTML, like Gecko) Chrome/2.0.156.1 Safari/528.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/528.8 (KHTML, like Gecko) Chrome/2.0.156.1 Safari/528.8',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/528.8 (KHTML, like Gecko) Chrome/2.0.156.1 Safari/528.8',
]
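# A minimal usage sketch (the caller, url and the requests library are
# assumptions, not part of this module):
#
#     import random
#     import requests
#     headers = {'User-Agent': random.choice(user_agents)}
#     requests.get(url, headers=headers)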
|
spreg-git/pysal | pysal/esda/mapclassify.py | Python | bsd-3-clause | 50,683 | 0.000572 | """
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = ['Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval',
'Fisher_Jenks', 'Fisher_Jenks_Sampled', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers']
from pysal.common import *
K = 5 # default number of classes in any map scheme with this as an argument
def quantile(y, k=4):
"""
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
implicit : array
(n,1), quantile values
Examples
--------
>>> x = np.arange(1000)
>>> quantile(x)
array([ 249.75, 499.5 , 749.25, 999. ])
>>> quantile(x, k = 3)
array([ 333., 666., 999.])
>>>
    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles, in which case the number of classes will
    be fewer than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> quantile(y)
array([ 1., 3.])
"""
w = 100. / k
p = np.arange(w, 100 + w, w)
if p[-1] > 100.0:
p[-1] = 100.0
q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
return np.unique(q)
def binC(y, bins):
"""
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
    Returns
    -------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = range(2, 8)
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
>>>
"""
if np.rank(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
for i, bin in enumerate(bins):
b[np.nonzero(y == bin)] = i
# check for non-binned items and print a warning if needed
vals = set(y.flatten())
for val in vals:
if val not in bins:
print 'warning: value not in bin: ', val
print 'bins: ', bins
return b
def bin(y, bins):
"""
bin interval/ratio data
Parameters
----------
y : array
(n,q), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
b : array
        (n,q), bin membership, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 20, (10, 3))
>>> bins = [10, 15, 20]
>>> b = bin(x, bins)
>>> x
array([[ 7, 13, 14],
[10, 11, 13],
[ 7, 17, 2],
[18, 3, 14],
[ 9, 15, 8],
[ 7, 13, 12],
[16, 6, 11],
[19, 2, 15],
[11, 11, 9],
[ 3, 2, 19]])
>>> b
array([[0, 1, 1],
[0, 1, 1],
[0, 2, 0],
[2, 0, 1],
[0, 1, 0],
[0, 1, 1],
[2, 0, 1],
[2, 0, 1],
[1, 1, 0],
[0, 0, 2]])
>>>
"""
if np.rank(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
i = len(bins)
if type(bins) != list:
bins = bins.tolist()
binsc = copy.copy(bins)
while binsc:
i -= 1
c = binsc.pop(-1)
b[np.nonzero(y <= c)] = i
return b
def bin1d(x, bins):
"""
place values of a 1-d array into bins and determine counts of values in
each bin
Parameters
----------
x : array
(n, 1), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
binIds : array
1-d array of integer bin Ids
counts: int
number of elements of x falling in each bin
Examples
--------
>>> x = np.arange(100, dtype = 'float')
>>> bins = [25, 74, 100]
>>> binIds, counts = bin1d(x, bins)
>>> binIds
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2])
>>> counts
array([26, 49, 25])
"""
left = [-sys.maxint]
left.extend(bins[0:-1])
right = bins
cuts = zip(left, right)
k = len(bins)
binIds = np.zeros(x.shape, dtype='int')
while cuts:
k -= 1
l, r = cuts.pop(-1)
binIds += (x > l) * (x <= r) * k
counts = np.bincount(binIds)
return (binIds, counts)
def load_example():
"""
Helper function for doc tests
"""
import pysal
np.random.seed(10)
dat = pysal.open(pysal.examples.get_path('calempdensity.csv'))
cal = np.array([record[-1] for record in dat])
return cal
def natural_breaks(values, k=5, itmax=100):
"""
natural breaks helper function
"""
values = np.array(values)
n = len(values)
uv = np.unique(values)
uvk = len(uv)
if uvk < k:
print 'Warning: Not enough unique values in array to form k classes'
print "Warning: setting k to %d" % uvk
k = uvk
sids = np.random.permutation(range(len(uv)))[0:k]
seeds = uv[sids]
seeds.sort()
diffs = abs(np.matrix([values - seed for seed in seeds]))
c0 = diffs.argmin(axis=0)
c0 = np.array(c0)[0]
solving = True
solved = False
rk = range(k)
it = 0
while solving:
# get centroids of clusters
seeds = [np.median(values[c0 == c]) for c in rk]
seeds.sort()
# for each value find closest centroid
diffs = abs(np.matrix([values - seed for seed in seeds]))
# assign value to that centroid
c1 = diffs.argmin(axis=0)
c1 = np.array(c1)[0]
#compare new classids to previous
d = abs(c1 - c0)
if d.sum() == 0:
solving = False
solved = True
else:
c0 = c1
it += 1
if it == itmax:
solving = False
class_ids = c1
cuts = [max(values[c1 == c]) for c in rk]
return sids, seeds, diffs, class_ids, solved, it, cuts
def _fisher_jenks_mean | s(values, classes=5, sort=True):
"""
Jenks Optimal (Natural Breaks) algorithm implemented in Python.
The original Python | code comes from here:
http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
and is based on a JAVA and Fortran code available here:
https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
Returns class breaks such that classes are internally homogeneous while
assuring heterogeneity among classes.
"""
if sort:
values.sort()
mat1 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat1.append(temp)
mat2 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat2.append(temp)
for i in range(1, classes + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(values) + 1):
mat2[j][i] = float('inf')
v |
jmvrbanac/symantecssl | tests/unit/test_auth.py | Python | apache-2.0 | 926 | 0 | from __future__ import absolute_import, division, print_function
import pretend
import pytest
from symantecssl.auth import SymantecAuth
@pytest.mark.parametrize(
("body", "expected"),
[
("", {}),
("foo=bar", {"foo": ["bar"]}),
("foo=ba | r&wat=yes", {"foo": ["bar"], "wat": ["yes"]}),
],
)
def test_auth_on_post(body, expected):
request = pretend.stub(
method="POST",
body=body,
prepare_b | ody=pretend.call_recorder(lambda data, files: None)
)
auth = SymantecAuth("testuser", "p@ssw0rd")
expected = expected.copy()
expected.update({"username": ["testuser"], "password": ["p@ssw0rd"]})
assert auth(request) is request
assert request.prepare_body.calls == [pretend.call(expected, None)]
def test_auth_on_get():
request = pretend.stub(method="GET")
auth = SymantecAuth("testuser", "p@ssw0rd")
assert auth(request) is request
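# Together the two cases pin down the contract: credentials are merged into
# the form-encoded body of POST requests via prepare_body(), while non-POST
# requests pass through untouched.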
|
kaka19ace/kkutil | kkutil/config/__init__.py | Python | mit | 77 | 0 | #!/u | sr/bin/env python
# -*- coding: utf-8 -*-
#
fr | om .config import Config
|
daniestevez/gr-satellites | python/hier/rms_agc.py | Python | gpl-3.0 | 2,393 | 0 | # -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: RMS AGC
# Author: Daniel Estevez
# Description: AGC using RMS
# GNU Radio version: 3.8.0.0
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
class rms_agc(gr.hier_block2):
def __init__(self, alpha=1e-2, reference=0.5):
gr.hier_block2.__init__(
self,
'RMS AGC',
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
##################################################
# Parameters
################################################## |
self.alpha = alpha
self.reference = reference
##################################################
# Blocks
##################################################
self.blocks_rms_xx_0 = blocks.rms_cf(alpha)
self.blocks_multiply_const_vxx_0 = (
blocks.multiply_const_ff(1.0/reference))
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_divide_xx_0 = blocks.divide_cc(1)
self.blocks_add_const_ | vxx_0 = blocks.add_const_ff(1e-20)
##################################################
# Connections
##################################################
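        # The wiring below computes out = in / (rms(in) / reference + 1e-20):
        # the RMS block tracks the signal envelope with smoothing factor
        # alpha, and the tiny additive constant guards against division by
        # zero.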
self.connect(
(self.blocks_add_const_vxx_0, 0),
(self.blocks_float_to_complex_0, 0))
self.connect((self.blocks_divide_xx_0, 0), (self, 0))
self.connect(
(self.blocks_float_to_complex_0, 0),
(self.blocks_divide_xx_0, 1))
self.connect(
(self.blocks_multiply_const_vxx_0, 0),
(self.blocks_add_const_vxx_0, 0))
self.connect(
(self.blocks_rms_xx_0, 0),
(self.blocks_multiply_const_vxx_0, 0))
self.connect((self, 0), (self.blocks_divide_xx_0, 0))
self.connect((self, 0), (self.blocks_rms_xx_0, 0))
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
self.blocks_rms_xx_0.set_alpha(self.alpha)
def get_reference(self):
return self.reference
def set_reference(self, reference):
self.reference = reference
self.blocks_multiply_const_vxx_0.set_k(1.0/self.reference)
|
CivicKnowledge/metaeditor | editor/management/commands/create_roots.py | Python | mit | 1,850 | 0.001622 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from editor.models import Source, Format, Category
class Command(BaseCommand):
help = 'Adds root category for Source, Format and Category models.'
def _change_root(self, model_class, verbosity=1):
ROOT_NAME = '!ROOT!'
try:
source_root = model_class.objects.get(name=ROOT_NAME)
if verbosity > 0:
self.stdout.write(
'Root node for %s model already exists. Using it.' % model_class)
except mod | el_class.DoesNotExist:
if verbosity > 0:
self.stdout.write(
'Root node for %s model does not exist. Creati | ng...' % model_class)
source_root = model_class(name=ROOT_NAME)
source_root.save()
if verbosity > 0:
self.stdout.write(
'Move existing roots to children of the global root.')
# move existing source roots to children of just created root
for node in model_class.objects.filter(parent__isnull=True).exclude(id=source_root.id):
assert node.parent is None
node.parent = source_root
node.save()
model_class.objects.rebuild()
# validate new root was not changed
qs = model_class.objects.filter(parent__isnull=True)
        assert qs.count() == 1
assert qs[0].name == ROOT_NAME
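    # Net effect: each tree ends up with a single synthetic !ROOT! ancestor;
    # former roots become its children, and rebuild() then recomputes the
    # MPTT tree structure.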
def handle(self, *args, **options):
verbosity = options.get('verbosity', 1)
if verbosity > 0:
self.stdout.write('Starting to create global root...')
self._change_root(Category, verbosity=verbosity)
self._change_root(Format, verbosity=verbosity)
self._change_root(Source, verbosity=verbosity)
if verbosity > 0:
self.stdout.write('Done.')
|
JBonsink/GSOC-2013 | experiments/ManualTopoBTExperiment.py | Python | gpl-3.0 | 6,305 | 0.005234 | """
Manual Topology Experiment with Background Traffic
"""
from __future__ import print_function, division
import settings
from core.ns3.NS3Config import BackgroundTrafficConfig
from core.ns3.Topology import ManualTopologyNet
from experiments import experiment_factory
from core.configure import gen_anomaly_dot
import ns3
import copy
ManualTopoExperiment = experiment_factory('ManualTopoExperiment', BaseClass)
zeros = lambda s:[[0 for i in xrange(s[1])] for j in xrange(s[0])]
def get_inet_adj_mat(fname):
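    """Build an adjacency matrix from an Inet-format topology file: the
    first line holds the node and link counts, the next totnode lines
    (node positions) are skipped, and each remaining line is a
    "from to value" triple whose third field is ignored here.
    """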
fid = open(fname, 'r')
i = -1
while True:
i += 1
line = fid.readline()
if not line: break
if i == 0:
totnode, totlink = [int(s.strip()) for s in | line.rsplit()]
adj_mat = zeros([totnode, totnode])
continue
if i <= totnode: # ignore the position information
continue
_from, _to, _lineBuffer = [s.strip() for s in line.rsplit()]
adj_mat[int(_from)][int(_to)] = 1
fid.close()
return adj_mat
class ManualTopoBTExperiment(ManualTopoExperiment):
""" This is a extension of Manual topology experiment it
add background traffic to the network.
"""
# routing p | rotocol list, 'type':priority
routing_helper_list = {
'static':0,
'nix':5,
# 'olsr':10,
}
def initparser(self, parser):
ManualTopoExperiment.initparser(self, parser)
parser.set_defaults(back_traf="net_config/back_traf.py",
)
parser.add_option('--back_traf', dest="back_traf",
                          help='configuration files for background traffic',
)
parser.add_option('--dot_file', default='net_config/ManualTopoBTConf.dot',
help='the configured dot file')
@staticmethod
def load_para(f_name, encap=None, **kwargs):
"""load parameters.
- **f_name**: is the path of the configuration file
- **encap**: is the additional operation done to the data, for example,
the default value encap=Namespace is to change parameters from dict
to Namespace class.
- **kwargs**: contains some additional parameters
"""
s = kwargs
execfile(f_name, s)
return s if encap is None else encap(s)
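    # For example, load_para(f_name='net_config/back_traf.py', sim_t=3000)
    # executes the config file and returns its top-level names (plus sim_t)
    # as a dict; passing encap=Namespace would wrap that dict instead. The
    # sim_t value here is illustrative only.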
def transform_to_net_desc(self, net_settings):
"""add topology from topoloy file"""
net_desc = copy.copy(net_settings) # shallow copy
net_desc['node_type'] = 'NNode'
net_desc['node_para'] = {}
net_desc['topo'] = get_inet_adj_mat(settings.ROOT + '/' + self.options.topology_file)
return net_desc
def load_exper_settings(self, ns):
""" load experiment related settings
**server_id_set**:
**botmaster_id_set**:
**client_id_set**:
**SERVER_ADDR**:
**NETWORK_BASE**:
**IP_MASK**:
"""
from util import CIDR_to_subnet_mask
# botnet related configuration
self.server_id_set = ns['server_id_set']
self.botmaster_id_set = ns['botmaster_id_set']
self.client_id_set = ns['client_id_set']
# parse the server address
if len(ns['server_addr']) == 0:
self.SERVER_ADDR = ''
self.NETWORK_BASE = ''
self.IP_MASK = ''
else:
self.SERVER_ADDR, self.NETWORK_BASE, self.IP_MASK = CIDR_to_subnet_mask(ns['server_addr'][0]);
def gen_back_traf_dot(self, net_settings):
"""generate background traffic DOT file
        *input*: a dict of net_settings
*output*: DOT_FILE_PATH
"""
# get back_traf parameter
back_traf = self.load_para(
f_name = settings.ROOT + '/' + self.options.back_traf,
sim_t = self.options.simtime,
)
# call the SADIT/Configure module to generate the dot file specifying the background
# traffic pattern.
dot_file = settings.ROOT + '/' + self.options.dot_file
gen_anomaly_dot(
back_traf['ANO_LIST'],
self.transform_to_net_desc(net_settings),
back_traf['NORM_DESC'],
dot_file
)
return dot_file
def setup(self):
BaseClass.setup(self)
# net_settings = self.load_net_settings()
net_settings = self.load_para(
f_name = settings.ROOT + '/' + self.options.net_settings,
)
self.load_exper_settings(net_settings)
# Generate dot file that describe the background traffic.
dot_file = self.gen_back_traf_dot(net_settings)
ns3.LogComponentEnable("OnOffApplication", ns3.LOG_LEVEL_INFO)
ns3.LogComponentEnable("V4PingApplication", ns3.LOG_LEVEL_INFO)
self.net = ManualTopologyNet(
# os.path.abspath(self.options.topology_file),
settings.ROOT + '/' + self.options.topology_file,
self.options.topology_type,
self.NodeCreator,
net_settings,
)
        bg_config = BackgroundTrafficConfig(dot_file, self.net)
        bg_config.config_onoff_app()
self.net.set_trace()
self._install_cmds(srv_addr = self.SERVER_ADDR)
self.print_srv_addr()
self._set_server_info()
self.start_nodes()
|
pedrotari7/advent_of_code | py/2017/1B.py | Python | mit | 142 | 0.021127 | with open(' | 1.in', 'r') as f:
    a = f.read().strip()  # strip the trailing newline so wrap-around indexing stays aligned
step = len(a) / 2
print sum([int(d) for i,d in enumerate(a) if d == a[(i+step)%len | (a)]]) |
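# Advent of Code 2017, day 1, part two: each digit is compared with the
# digit halfway around the circular sequence, and matching digits are
# summed.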
askalbania/piernik | python/plot_tsl.py | Python | gpl-3.0 | 1,065 | 0.018779 | #!/usr/bin/python
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
import argparse
remove_comments = re.compile("(?!\#)", re.VERBOSE)
parser = argparse.ArgumentParser()
parser.add_argument("-f", nargs=1, default=None)
parser.add_argument("files", nargs='*')
args = parser.parse_args()
if len(args.files) < 1:
parser.error("I need at least one | tsl file")
data = []
for fn in args.files:
f = open(fn,"rb")
tab = [line.strip() for line in f.readlines()]
f.close()
header = np.array(tab[0][1:].split())
    if args.f is None:
        print ("The following fields are available in %s" % fn)
        print header
        sys.exit(0)
else:
field = args.f[0]
fno = np.where(header == field)[0][0]
tab = np.array([
map(np.float64, line.split()) for line in filter(remove_comments.match, tab)
])
data.append(tab)
fig = plt.figur | e()
ax = fig.add_subplot(111)
for i, fn in enumerate(data):
ax.plot(fn[:, 1], fn[:, fno], label=args.files[i])
ax.legend()
plt.ylabel(field)
plt.xlabel(header[1])
plt.draw()
plt.show()
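# Typical usage (field names depend on the .tsl header line):
#   ./plot_tsl.py run1.tsl            lists the available fields
#   ./plot_tsl.py -f <field> *.tsl    plots <field> for each file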
|
ngageoint/gamification-server | gamification/core/migrations/0007_auto__add_field_project_url.py | Python | mit | 10,068 | 0.007847 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.url'
db.add_column(u'core_project', 'url',
self.gf('django.db.models.fields.TextField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.url'
db.delete_column(u'core_project', 'url')
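    # Under South this migration is applied with "python manage.py migrate
    # core" (or reversed by migrating back to 0006), which invokes
    # forwards() or backwards() respectively.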
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'badges.badge': {
'Meta': {'object_name': 'Badge'},
'icon': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'badges.projectbadge': {
'Meta': {'object_name': 'ProjectBadge'},
'awardLevel': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badges.Badge']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multipleAwards': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Project']"}),
'tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '400', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': u"orm['badges.ProjectBadgeToUser']", 'to': u"orm['auth.User']"})
},
u'badges.projectbadgetouser': {
'Meta': {'object_name': 'ProjectBadgeToUser'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'projectbadge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badges.ProjectBadge']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.points': {
'Meta': {'object_name': 'Points'},
'date_awarded': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'projectbadge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badges.ProjectBadge']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['au | th.User']"}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'core.project': {
'Meta': {'ordering': "('-created_at',)", 'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allowed_api_hosts': | ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'background_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'properties': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'query_token': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'supervisors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'supervisors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Team']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.f |