| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import errno
import logging
import os
import pickle
import shutil
import sys
import tempfile
import textwrap
import six
from ipalib.install import certmonger, sysrestore
from ipapython import ipautil
from ipapython.ipautil import (
format_netloc, ipa_generate_password, run, user_input)
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipalib import api, errors, x509
from ipalib.constants import DOMAIN_LEVEL_0
from ipalib.util import (
validate_domain_name,
no_matching_interface_for_ip_address_warning,
)
import ipaclient.install.ntpconf
from ipaserver.install import (
adtrust, bindinstance, ca, dns, dsinstance,
httpinstance, installutils, kra, krbinstance,
ntpinstance, otpdinstance, custodiainstance, replication, service,
sysupgrade)
from ipaserver.install.installutils import (
IPA_MODULES, BadHostError, get_fqdn, get_server_ip_address,
is_ipa_configured, load_pkcs12, read_password, verify_fqdn,
update_hosts_file)
if six.PY3:
unicode = str
try:
from ipaserver.install import adtrustinstance
_server_trust_ad_installed = True
except ImportError:
_server_trust_ad_installed = False
NoneType = type(None)
logger = logging.getLogger(__name__)
SYSRESTORE_DIR_PATH = paths.SYSRESTORE
def validate_dm_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
# TODO: Check https://fedorahosted.org/389/ticket/47849
# The actual behavior of setup-ds.pl is that it rejects whitespace
# characters in the password when called interactively, but accepts them
# when the password is provided in an INF file. However, it ignores leading
# and trailing whitespace in the INF file.
# Disallow leading/trailing whitespace
if password.strip() != password:
raise ValueError('Password must not start or end with whitespace.')
def validate_admin_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
def read_cache(dm_password):
"""
Returns a dict of cached answers or empty dict if no cache file exists.
"""
if not os.path.isfile(paths.ROOT_IPA_CACHE):
return {}
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
installutils.decrypt_file(paths.ROOT_IPA_CACHE,
fname,
dm_password,
top_dir)
except Exception as e:
shutil.rmtree(top_dir)
raise Exception("Decryption of answer cache in %s failed, please "
"check your password." % paths.ROOT_IPA_CACHE)
try:
with open(fname, 'rb') as f:
try:
optdict = pickle.load(f)
except Exception as e:
raise Exception("Parse error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
except IOError as e:
raise Exception("Read error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
finally:
shutil.rmtree(top_dir)
# These are the only ones that may be overridden
try:
del optdict['external_cert_files']
except KeyError:
pass
return optdict
def write_cache(options):
"""
Takes a dict as input and writes a cached file of answers
"""
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
with open(fname, 'wb') as f:
pickle.dump(options, f)
installutils.encrypt_file(fname,
paths.ROOT_IPA_CACHE,
options['dm_password'],
top_dir)
except IOError as e:
raise Exception("Unable to cache command-line options %s" % str(e))
finally:
shutil.rmtree(top_dir)
def read_host_name(host_default, no_host_dns=False):
print("Enter the fully qualified domain name of the computer")
print("on which you're setting up server software. Using the form")
print("<hostname>.<domainname>")
print("Example: master.example.com.")
print("")
print("")
if host_default == "":
host_default = "master.example.com"
host_name = user_input("Server host name", host_default, allow_empty=False)
print("")
verify_fqdn(host_name, no_host_dns)
return host_name
def read_domain_name(domain_name, unattended):
print("The domain name has been determined based on the host name.")
print("")
if not unattended:
domain_name = str(user_input("Please confirm the domain name",
domain_name))
print("")
return domain_name
def read_realm_name(domain_name, unattended):
print("The kerberos protocol requires a Realm name to be defined.")
print("This is typically the domain name converted to uppercase.")
print("")
if unattended:
return domain_name.upper()
realm_name = str(user_input("Please provide a realm name",
domain_name.upper()))
upper_dom = realm_name.upper()
if upper_dom != realm_name:
print("An upper-case realm name is required.")
if not user_input("Do you want to use " + upper_dom +
" as realm name?", True):
raise ScriptError(
"An upper-case realm name is required. Unable to continue.")
else:
realm_name = upper_dom
print("")
return realm_name
def read_dm_password():
print("Certain directory server operations require an administrative user.")
print("This user is referred to as the Directory Manager and has full "
"access")
print("to the Directory for system management tasks and will be added to "
"the")
print("instance of directory server created for IPA.")
print("The password must be at least 8 characters long.")
print("")
# TODO: provide the option of generating a random password
dm_password = read_password("Directory Manager",
validator=validate_dm_password)
return dm_password
def read_admin_password():
print("The IPA server requires an administrative user, named 'admin'.")
print("This user is a regular system account used for IPA server "
"administration.")
print("")
# TODO: provide the option of generating a random password
admin_password = read_password("IPA admin",
validator=validate_admin_password)
return admin_password
def check_dirsrv(unattended):
(ds_unsecure, ds_secure) = dsinstance.check_ports()
if not ds_unsecure or not ds_secure:
msg = ("IPA requires ports 389 and 636 for the Directory Server.\n"
"These are currently in use:\n")
if not ds_unsecure:
msg += "\t389\n"
if not ds_secure:
msg += "\t636\n"
raise ScriptError(msg)
def common_cleanup(func):
def decorated(installer):
success = False
try:
func(installer)
success = True
except KeyboardInterrupt:
ds = installer._ds
print("\nCleaning up...")
if ds:
print("Removing configuration for %s instance" % ds.serverid)
ds.stop()
if ds.serverid:
try:
dsinstance.remove_ds_instance(ds.serverid)
except ipautil.CalledProcessError:
logger.error("Failed to remove DS instance. You "
"may need to remove instance data "
"manually")
raise ScriptError()
finally:
if not success and installer._installation_cleanup:
# Do a cautious clean up as we don't know what failed and
# what state the environment is in
try:
installer._fstore.restore_file(paths.HOSTS)
except Exception:
pass
return decorated
def remove_master_from_managed_topology(api_instance, options):
try:
# we may force the removal
server_del_options = dict(
force=True,
ignore_topology_disconnect=options.ignore_topology_disconnect,
ignore_last_of_role=options.ignore_last_of_role
)
replication.run_server_del_as_cli(
api_instance, api_instance.env.host, **server_del_options)
except errors.ServerRemovalError as e:
raise ScriptError(str(e))
except Exception as e:
# if the master was already deleted we will just get a warning
logger.warning("Failed to delete master: %s", e)
@common_cleanup
def install_check(installer):
options = installer
dirsrv_pkcs12_file = installer._dirsrv_pkcs12_file
http_pkcs12_file = installer._http_pkcs12_file
pkinit_pkcs12_file = installer._pkinit_pkcs12_file
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
external_cert_file = installer._external_cert_file
external_ca_file = installer._external_ca_file
http_ca_cert = installer._ca_cert
tasks.check_ipv6_stack_enabled()
tasks.check_selinux_status()
if options.master_password:
msg = ("WARNING:\noption '-P/--master-password' is deprecated. "
"KDC master password of sufficient strength is autogenerated "
"during IPA server installation and should not be set "
"manually.")
print(textwrap.fill(msg, width=79, replace_whitespace=False))
installer._installation_cleanup = True
print("\nThe log file for this installation can be found in "
"/var/log/ipaserver-install.log")
if (not options.external_ca and not options.external_cert_files and
is_ipa_configured()):
installer._installation_cleanup = False
raise ScriptError(
"IPA server is already configured on this system.\n"
"If you want to reinstall the IPA server, please uninstall "
"it first using 'ipa-server-install --uninstall'.")
client_fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
if client_fstore.has_files():
installer._installation_cleanup = False
raise ScriptError(
"IPA client is already configured on this system.\n"
"Please uninstall it before configuring the IPA server, "
"using 'ipa-client-install --uninstall'")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# This will override any settings passed in on the cmdline
if os.path.isfile(paths.ROOT_IPA_CACHE):
if options.dm_password is not None:
dm_password = options.dm_password
else:
dm_password = read_password("Directory Manager", confirm=False)
if dm_password is None:
raise ScriptError("Directory Manager password required")
try:
cache_vars = read_cache(dm_password)
options.__dict__.update(cache_vars)
if cache_vars.get('external_ca', False):
options.external_ca = False
options.interactive = False
except Exception as e:
raise ScriptError("Cannot process the cache file: %s" % str(e))
# We only set up the CA if the PKCS#12 options are not given.
if options.dirsrv_cert_files:
setup_ca = False
else:
setup_ca = True
options.setup_ca = setup_ca
if not setup_ca and options.ca_subject:
raise ScriptError(
"--ca-subject cannot be used with CA-less installation")
if not setup_ca and options.subject_base:
raise ScriptError(
"--subject-base cannot be used with CA-less installation")
if not setup_ca and options.setup_kra:
raise ScriptError(
"--setup-kra cannot be used with CA-less installation")
print("======================================="
"=======================================")
print("This program will set up the FreeIPA Server.")
print("")
print("This includes:")
if setup_ca:
print(" * Configure a stand-alone CA (dogtag) for certificate "
"management")
if not options.no_ntp:
print(" * Configure the Network Time Daemon (ntpd)")
print(" * Create and configure an instance of Directory Server")
print(" * Create and configure a Kerberos Key Distribution Center (KDC)")
print(" * Configure Apache (httpd)")
if options.setup_kra:
print(" * Configure KRA (dogtag) for secret management")
if options.setup_dns:
print(" * Configure DNS (bind)")
if options.setup_adtrust:
print(" * Configure Samba (smb) and winbind for managing AD trusts")
if not options.no_pkinit:
print(" * Configure the KDC to enable PKINIT")
if options.no_ntp:
print("")
print("Excluded by options:")
print(" * Configure the Network Time Daemon (ntpd)")
if installer.interactive:
print("")
print("To accept the default shown in brackets, press the Enter key.")
print("")
if not options.external_cert_files:
# Make sure the 389-ds ports are available
check_dirsrv(not installer.interactive)
if not options.no_ntp:
try:
ipaclient.install.ntpconf.check_timedate_services()
except ipaclient.install.ntpconf.NTPConflictingService as e:
print(("WARNING: conflicting time&date synchronization service '%s'"
" will be disabled" % e.conflicting_service))
print("in favor of ntpd")
print("")
except ipaclient.install.ntpconf.NTPConfigurationError:
pass
# Check to see if httpd is already configured to listen on 443
if httpinstance.httpd_443_configured():
raise ScriptError("Aborting installation")
if not options.setup_dns and installer.interactive:
if ipautil.user_input("Do you want to configure integrated DNS "
"(BIND)?", False):
options.setup_dns = True
print("")
# check bind packages are installed
if options.setup_dns:
# Don't require an external DNS to say who we are if we are
# setting up a local DNS server.
options.no_host_dns = True
# check that the hostname is correctly configured; it must be, as the kldap
# utilities just use the hostname returned by getaddrinfo to set up some of
# the standard entries
if options.host_name:
host_default = options.host_name
else:
host_default = get_fqdn()
try:
if not installer.interactive or options.host_name:
verify_fqdn(host_default, options.no_host_dns)
host_name = host_default
else:
host_name = read_host_name(host_default, options.no_host_dns)
except BadHostError as e:
raise ScriptError(e)
host_name = host_name.lower()
logger.debug("will use host_name: %s\n", host_name)
if not options.domain_name:
domain_name = read_domain_name(host_name[host_name.find(".")+1:],
not installer.interactive)
logger.debug("read domain_name: %s\n", domain_name)
try:
validate_domain_name(domain_name)
except ValueError as e:
raise ScriptError("Invalid domain name: %s" % unicode(e))
else:
domain_name = options.domain_name
domain_name = domain_name.lower()
if not options.realm_name:
realm_name = read_realm_name(domain_name, not installer.interactive)
logger.debug("read realm_name: %s\n", realm_name)
try:
validate_domain_name(realm_name, entity="realm")
except ValueError as e:
raise ScriptError("Invalid realm name: {}".format(unicode(e)))
else:
realm_name = options.realm_name.upper()
if not options.subject_base:
options.subject_base = installutils.default_subject_base(realm_name)
if not options.ca_subject:
options.ca_subject = \
installutils.default_ca_subject_dn(options.subject_base)
if options.http_cert_files:
if options.http_pin is None:
options.http_pin = installutils.read_password(
"Enter Apache Server private key unlock",
confirm=False, validate=False, retry=False)
if options.http_pin is None:
raise ScriptError(
"Apache Server private key unlock password required")
http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
cert_files=options.http_cert_files,
key_password=options.http_pin,
key_nickname=options.http_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
http_pkcs12_info = (http_pkcs12_file.name, http_pin)
if options.dirsrv_cert_files:
if options.dirsrv_pin is None:
options.dirsrv_pin = read_password(
"Enter Directory Server private key unlock",
confirm=False, validate=False, retry=False)
if options.dirsrv_pin is None:
raise ScriptError(
"Directory Server private key unlock password required")
dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
cert_files=options.dirsrv_cert_files,
key_password=options.dirsrv_pin,
key_nickname=options.dirsrv_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, dirsrv_pin)
if options.pkinit_cert_files:
if options.pkinit_pin is None:
options.pkinit_pin = read_password(
"Enter Kerberos KDC private key unlock",
confirm=False, validate=False, retry=False)
if options.pkinit_pin is None:
raise ScriptError(
"Kerberos KDC private key unlock password required")
pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
cert_files=options.pkinit_cert_files,
key_password=options.pkinit_pin,
key_nickname=options.pkinit_cert_name,
ca_cert_files=options.ca_cert_files,
realm_name=realm_name)
pkinit_pkcs12_info = (pkinit_pkcs12_file.name, pkinit_pin)
if (options.http_cert_files and options.dirsrv_cert_files and
http_ca_cert != dirsrv_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and Directory Server SSL "
"certificate are not signed by the same CA certificate")
if (options.http_cert_files and
options.pkinit_cert_files and
http_ca_cert != pkinit_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and PKINIT KDC "
"certificate are not signed by the same CA certificate")
if not options.dm_password:
dm_password = read_dm_password()
if dm_password is None:
raise ScriptError("Directory Manager password required")
else:
dm_password = options.dm_password
if not options.master_password:
master_password = ipa_generate_password()
else:
master_password = options.master_password
if not options.admin_password:
admin_password = read_admin_password()
if admin_password is None:
raise ScriptError("IPA admin password required")
else:
admin_password = options.admin_password
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
# make sure host name specified by user is used instead of default
host=host_name,
)
if setup_ca:
# we have an IPA-integrated CA
cfg['ca_host'] = host_name
# Create the management framework config file and finalize api
target_fname = paths.IPA_DEFAULT_CONF
fd = open(target_fname, "w")
fd.write("[global]\n")
fd.write("host=%s\n" % host_name)
fd.write("basedn=%s\n" % ipautil.realm_to_suffix(realm_name))
fd.write("realm=%s\n" % realm_name)
fd.write("domain=%s\n" % domain_name)
fd.write("xmlrpc_uri=https://%s/ipa/xml\n" % format_netloc(host_name))
fd.write("ldap_uri=ldapi://%%2fvar%%2frun%%2fslapd-%s.socket\n" %
installutils.realm_to_serverid(realm_name))
if setup_ca:
fd.write("enable_ra=True\n")
fd.write("ra_plugin=dogtag\n")
fd.write("dogtag_version=10\n")
else:
fd.write("enable_ra=False\n")
fd.write("ra_plugin=none\n")
fd.write("mode=production\n")
fd.close()
# Must be readable for everyone
os.chmod(target_fname, 0o644)
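# For illustration only (hypothetical host/realm, assuming a CA-ful install,
# i.e. setup_ca is True): with host_name=server.example.com,
# realm_name=EXAMPLE.COM and domain_name=example.com, the generated
# /etc/ipa/default.conf written above would look roughly like:
#
#     [global]
#     host=server.example.com
#     basedn=dc=example,dc=com
#     realm=EXAMPLE.COM
#     domain=example.com
#     xmlrpc_uri=https://server.example.com/ipa/xml
#     ldap_uri=ldapi://%2fvar%2frun%2fslapd-EXAMPLE-COM.socket
#     enable_ra=True
#     ra_plugin=dogtag
#     dogtag_version=10
#     mode=production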
api.bootstrap(**cfg)
api.finalize()
if setup_ca:
ca.install_check(False, None, options)
if options.setup_kra:
kra.install_check(api, None, options)
if options.setup_dns:
dns.install_check(False, api, False, options, host_name)
ip_addresses = dns.ip_addresses
else:
ip_addresses = get_server_ip_address(host_name,
not installer.interactive, False,
options.ip_addresses)
# check addresses here; the dns module does its own check
no_matching_interface_for_ip_address_warning(ip_addresses)
instance_name = "-".join(realm_name.split("."))
dirsrv = services.knownservices.dirsrv
if (options.external_cert_files
and dirsrv.is_installed(instance_name)
and not dirsrv.is_running(instance_name)):
logger.debug('Starting Directory Server')
services.knownservices.dirsrv.start(instance_name)
if options.setup_adtrust:
adtrust.install_check(False, options, api)
# the installer needs to update the hosts file when the DNS subsystem is
# being installed or custom addresses are used
if options.ip_addresses or options.setup_dns:
installer._update_hosts_file = True
print()
print("The IPA Master Server will be configured with:")
print("Hostname: %s" % host_name)
print("IP address(es): %s" % ", ".join(str(ip) for ip in ip_addresses))
print("Domain name: %s" % domain_name)
print("Realm name: %s" % realm_name)
print()
if setup_ca:
ca.print_ca_configuration(options)
print()
if options.setup_dns:
print("BIND DNS server will be configured to serve IPA domain with:")
print("Forwarders: %s" % (
"No forwarders" if not options.forwarders
else ", ".join([str(ip) for ip in options.forwarders])
))
print('Forward policy: %s' % options.forward_policy)
print("Reverse zone(s): %s" % (
"No reverse zone" if options.no_reverse or not dns.reverse_zones
else ", ".join(str(rz) for rz in dns.reverse_zones)
))
print()
if not options.setup_adtrust:
# If domain name and realm does not match, IPA server will not be able
# to establish trust with Active Directory. Print big fat warning.
realm_not_matching_domain = (domain_name.upper() != realm_name)
if realm_not_matching_domain:
print("WARNING: Realm name does not match the domain name.\n"
"You will not be able to establish trusts with Active "
"Directory unless\nthe realm name of the IPA server matches "
"its domain name.\n\n")
if installer.interactive and not user_input(
"Continue to configure the system with these values?", False):
raise ScriptError("Installation aborted")
options.realm_name = realm_name
options.domain_name = domain_name
options.dm_password = dm_password
options.master_password = master_password
options.admin_password = admin_password
options._host_name_overridden = bool(options.host_name)
options.host_name = host_name
options.ip_addresses = ip_addresses
installer._fstore = fstore
installer._sstore = sstore
installer._dirsrv_pkcs12_file = dirsrv_pkcs12_file
installer._http_pkcs12_file = http_pkcs12_file
installer._pkinit_pkcs12_file = pkinit_pkcs12_file
installer._dirsrv_pkcs12_info = dirsrv_pkcs12_info
installer._http_pkcs12_info = http_pkcs12_info
installer._pkinit_pkcs12_info = pkinit_pkcs12_info
installer._external_cert_file = external_cert_file
installer._external_ca_file = external_ca_file
installer._ca_cert = http_ca_cert
@common_cleanup
def install(installer):
options = installer
fstore = installer._fstore
sstore = installer._sstore
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
http_ca_cert = installer._ca_cert
realm_name = options.realm_name
domain_name = options.domain_name
dm_password = options.dm_password
master_password = options.master_password
admin_password = options.admin_password
host_name = options.host_name
ip_addresses = options.ip_addresses
setup_ca = options.setup_ca
# Installation has started. In case of failure, no IPA sysrestore items are
# restored, to enable root-cause investigation.
installer._installation_cleanup = False
if installer.interactive:
print("")
print("The following operations may take some minutes to complete.")
print("Please wait until the prompt is returned.")
print("")
# set hostname (transient and static) if user instructed us to do so
if options._host_name_overridden:
tasks.backup_hostname(fstore, sstore)
tasks.set_hostname(host_name)
if installer._update_hosts_file:
update_hosts_file(ip_addresses, host_name, fstore)
# Create a directory server instance
if not options.external_cert_files:
# Configure ntpd
if not options.no_ntp:
ipaclient.install.ntpconf.force_ntpd(sstore)
ntp = ntpinstance.NTPInstance(fstore)
if not ntp.is_configured():
ntp.create_instance()
if options.dirsrv_cert_files:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password, dirsrv_pkcs12_info,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
else:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
ntpinstance.ntp_ldap_enable(host_name, ds.suffix, realm_name)
else:
api.Backend.ldap2.connect()
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel)
installer._ds = ds
ds.init_info(
realm_name, host_name, domain_name, dm_password,
options.subject_base, options.ca_subject, 1101, 1100, None,
setup_pkinit=not options.no_pkinit)
krb = krbinstance.KrbInstance(fstore)
if not options.external_cert_files:
krb.create_instance(realm_name, host_name, domain_name,
dm_password, master_password,
setup_pkinit=not options.no_pkinit,
pkcs12_info=pkinit_pkcs12_info,
subject_base=options.subject_base)
else:
krb.init_info(realm_name, host_name,
setup_pkinit=not options.no_pkinit,
subject_base=options.subject_base)
if setup_ca:
if not options.external_cert_files and options.external_ca:
# stage 1 of external CA installation
options.realm_name = realm_name
options.domain_name = domain_name
options.master_password = master_password
options.dm_password = dm_password
options.admin_password = admin_password
options.host_name = host_name
options.reverse_zones = dns.reverse_zones
cache_vars = {n: options.__dict__[n] for o, n in installer.knobs()
if n in options.__dict__}
write_cache(cache_vars)
ca.install_step_0(False, None, options)
else:
# Put the CA cert where other instances expect it
x509.write_certificate(http_ca_cert, paths.IPA_CA_CRT)
os.chmod(paths.IPA_CA_CRT, 0o444)
if not options.no_pkinit:
x509.write_certificate(http_ca_cert, paths.KDC_CA_BUNDLE_PEM)
else:
with open(paths.KDC_CA_BUNDLE_PEM, 'w'):
pass
os.chmod(paths.KDC_CA_BUNDLE_PEM, 0o444)
x509.write_certificate(http_ca_cert, paths.CA_BUNDLE_PEM)
os.chmod(paths.CA_BUNDLE_PEM, 0o444)
# we now need to enable ssl on the ds
ds.enable_ssl()
if setup_ca:
ca.install_step_1(False, None, options)
otpd = otpdinstance.OtpdInstance()
otpd.create_instance('OTPD', host_name,
ipautil.realm_to_suffix(realm_name))
custodia = custodiainstance.CustodiaInstance(host_name, realm_name)
custodia.create_instance()
# Create a HTTP instance
http = httpinstance.HTTPInstance(fstore)
if options.http_cert_files:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
pkcs12_info=http_pkcs12_info, subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
else:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
tasks.restore_context(paths.CACHE_IPA_SESSIONS)
ca.set_subject_base_in_config(options.subject_base)
# configure PKINIT now that all required services are in place
krb.enable_ssl()
# Apply any LDAP updates. Needs to be done after the configuration file
# is created. DS is restarted in the process.
service.print_msg("Applying LDAP updates")
ds.apply_updates()
# Restart krb after configurations have been changed
service.print_msg("Restarting the KDC")
krb.restart()
if options.setup_kra:
kra.install(api, None, options)
if options.setup_dns:
dns.install(False, False, options)
else:
# Create a BIND instance
bind = bindinstance.BindInstance(fstore)
bind.setup(host_name, ip_addresses, realm_name,
domain_name, (), 'first', (),
zonemgr=options.zonemgr,
no_dnssec_validation=options.no_dnssec_validation)
bind.create_file_with_system_records()
if options.setup_adtrust:
adtrust.install(False, options, fstore, api)
# Set the admin user kerberos password
ds.change_admin_password(admin_password)
# Call client install script
service.print_msg("Configuring client side components")
try:
args = [paths.IPA_CLIENT_INSTALL, "--on-master", "--unattended",
"--domain", domain_name, "--server", host_name,
"--realm", realm_name, "--hostname", host_name]
if options.no_dns_sshfp:
args.append("--no-dns-sshfp")
if options.ssh_trust_dns:
args.append("--ssh-trust-dns")
if options.no_ssh:
args.append("--no-ssh")
if options.no_sshd:
args.append("--no-sshd")
if options.mkhomedir:
args.append("--mkhomedir")
run(args, redirect_output=True)
print()
except Exception:
raise ScriptError("Configuration of client side components failed!")
# Everything installed properly, activate ipa service.
services.knownservices.ipa.enable()
print("======================================="
"=======================================")
print("Setup complete")
print("")
print("Next steps:")
print("\t1. You must make sure these network ports are open:")
print("\t\tTCP Ports:")
print("\t\t * 80, 443: HTTP/HTTPS")
print("\t\t * 389, 636: LDAP/LDAPS")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
print("\t\tUDP Ports:")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
if not options.no_ntp:
print("\t\t * 123: ntp")
print("")
print("\t2. You can now obtain a kerberos ticket using the command: "
"'kinit admin'")
print("\t This ticket will allow you to use the IPA tools (e.g., ipa "
"user-add)")
print("\t and the web user interface.")
if not services.knownservices.ntpd.is_running():
print("\t3. Kerberos requires time synchronization between clients")
print("\t and servers for correct operation. You should consider "
"enabling ntpd.")
print("")
if setup_ca:
print(("Be sure to back up the CA certificates stored in " +
paths.CACERT_P12))
print("These files are required to create replicas. The password for "
"these")
print("files is the Directory Manager password")
if os.path.isfile(paths.ROOT_IPA_CACHE):
os.remove(paths.ROOT_IPA_CACHE)
@common_cleanup
def uninstall_check(installer):
options = installer
tasks.check_selinux_status()
installer._installation_cleanup = False
if not is_ipa_configured():
print("WARNING:\nIPA server is not configured on this system. "
"If you want to install the\nIPA server, please install "
"it using 'ipa-server-install'.")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
)
# We will need at least api.env, finalize api now. This system is
# already installed, so the configuration file is there.
api.bootstrap(**cfg)
api.finalize()
if installer.interactive:
print("\nThis is a NON REVERSIBLE operation and will delete all data "
"and configuration!\nIt is highly recommended to take a backup of "
"existing data and configuration using ipa-backup utility "
"before proceeding.\n")
if not user_input("Are you sure you want to continue with the "
"uninstall procedure?", False):
raise ScriptError("Aborting uninstall operation.")
try:
api.Backend.ldap2.connect(autobind=True)
domain_level = dsinstance.get_domain_level(api)
except Exception:
msg = ("\nWARNING: Failed to connect to Directory Server to find "
"information about replication agreements. Uninstallation "
"will continue despite the possible existing replication "
"agreements.\n\n"
"If this server is the last instance of CA, KRA, or DNSSEC "
"master, uninstallation may result in data loss.\n\n"
)
print(textwrap.fill(msg, width=80, replace_whitespace=False))
if (installer.interactive and not user_input(
"Are you sure you want to continue with the uninstall "
"procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
dns.uninstall_check(options)
if domain_level == DOMAIN_LEVEL_0:
rm = replication.ReplicationManager(
realm=api.env.realm,
hostname=api.env.host,
dirman_passwd=None,
conn=api.Backend.ldap2
)
agreements = rm.find_ipa_replication_agreements()
if agreements:
other_masters = [a.get('cn')[0][4:] for a in agreements]
msg = (
"\nReplication agreements with the following IPA masters "
"found: %s. Removing any replication agreements before "
"uninstalling the server is strongly recommended. You can "
"remove replication agreements by running the following "
"command on any other IPA master:\n" % ", ".join(
other_masters)
)
cmd = "$ ipa-replica-manage del %s\n" % api.env.host
print(textwrap.fill(msg, width=80, replace_whitespace=False))
print(cmd)
if (installer.interactive and
not user_input("Are you sure you want to continue with"
" the uninstall procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
remove_master_from_managed_topology(api, options)
api.Backend.ldap2.disconnect()
installer._fstore = fstore
installer._sstore = sstore
@common_cleanup
def uninstall(installer):
fstore = installer._fstore
sstore = installer._sstore
rv = 0
print("Shutting down all IPA services")
try:
services.knownservices.ipa.stop()
except Exception:
# Fallback to direct ipactl stop only if system command fails
try:
run([paths.IPACTL, "stop"], raiseonerr=False)
except Exception:
pass
ntpinstance.NTPInstance(fstore).uninstall()
kra.uninstall()
ca.uninstall()
dns.uninstall()
httpinstance.HTTPInstance(fstore).uninstall()
krbinstance.KrbInstance(fstore).uninstall()
dsinstance.DsInstance(fstore=fstore).uninstall()
if _server_trust_ad_installed:
adtrustinstance.ADTRUSTInstance(fstore).uninstall()
custodiainstance.CustodiaInstance().uninstall()
otpdinstance.OtpdInstance().uninstall()
tasks.restore_hostname(fstore, sstore)
fstore.restore_all_files()
try:
os.remove(paths.ROOT_IPA_CACHE)
except Exception:
pass
try:
os.remove(paths.ROOT_IPA_CSR)
except Exception:
pass
# ipa-client-install removes /etc/ipa/default.conf
sstore._load()
ipaclient.install.ntpconf.restore_forced_ntpd(sstore)
# Clean up group_exists (unused since IPA 2.2, not being set since 4.1)
sstore.restore_state("install", "group_exists")
services.knownservices.ipa.disable()
# remove upgrade state file
sysupgrade.remove_upgrade_file()
if fstore.has_files():
logger.error('Some files have not been restored, see '
'%s/sysrestore.index', SYSRESTORE_DIR_PATH)
has_state = False
for module in IPA_MODULES: # from installutils
if sstore.has_state(module):
logger.error('Some installation state for %s has not been '
'restored, see %s/sysrestore.state',
module, SYSRESTORE_DIR_PATH)
has_state = True
rv = 1
if has_state:
logger.error('Some installation state has not been restored.\n'
'This may cause re-installation to fail.\n'
'It should be safe to remove %s/sysrestore.state '
'but it may\n'
'mean your system hasn\'t been restored to its '
'pre-installation state.', SYSRESTORE_DIR_PATH)
# Note that this name will be wrong after the first uninstall.
dirname = dsinstance.config_dirname(
installutils.realm_to_serverid(api.env.realm))
dirs = [dirname, paths.PKI_TOMCAT_ALIAS_DIR, paths.HTTPD_ALIAS_DIR]
ids = certmonger.check_state(dirs)
if ids:
logger.error('Some certificates may still be tracked by '
'certmonger.\n'
'This will cause re-installation to fail.\n'
'Start the certmonger service and list the '
'certificates being tracked\n'
' # getcert list\n'
'These may be untracked by executing\n'
' # getcert stop-tracking -i <request_id>\n'
'for each id in: %s', ', '.join(ids))
# Remove the cert renewal lock file
try:
os.remove(paths.IPA_RENEWAL_LOCK)
except OSError as e:
if e.errno != errno.ENOENT:
logger.warning("Failed to remove file %s: %s",
paths.IPA_RENEWAL_LOCK, e)
print("Removing IPA client configuration")
try:
result = run([paths.IPA_CLIENT_INSTALL, "--on-master",
"--unattended", "--uninstall"],
raiseonerr=False, redirect_output=True)
if result.returncode not in [0, 2]:
raise RuntimeError("Failed to configure the client")
except Exception:
rv = 1
print("Uninstall of client side components failed!")
sys.exit(rv)
def init(installer):
installer.unattended = not installer.interactive
installer.domainlevel = installer.domain_level
installer._installation_cleanup = True
installer._ds = None
installer._dirsrv_pkcs12_file = None
installer._http_pkcs12_file = None
installer._pkinit_pkcs12_file = None
installer._dirsrv_pkcs12_info = None
installer._http_pkcs12_info = None
installer._pkinit_pkcs12_info = None
installer._external_cert_file = None
installer._external_ca_file = None
installer._ca_cert = None
installer._update_hosts_file = False
| apophys/freeipa | ipaserver/install/server/install.py | Python | gpl-3.0 | 44,536 |
"""
Copyright (C) 2017-2021 Vanessa Sochat.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from django.conf.urls import url
from rest_framework_swagger.views import get_swagger_view
swagger_view = get_swagger_view(title="Singularity Registry API", url="")
urlpatterns = [url(r"^$", swagger_view)]
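# Illustrative sketch (not part of the original module): the swagger URL
# patterns above would typically be pulled into the project's root URLconf.
# The "api/docs/" prefix below is an assumption for illustration only.
#
#     from django.conf.urls import include, url
#
#     urlpatterns = [
#         url(r"^api/docs/", include("shub.apps.api.urls.swagger")),
#     ]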
| singularityhub/sregistry | shub/apps/api/urls/swagger.py | Python | mpl-2.0 | 454 |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .i18n import get_kuma_languages
@never_cache
def _error_page(request, status):
"""
Render error pages with jinja2.
Sometimes, an error is raised by a middleware, and the request is not
fully populated with a user or language code. Add in good defaults.
"""
if not hasattr(request, 'user'):
request.user = AnonymousUser()
if not hasattr(request, 'LANGUAGE_CODE'):
request.LANGUAGE_CODE = 'en-US'
return render(request, '%d.html' % status, status=status)
@never_cache
@csrf_exempt
@require_POST
def set_language(request):
lang_code = request.POST.get("language")
response = HttpResponse(status=204)
if lang_code and lang_code in get_kuma_languages():
response.set_cookie(key=settings.LANGUAGE_COOKIE_NAME,
value=lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
)
return response
handler403 = lambda request, exception=None: _error_page(request, 403)
handler404 = lambda request, exception=None: _error_page(request, 404)
handler500 = lambda request, exception=None: _error_page(request, 500)
@never_cache
def rate_limited(request, exception):
"""Render a rate-limited exception."""
response = render(request, '429.html', status=429)
response['Retry-After'] = '60'
return response
| SphinxKnight/kuma | kuma/core/views.py | Python | mpl-2.0 | 1,861 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import sys
import subprocess
import re
from common import list2cmdline
from common import sshexec
from common import SSH
subprocess.list2cmdline = list2cmdline
__author__ = "Drew Bonasera"
__license__ = "MPL 2.0"
TYPE = "Metadata"
NAME = "ExifTool"
# These are overwritten by the config file
HOST = ("MultiScanner", 22, "User")
KEY = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), 'etc', 'id_rsa')
PATHREPLACE = "X:\\"
# Entries to be removed from the final results
REMOVEENTRY = ["ExifTool Version Number", "File Name", "Directory", "File Modification Date/Time",
"File Creation Date/Time", "File Access Date/Time", "File Permissions"]
DEFAULTCONF = {
'cmdline': ["-t"],
"path": "C:\\exiftool.exe",
"key": KEY,
'host': HOST,
"replacement path": PATHREPLACE,
'remove-entry': REMOVEENTRY,
'ENABLED': True
}
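# Illustrative sketch (assumption, not part of the original module): per the
# comment above, a site config would typically override the DEFAULTCONF keys,
# e.g. pointing 'path' at a local exiftool binary and adjusting the SSH 'host'
# tuple. The path and host values below are hypothetical.
#
#     CONF = dict(DEFAULTCONF)
#     CONF.update({
#         'path': '/usr/bin/exiftool',                       # hypothetical local path
#         'host': ('scanner.example.com', 22, 'scanuser'),   # hypothetical SSH host
#     })
#     if check(CONF):
#         results, metadata = scan(['/tmp/sample.bin'], conf=CONF)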
def check(conf=DEFAULTCONF):
if not conf['ENABLED']:
return False
if os.path.isfile(conf["path"]):
if 'replacement path' in conf:
del conf['replacement path']
return True
if SSH:
return True
else:
return False
def scan(filelist, conf=DEFAULTCONF):
if os.path.isfile(conf["path"]):
local = True
elif SSH:
local = False
cmdline = conf["cmdline"]
results = []
output = ""
cmd = cmdline
for item in filelist:
cmd.append('"' + item + '" ')
cmd.insert(0, conf["path"])
host, port, user = conf["host"]
if local:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
try:
output = sshexec(host, list2cmdline(cmd), port=port, username=user, key_filename=conf["key"])
except Exception as e:
# TODO: log exception
return None
output = output.decode("utf-8", errors="ignore")
output = output.replace('\r', '')
reader = output.split('\n')
data = {}
fname = filelist[0]
for row in reader:
row = row.split('\t')
try:
if row[0].startswith('======== '):
if data:
results.append((fname, data))
data = {}
fname = row[0][9:]
if re.match('[A-Za-z]:/', fname):
# why exif tools, whyyyyyyyy
fname = fname.replace('/', '\\')
continue
except Exception as e:
# TODO: log exception
pass
try:
if row[0] not in conf['remove-entry']:
data[row[0]] = row[1]
except Exception as e:
# TODO: log exception
continue
if data:
results.append((fname, data))
data = {}
reader = None
# Gather metadata
metadata = {}
output = output.replace('\r', '')
reader = output.split('\n')
for row in reader:
row = row.split('\t')
if row and row[0] == "ExifTool Version Number":
metadata["Program version"] = row[1]
break
metadata["Name"] = NAME
metadata["Type"] = TYPE
return (results, metadata)
| jmlong1027/multiscanner | modules/Metadata/ExifToolsScan.py | Python | mpl-2.0 | 3,516 |
import contextlib
import functools
import logging
import psycopg2.extras
from psycopg2 import Error as Psycopg2Error
_logger = logging.getLogger(__name__)
def retry_on_psycopg2_error(func):
"""
Decorator that retries 3 times after Postgres error, in particular if
the connection was not valid anymore because the database was restarted
"""
@functools.wraps(func)
def wrapper_retry(*args, **kwargs):
retry = 0
while retry < 4:
try:
result = func(*args, **kwargs)
except Psycopg2Error:
retry += 1
if retry > 3:
raise
else:
_logger.warning(f'Retry query for {func.__name__} ({retry})')
continue
break
return result
return wrapper_retry
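# Illustrative sketch (hypothetical, not part of the original module): the
# decorator above is meant to wrap functions whose queries may hit a database
# that was restarted. `some_dsn` is a placeholder; dbconnection() is defined
# just below in this module.
#
#     @retry_on_psycopg2_error
#     def fetch_datasets(some_dsn):
#         return dbconnection(some_dsn).fetch_all("SELECT 1 AS ok")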
@functools.lru_cache()
def dbconnection(dsn):
"""Creates an instance of _DBConnection and remembers the last one made."""
return _DBConnection(dsn)
class _DBConnection:
""" Wraps a PostgreSQL database connection that reports crashes and tries
its best to repair broken connections.
NOTE: doesn't always work, but the failure scenario is very hard to
reproduce. Also see https://github.com/psycopg/psycopg2/issues/263
"""
def __init__(self, *args, **kwargs):
self.conn_args = args
self.conn_kwargs = kwargs
self._conn = None
self._connect()
def _connect(self):
if self._conn is None:
self._conn = psycopg2.connect(*self.conn_args, **self.conn_kwargs)
self._conn.autocommit = True
def _is_usable(self):
""" Checks whether the connection is usable.
:returns boolean: True if we can query the database, False otherwise
"""
try:
self._conn.cursor().execute("SELECT 1")
except psycopg2.Error:
return False
else:
return True
@contextlib.contextmanager
def _connection(self):
""" Contextmanager that catches tries to ensure we have a database
connection. Yields a Connection object.
If a :class:`psycopg2.DatabaseError` occurs then it will check whether
the connection is still usable, and if it's not, close and remove it.
"""
try:
self._connect()
yield self._conn
except psycopg2.Error as e:
_logger.critical('AUTHZ DatabaseError: {}'.format(e))
if not self._is_usable():
with contextlib.suppress(psycopg2.Error):
self._conn.close()
self._conn = None
raise e
@contextlib.contextmanager
def transaction_cursor(self, cursor_factory=None):
""" Yields a cursor with transaction.
"""
with self._connection() as transaction:
with transaction:
with transaction.cursor(cursor_factory=cursor_factory) as cur:
yield cur
@contextlib.contextmanager
def cursor(self, cursor_factory=None):
""" Yields a cursor without transaction.
"""
with self._connection() as conn:
with conn.cursor(cursor_factory=cursor_factory) as cur:
yield cur
def fetch_all(self, sql):
with self.cursor(
cursor_factory=psycopg2.extras.RealDictCursor) as cur:
cur.execute(sql)
return cur.fetchall()
def fetch_one(self, sql):
with self.cursor(
cursor_factory=psycopg2.extras.RealDictCursor) as cur:
cur.execute(sql)
return cur.fetchone()
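# Illustrative usage sketch (hypothetical DSN, not part of the original module):
#
#     db = dbconnection("dbname=geosearch host=localhost user=geosearch")
#     rows = db.fetch_all("SELECT 1 AS answer")   # list of RealDictRow mappings
#     with db.transaction_cursor() as cur:        # cursor wrapped in a transaction
#         cur.execute("SELECT 2")
#
# Because dbconnection() is decorated with functools.lru_cache, repeated calls
# with the same DSN reuse a single _DBConnection instance.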
| DatapuntAmsterdam/datapunt_geosearch | web/geosearch/datapunt_geosearch/db.py | Python | mpl-2.0 | 3,665 |
import ujson as json
from pprint import pprint
import msgpack
import logging
from cifsdk.constants import PYVERSION
import os
TRACE = os.environ.get('CIFSDK_CLIENT_MSG_TRACE')
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
if TRACE:
logger.setLevel(logging.DEBUG)
MAP = {
1: 'ping',
2: 'ping_write',
3: 'indicators_create',
4: 'indicators_search',
5: 'indicators_delete',
6: 'tokens_search',
7: 'tokens_create',
8: 'tokens_delete',
9: 'tokens_edit',
}
class Msg(object):
PING = 1
PING_WRITE = 2
INDICATORS_CREATE = 3
INDICATORS_SEARCH = 4
INDICATORS_DELETE = 5
TOKENS_SEARCH = 6
TOKENS_CREATE = 7
TOKENS_DELETE = 8
TOKENS_EDIT = 9
def __init__(self, *args, **kwargs):
for k in kwargs:
if isinstance(kwargs[k], str):
try:
kwargs[k] = kwargs[k].encode('utf-8')
except UnicodeDecodeError:
pass
self.id = kwargs.get('id')
self.client_id = kwargs.get('client_id')
self.mtype = kwargs.get('mtype')
self.token = kwargs.get('token')
self.data = kwargs.get('data')
self.null = ''.encode('utf-8')
# from str to int
def mtype_to_int(self, mtype):
for m in MAP:
if MAP[m] == mtype:
return m
def __repr__(self):
m = {
'id': self.id,
'mtype': self.mtype,
'token': self.token,
'data': self.data,
}
return json.dumps(m)
def recv(self, s):
m = s.recv_multipart()
if len(m) == 6:
id, client_id, null, token, mtype, data = m
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
return id, client_id, token.decode('utf-8'), mtype, data.decode('utf-8')
elif len(m) == 5:
id, null, token, mtype, data = m
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
return id, token.decode('utf-8'), mtype, data.decode('utf-8')
elif len(m) == 4:
id, token, mtype, data = m
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
return id, token.decode('utf-8'), mtype, data.decode('utf-8')
elif len(m) == 3:
id, mtype, data = m
try:
mtype = msgpack.unpackb(mtype)
mtype = MAP[mtype]
except msgpack.exceptions.ExtraData:
pass
return id, mtype, data.decode('utf-8')
else:
mtype, data = m
return mtype, data.decode("utf-8")
def to_list(self):
m = []
if self.id:
m.append(self.id)
if self.client_id:
m.append(self.client_id)
if len(m) > 0:
m.append(self.null)
if self.token:
if isinstance(self.token, str):
self.token = self.token.encode('utf-8')
if PYVERSION == 2:
if isinstance(self.token, unicode):
self.token = self.token.encode('utf-8')
m.append(self.token)
if self.mtype:
if isinstance(self.mtype, bytes):
self.mtype = self.mtype_to_int(self.mtype.decode('utf-8'))
m.append(msgpack.packb(self.mtype))
if isinstance(self.data, dict):
self.data = [self.data]
if isinstance(self.data, list):
self.data = json.dumps(self.data)
if isinstance(self.data, str):
self.data = self.data.encode('utf-8')
if PYVERSION == 2:
if isinstance(self.data, unicode):
self.data = self.data.encode('utf-8')
m.append(self.data)
return m
def send(self, s):
m = self.to_list()
logger.debug('sending...')
s.send_multipart(m)
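# Illustrative sketch (not part of the original module): building a search
# message and serializing it into the multipart frames that send() would put
# on a ZeroMQ socket. The token and indicator values are placeholders.
#
#     m = Msg(mtype=Msg.INDICATORS_SEARCH,
#             token='0123456789abcdef',
#             data={'indicator': 'example.com'})
#     frames = m.to_list()   # [token bytes, msgpack-packed mtype, JSON-encoded data]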
| csirtgadgets/bearded-avenger-sdk-py | cifsdk/msg.py | Python | mpl-2.0 | 3,924 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Document'
db.create_table('witness_document', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creation_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_update_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=128)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
))
db.send_create_signal('witness', ['Document'])
# Adding model 'DocumentVersion'
db.create_table('witness_documentversion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creation_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_update_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('document', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['witness.Document'])),
('number', self.gf('django.db.models.fields.CharField')(max_length=64)),
('title', self.gf('django.db.models.fields.CharField')(max_length=128)),
('text', self.gf('django.db.models.fields.TextField')()),
('yes_action_text', self.gf('django.db.models.fields.CharField')(max_length=64)),
('no_action_text', self.gf('django.db.models.fields.CharField')(max_length=64)),
('is_retired', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('witness', ['DocumentVersion'])
# Adding model 'Decision'
db.create_table('witness_decision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creation_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_update_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('document_version', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['witness.DocumentVersion'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('ip_address', self.gf('django.db.models.fields.CharField')(max_length=64)),
('text_hash', self.gf('django.db.models.fields.CharField')(max_length=128)),
('action_text', self.gf('django.db.models.fields.CharField')(max_length=64)),
('is_yes', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_no', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('witness', ['Decision'])
def backwards(self, orm):
# Deleting model 'Document'
db.delete_table('witness_document')
# Deleting model 'DocumentVersion'
db.delete_table('witness_documentversion')
# Deleting model 'Decision'
db.delete_table('witness_decision')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'witness.decision': {
'Meta': {'object_name': 'Decision'},
'action_text': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'document_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['witness.DocumentVersion']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'is_no': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_yes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'text_hash': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'witness.document': {
'Meta': {'object_name': 'Document'},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'witness.documentversion': {
'Meta': {'object_name': 'DocumentVersion'},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['witness.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_retired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'no_action_text': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'yes_action_text': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['witness']
| khchen428/witness | witness/migrations/0001_initial.py | Python | mpl-2.0 | 9,590 |
import io
import os
import sys
from atomicwrites import atomic_write
from copy import deepcopy
from multiprocessing import Pool, cpu_count
from six import ensure_text
from . import jsonlib
from . import vcs
from .item import (ConformanceCheckerTest,
CrashTest,
ManifestItem,
ManualTest,
PrintRefTest,
RefTest,
SupportFile,
TestharnessTest,
VisualTest,
WebDriverSpecTest)
from .log import get_logger
from .sourcefile import SourceFile
from .typedata import TypeData
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from logging import Logger
from typing import Any
from typing import Container
from typing import Dict
from typing import IO
from typing import Iterator
from typing import Iterable
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from typing import Union
CURRENT_VERSION = 8 # type: int
class ManifestError(Exception):
pass
class ManifestVersionMismatch(ManifestError):
pass
class InvalidCacheError(Exception):
pass
item_classes = {u"testharness": TestharnessTest,
u"reftest": RefTest,
u"print-reftest": PrintRefTest,
u"crashtest": CrashTest,
u"manual": ManualTest,
u"wdspec": WebDriverSpecTest,
u"conformancechecker": ConformanceCheckerTest,
u"visual": VisualTest,
u"support": SupportFile} # type: Dict[Text, Type[ManifestItem]]
def compute_manifest_items(source_file):
# type: (SourceFile) -> Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]
rel_path_parts = source_file.rel_path_parts
new_type, manifest_items = source_file.manifest_items()
file_hash = source_file.hash
return rel_path_parts, new_type, set(manifest_items), file_hash
if MYPY:
ManifestDataType = Dict[Any, TypeData]
else:
ManifestDataType = dict
class ManifestData(ManifestDataType):
def __init__(self, manifest):
# type: (Manifest) -> None
"""Dictionary subclass containing a TypeData instance for each test type,
keyed by type name"""
self.initialized = False # type: bool
for key, value in item_classes.items():
self[key] = TypeData(manifest, value)
self.initialized = True
self.json_obj = None # type: None
def __setitem__(self, key, value):
# type: (Text, TypeData) -> None
if self.initialized:
raise AttributeError
dict.__setitem__(self, key, value)
def paths(self):
# type: () -> Set[Text]
"""Get a list of all paths containing test items
without actually constructing all the items"""
rv = set() # type: Set[Text]
for item_data in self.values():
for item in item_data:
rv.add(os.path.sep.join(item))
return rv
def type_by_path(self):
# type: () -> Dict[Tuple[Text, ...], Text]
rv = {}
for item_type, item_data in self.items():
for item in item_data:
rv[item] = item_type
return rv
class Manifest(object):
def __init__(self, tests_root, url_base="/"):
# type: (Text, Text) -> None
assert url_base is not None
self._data = ManifestData(self) # type: ManifestData
self.tests_root = tests_root # type: Text
self.url_base = url_base # type: Text
def __iter__(self):
# type: () -> Iterator[Tuple[Text, Text, Set[ManifestItem]]]
return self.itertypes()
def itertypes(self, *types):
# type: (*Text) -> Iterator[Tuple[Text, Text, Set[ManifestItem]]]
for item_type in (types or sorted(self._data.keys())):
for path in self._data[item_type]:
rel_path = os.sep.join(path)
tests = self._data[item_type][path]
yield item_type, rel_path, tests
def iterpath(self, path):
# type: (Text) -> Iterable[ManifestItem]
tpath = tuple(path.split(os.path.sep))
for type_tests in self._data.values():
i = type_tests.get(tpath, set())
assert i is not None
for test in i:
yield test
def iterdir(self, dir_name):
# type: (Text) -> Iterable[ManifestItem]
tpath = tuple(dir_name.split(os.path.sep))
tpath_len = len(tpath)
for type_tests in self._data.values():
for path, tests in type_tests.items():
if path[:tpath_len] == tpath:
for test in tests:
yield test
def update(self, tree, parallel=True):
# type: (Iterable[Tuple[Text, Optional[Text], bool]], bool) -> bool
"""Update the manifest given an iterable of items that make up the updated manifest.
        The iterable must generate tuples of the form (path, file_hash, updated),
        where `updated` is True for paths that may need their items recomputed and
        False for paths that are unchanged. This unusual API is an optimisation:
        SourceFile objects need not be constructed for paths that are not being
        updated, while the absence of a path from the iterator is used to remove
        defunct entries from the manifest."""
logger = get_logger()
changed = False
# Create local variable references to these dicts so we avoid the
# attribute access in the hot loop below
data = self._data
types = data.type_by_path()
remaining_manifest_paths = set(types)
to_update = []
for path, file_hash, updated in tree:
path_parts = tuple(path.split(os.path.sep))
is_new = path_parts not in remaining_manifest_paths
if not updated and is_new:
# This is kind of a bandaid; if we ended up here the cache
# was invalid but we've been using it anyway. That's obviously
# bad; we should fix the underlying issue that we sometimes
# use an invalid cache. But at least this fixes the immediate
# problem
raise InvalidCacheError
if not updated:
remaining_manifest_paths.remove(path_parts)
else:
assert self.tests_root is not None
source_file = SourceFile(self.tests_root,
path,
self.url_base,
file_hash)
hash_changed = False # type: bool
if not is_new:
if file_hash is None:
file_hash = source_file.hash
remaining_manifest_paths.remove(path_parts)
old_type = types[path_parts]
old_hash = data[old_type].hashes[path_parts]
if old_hash != file_hash:
hash_changed = True
del data[old_type][path_parts]
if is_new or hash_changed:
to_update.append(source_file)
if to_update:
logger.debug("Computing manifest update for %s items" % len(to_update))
changed = True
# 25 items was derived experimentally (2020-01) to be approximately the
# point at which it is quicker to create a Pool and parallelize update.
pool = None
if parallel and len(to_update) > 25 and cpu_count() > 1:
# On Python 3 on Windows, using >= MAXIMUM_WAIT_OBJECTS processes
# causes a crash in the multiprocessing module. Whilst this enum
# can technically have any value, it is usually 64. For safety,
# restrict manifest regeneration to 48 processes on Windows.
#
# See https://bugs.python.org/issue26903 and https://bugs.python.org/issue40263
processes = cpu_count()
if sys.platform == "win32" and processes > 48:
processes = 48
pool = Pool(processes)
# chunksize set > 1 when more than 10000 tests, because
# chunking is a net-gain once we get to very large numbers
# of items (again, experimentally, 2020-01)
chunksize = max(1, len(to_update) // 10000)
logger.debug("Doing a multiprocessed update. CPU count: %s, "
"processes: %s, chunksize: %s" % (cpu_count(), processes, chunksize))
results = pool.imap_unordered(compute_manifest_items,
to_update,
chunksize=chunksize
) # type: Iterator[Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]]
else:
results = map(compute_manifest_items, to_update)
for result in results:
rel_path_parts, new_type, manifest_items, file_hash = result
data[new_type][rel_path_parts] = manifest_items
data[new_type].hashes[rel_path_parts] = file_hash
# Make sure to terminate the Pool, to avoid hangs on Python 3.
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.Pool
if pool is not None:
pool.terminate()
if remaining_manifest_paths:
changed = True
for rel_path_parts in remaining_manifest_paths:
for test_data in data.values():
if rel_path_parts in test_data:
del test_data[rel_path_parts]
return changed
def to_json(self, caller_owns_obj=True):
# type: (bool) -> Dict[Text, Any]
"""Dump a manifest into a object which can be serialized as JSON
If caller_owns_obj is False, then the return value remains
owned by the manifest; it is _vitally important_ that _no_
(even read) operation is done on the manifest, as otherwise
objects within the object graph rooted at the return value can
be mutated. This essentially makes this mode very dangerous
and only to be used under extreme care.
"""
out_items = {
test_type: type_paths.to_json()
for test_type, type_paths in self._data.items() if type_paths
}
if caller_owns_obj:
out_items = deepcopy(out_items)
rv = {"url_base": self.url_base,
"items": out_items,
"version": CURRENT_VERSION} # type: Dict[Text, Any]
return rv
@classmethod
def from_json(cls, tests_root, obj, types=None, callee_owns_obj=False):
# type: (Text, Dict[Text, Any], Optional[Container[Text]], bool) -> Manifest
"""Load a manifest from a JSON object
This loads a manifest for a given local test_root path from an
object obj, potentially partially loading it to only load the
types given by types.
If callee_owns_obj is True, then ownership of obj transfers
to this function when called, and the caller must never mutate
the obj or anything referred to in the object graph rooted at
obj.
"""
version = obj.get("version")
if version != CURRENT_VERSION:
raise ManifestVersionMismatch
self = cls(tests_root, url_base=obj.get("url_base", "/"))
if not hasattr(obj, "items"):
raise ManifestError
for test_type, type_paths in obj["items"].items():
if test_type not in item_classes:
raise ManifestError
if types and test_type not in types:
continue
if not callee_owns_obj:
type_paths = deepcopy(type_paths)
self._data[test_type].set_json(type_paths)
return self
def load(tests_root, manifest, types=None):
# type: (Text, Union[IO[bytes], Text], Optional[Container[Text]]) -> Optional[Manifest]
logger = get_logger()
logger.warning("Prefer load_and_update instead")
return _load(logger, tests_root, manifest, types)
__load_cache = {} # type: Dict[Text, Manifest]
def _load(logger, # type: Logger
tests_root, # type: Text
manifest, # type: Union[IO[bytes], Text]
types=None, # type: Optional[Container[Text]]
allow_cached=True # type: bool
):
# type: (...) -> Optional[Manifest]
manifest_path = (manifest if isinstance(manifest, str)
else manifest.name)
if allow_cached and manifest_path in __load_cache:
return __load_cache[manifest_path]
if isinstance(manifest, str):
if os.path.exists(manifest):
logger.debug("Opening manifest at %s" % manifest)
else:
logger.debug("Creating new manifest at %s" % manifest)
try:
with io.open(manifest, "r", encoding="utf-8") as f:
rv = Manifest.from_json(tests_root,
jsonlib.load(f),
types=types,
callee_owns_obj=True)
except IOError:
return None
except ValueError:
logger.warning("%r may be corrupted", manifest)
return None
else:
rv = Manifest.from_json(tests_root,
jsonlib.load(manifest),
types=types,
callee_owns_obj=True)
if allow_cached:
__load_cache[manifest_path] = rv
return rv
def load_and_update(tests_root, # type: Union[Text, bytes]
manifest_path, # type: Union[Text, bytes]
url_base, # type: Text
update=True, # type: bool
rebuild=False, # type: bool
metadata_path=None, # type: Optional[Union[Text, bytes]]
cache_root=None, # type: Optional[Union[Text, bytes]]
working_copy=True, # type: bool
types=None, # type: Optional[Container[Text]]
write_manifest=True, # type: bool
allow_cached=True, # type: bool
parallel=True # type: bool
):
# type: (...) -> Manifest
# This function is now a facade for the purposes of type conversion, so that
# the external API can accept paths as text or (utf8) bytes, but internal
# functions always use Text.
metadata_path_text = ensure_text(metadata_path) if metadata_path is not None else None
cache_root_text = ensure_text(cache_root) if cache_root is not None else None
return _load_and_update(ensure_text(tests_root),
ensure_text(manifest_path),
url_base,
update=update,
rebuild=rebuild,
metadata_path=metadata_path_text,
cache_root=cache_root_text,
working_copy=working_copy,
types=types,
write_manifest=write_manifest,
allow_cached=allow_cached,
parallel=parallel)
def _load_and_update(tests_root, # type: Text
manifest_path, # type: Text
url_base, # type: Text
update=True, # type: bool
rebuild=False, # type: bool
metadata_path=None, # type: Optional[Text]
cache_root=None, # type: Optional[Text]
working_copy=True, # type: bool
types=None, # type: Optional[Container[Text]]
write_manifest=True, # type: bool
allow_cached=True, # type: bool
parallel=True # type: bool
):
# type: (...) -> Manifest
logger = get_logger()
manifest = None
if not rebuild:
try:
manifest = _load(logger,
tests_root,
manifest_path,
types=types,
allow_cached=allow_cached)
except ManifestVersionMismatch:
logger.info("Manifest version changed, rebuilding")
except ManifestError:
logger.warning("Failed to load manifest, rebuilding")
if manifest is not None and manifest.url_base != url_base:
logger.info("Manifest url base did not match, rebuilding")
manifest = None
if manifest is None:
manifest = Manifest(tests_root, url_base)
rebuild = True
update = True
if rebuild or update:
logger.info("Updating manifest")
for retry in range(2):
try:
tree = vcs.get_tree(tests_root, manifest, manifest_path, cache_root,
working_copy, rebuild)
changed = manifest.update(tree, parallel)
break
except InvalidCacheError:
logger.warning("Manifest cache was invalid, doing a complete rebuild")
rebuild = True
else:
# If we didn't break there was an error
raise
if write_manifest and changed:
write(manifest, manifest_path)
tree.dump_caches()
return manifest
def write(manifest, manifest_path):
# type: (Manifest, Text) -> None
dir_name = os.path.dirname(manifest_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with atomic_write(manifest_path, overwrite=True) as f:
# Use ',' instead of the default ', ' separator to prevent trailing
# spaces: https://docs.python.org/2/library/json.html#json.dump
jsonlib.dump_dist(manifest.to_json(caller_owns_obj=True), f)
f.write("\n")
|
KiChjang/servo
|
tests/wpt/web-platform-tests/tools/manifest/manifest.py
|
Python
|
mpl-2.0
| 18,236
|
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 2017-2019 - Edoardo Morassutto <edoardo.morassutto@gmail.com>
# Copyright 2017 - Luca Versari <veluca93@gmail.com>
# Copyright 2018 - William Di Luigi <williamdiluigi@gmail.com>
import json
from datetime import datetime
from werkzeug.exceptions import HTTPException, BadRequest
from werkzeug.wrappers import Response
from ..handler_params import HandlerParams
from ..config import Config
from ..database import Database
from ..logger import Logger
class BaseHandler:
@staticmethod
def raise_exc(cls, code, message):
"""
        Raise an HTTPException with a code and a message sent in a JSON body like
        {
            "code": code,
            "message": message
        }
        :param cls: HTTPException subclass to raise, for example NotFound, BadRequest, NotAuthorized
        :param code: A brief error code for the exception, like MISSING_PARAMETER
        :param message: A longer description of the error
        :return: Nothing, raises the provided exception with the correct response
"""
response = Response()
response.mimetype = "application/json"
response.status_code = cls.code
response.data = json.dumps({
"code": code,
"message": message
})
Logger.warning(cls.__name__.upper(), code + ": " + message)
raise cls(response=response)
def handle(self, endpoint, route_args, request):
"""
Handle a request in the derived handler. The request is routed to the correct method using *endpoint*
:param endpoint: A string with the name of the class method to call with (route_args, request) as parameters,
this method should return a Response or call self.raise_exc. *NOTE*: the method MUST be implemented in the
derived class
:param route_args: The route parameters, the parameters extracted from the matching route in the URL
:param request: The Request object, request.args contains the query parameters of the request
:return: Return a Response if the request is successful, an HTTPException if an error occurred
"""
try:
data = BaseHandler._call(self.__getattribute__(endpoint), route_args, request)
response = Response()
if data is not None:
response.code = 200
response.mimetype = "application/json"
response.data = json.dumps(data)
else:
response.code = 204
return response
except HTTPException as e:
return e
def parse_body(self, request):
"""
Parse the body part of the request in JSON
:param request: The request to be parsed
:return: A dict with the content of the body
"""
return request.form
@staticmethod
def get_end_time(user_extra_time):
"""
Compute the end time for a user
:param user_extra_time: Extra time specific for the user in seconds
:return: The timestamp at which the contest will be finished for this user
"""
start = Database.get_meta("start_time", type=int)
if start is None:
return None
contest_duration = Database.get_meta("contest_duration", type=int, default=0)
contest_extra_time = Database.get_meta("extra_time", type=int, default=0)
if user_extra_time is None:
user_extra_time = 0
return start + contest_duration + contest_extra_time + user_extra_time
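        # Worked example (assumed figures): start_time = 1_000_000,
        # contest_duration = 7200, extra_time = 300 and user_extra_time = 60
        # give an end time of 1_000_000 + 7200 + 300 + 60 = 1_007_560.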
@staticmethod
def get_window_end_time(user_extra_time, start_delay):
"""
Compute the end time for a window started after `start_delay` and with `extra_time` delay for the user.
Note that this time may exceed the contest end time, additional checks are required.
:param user_extra_time: Extra time specific for the user in seconds
:param start_delay: The time (in seconds) after the start of the contest of when the window started
:return: The timestamp at which the window ends. If the contest has no window None is returned.
"""
if start_delay is None:
return None
start = Database.get_meta("start_time", type=int)
if start is None:
return None
window_duration = Database.get_meta("window_duration", None, type=int)
if window_duration is None:
return None
if user_extra_time is None:
user_extra_time = 0
return start + user_extra_time + start_delay + window_duration
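        # Worked example (assumed figures): start_time = 1_000_000,
        # start_delay = 600, window_duration = 3600 and user_extra_time = 0
        # give 1_000_000 + 0 + 600 + 3600 = 1_004_200, which may fall after
        # the contest end time and must be checked by the caller.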
@staticmethod
def format_dates(dct, fields=["date"]):
"""
Given a dict, format all the *fields* fields from int to iso format. The original dict is modified
:param dct: dict to format
:param fields: list of the names of the fields to format
:return: The modified dict
"""
for k, v in dct.items():
if isinstance(v, dict):
dct[k] = BaseHandler.format_dates(v, fields)
elif isinstance(v, list):
for item in v:
BaseHandler.format_dates(item, fields)
elif k in fields and v is not None:
dct[k] = datetime.fromtimestamp(v).isoformat()
return dct
@staticmethod
def _call(method, route_args, request):
"""
This function is MAGIC!
        It takes a method, reads its parameters and automagically fetches the values from the request. Type annotations
        are also supported for simple type validation.
The values are fetched, in order, from:
- route_args
- request.form
- general_attrs
- default values
        If a required parameter is not sent, a BadRequest (MISSING_PARAMETERS) error is thrown; if a parameter cannot
        be converted to the annotated type, a BadRequest (FORMAT_ERROR) is thrown.
:param method: Method to be called
:param route_args: Arguments of the route
:param request: Request object
:return: The return value of method
"""
kwargs = {}
params = HandlerParams.get_handler_params(method)
general_attrs = {
'_request': request,
'_route_args': route_args,
'_file': {
"content": BaseHandler._get_file_content(request),
"name": BaseHandler._get_file_name(request)
},
'_ip': BaseHandler.get_ip(request)
}
missing_parameters = []
for name, data in params.items():
if name in route_args and name[0] != "_":
kwargs[name] = route_args[name]
elif name in request.form and name[0] != "_":
kwargs[name] = request.form[name]
elif name in general_attrs:
kwargs[name] = general_attrs[name]
elif name == "file" and general_attrs["_file"]["name"] is not None:
kwargs[name] = general_attrs["_file"]
elif data["required"]:
missing_parameters.append(name)
if len(missing_parameters) > 0:
BaseHandler.raise_exc(BadRequest, "MISSING_PARAMETERS",
"The missing parameters are: " + ", ".join(missing_parameters))
for key, value in kwargs.items():
type = params[key]["type"]
if type is None: continue
try:
kwargs[key] = type(value)
except ValueError:
BaseHandler.raise_exc(BadRequest, "FORMAT_ERROR",
"The parameter %s cannot be converted to %s" % (key, type.__name__))
Logger.debug(
"HTTP",
"Received request from %s for endpoint %s%s" %
(
general_attrs['_ip'],
method.__name__,
", with parameters " + ", ".join(
"=".join((kv[0], str(kv[1]))) for kv in kwargs.items()
if not kv[0].startswith("_") and not kv[0] == "file"
) if len(kwargs) > 0 else ""
)
)
return method(**kwargs)
@staticmethod
def _get_file_name(request):
"""
Extract the name of the file from the multipart body
:param request: The Request object
:return: The filename in the request
"""
if "file" not in request.files:
return None
return request.files["file"].filename
@staticmethod
def _get_file_content(request):
"""
Extract the content of the file from the multipart of the body
:param request: The Request object
:return: A *bytes* with the content of the file
"""
if "file" not in request.files:
return None
return request.files["file"].stream.read()
@staticmethod
def get_ip(request):
"""
Return the real IP of the client
:param request: The Request object
:return: A string with the IP of the client
"""
num_proxies = Config.num_proxies
if num_proxies == 0 or len(request.access_route) < num_proxies:
return request.remote_addr
return request.access_route[-num_proxies]
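# --- Editor's note: a hypothetical handler sketch, not part of terry itself. ---
# It illustrates how handle() dispatches on the endpoint name and how _call()
# injects parameters: `token` would come from the route/form data and `_ip`
# from the request, as described in the docstrings above. The class and method
# names are made up for illustration.
class EchoHandler(BaseHandler):
    def echo(self, token: str, _ip):
        # `token` is required here: if absent, _call raises MISSING_PARAMETERS;
        # if present but not convertible to str, it raises FORMAT_ERROR.
        return {"token": token, "ip": _ip}
# A router would then serve it with EchoHandler().handle("echo", route_args, request).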
|
algorithm-ninja/territoriali-backend
|
terry/handlers/base_handler.py
|
Python
|
mpl-2.0
| 9,492
|
import logging
from datetime import datetime
import os
from flask import request, g, Response, jsonify
from openspending.views.api_v4.common import blueprint
from openspending.lib.apihelper import DataBrowser_v4, GEO_MAPPING,FORMATOPTS
from openspending.lib.jsonexport import to_json
from openspending.lib.helpers import get_dataset
from openspending.lib.cache import cache_key
from openspending.core import cache
from openspending.views.error import api_json_errors
log = logging.getLogger(__name__)
def xlschecker(*args, **kwargs):
if "format" in request.args:
if request.args.get("format") in ['excel', 'csv']:
return True
return False
@blueprint.route("/api/4/slicer/aggregate", methods=["JSON", "GET"])
@api_json_errors
@cache.cached(timeout=60, key_prefix=cache_key, unless=xlschecker)
def slicer_agg():
d = DataBrowser_v4()
return d.get_response()
@blueprint.route("/api/4/slicer/model", methods=["JSON", "GET"])
@api_json_errors
@cache.cached(timeout=60, key_prefix=cache_key)
def slicer_model():
#options
#get dataset info
results = {
"models": {},
"options": {}
}
cubesarg = request.args.get("cubes", [])
cubes = cubesarg.split("|")
for cube in cubes:
dataset = get_dataset(cube)
if dataset:
results['models'][cube] = dataset.detailed_dict()
results['options'] = GEO_MAPPING
    results['formats'] = FORMATOPTS
    resp = Response(response=to_json(results),
                    status=200,
                    mimetype="application/json")
return resp
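# --- Editor's note: hypothetical request sketches, not part of the module. ---
# Based on the routes above:
#   GET /api/4/slicer/model?cubes=cube_a|cube_b
#       -> per-cube detailed_dict() under "models", plus GEO_MAPPING and FORMATOPTS
#   GET /api/4/slicer/aggregate?format=csv
#       -> aggregate via DataBrowser_v4; format=csv/excel bypasses the cache (xlschecker)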
|
USStateDept/FPA_Core
|
openspending/views/api_v4/aggregate.py
|
Python
|
agpl-3.0
| 1,586
|
# pylint: disable=import-error,protected-access,too-few-public-methods
# Copyright 2016-2017 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import SavepointCase
class TestStockQuantityChangeReason(SavepointCase):
@classmethod
def setUpClass(cls):
super(TestStockQuantityChangeReason, cls).setUpClass()
# MODELS
cls.stock_move = cls.env["stock.move"]
cls.product_product_model = cls.env["product.product"]
cls.product_category_model = cls.env["product.category"]
cls.wizard_model = cls.env["stock.change.product.qty"]
cls.preset_reason_id = cls.env["stock.inventory.line.reason"]
cls.stock_location = cls.env.ref("stock.stock_location_stock")
# INSTANCES
cls.category = cls.product_category_model.create({"name": "Physical (test)"})
def _create_product(self, name):
return self.product_product_model.create(
{"name": name, "categ_id": self.category.id, "type": "product"}
)
def _product_change_qty(self, product, new_qty):
values = {
"product_tmpl_id": product.product_tmpl_id.id,
"product_id": product.id,
"new_quantity": new_qty,
}
wizard = self.wizard_model.create(values)
wizard.change_product_qty()
def _create_reason(self, name, description=None):
return self.preset_reason_id.create({"name": name, "description": description})
def test_inventory_adjustment_onchange_reason_preset_reason(self):
"""Check that adding a reason or a preset reason explode to lines"""
product2 = self._create_product("product_product_2")
self._product_change_qty(product2, 50)
inventory = self.env["stock.inventory"].create(
{
"name": "remove product2",
"product_ids": [(4, product2.id)],
"location_ids": [(4, self.stock_location.id)],
}
)
inventory.preset_reason_id = self._create_reason("Test 1", "Description Test 1")
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1)
inventory.reason = "Reason 2"
inventory.onchange_reason()
self.assertEqual(inventory.line_ids.reason, inventory.reason)
inventory.preset_reason_id = self._create_reason("Test 2", "Description Test 2")
inventory.onchange_preset_reason()
self.assertEqual(
inventory.line_ids.preset_reason_id, inventory.preset_reason_id
)
inventory.line_ids[0].write({"product_qty": 10})
inventory.action_validate()
move = self.stock_move.search(
[("product_id", "=", product2.id), ("preset_reason_id", "!=", False)]
)
self.assertEqual(len(move), 1)
self.assertEqual(move.origin, inventory.preset_reason_id.name)
self.assertEqual(move.preset_reason_id, inventory.preset_reason_id)
|
OCA/stock-logistics-warehouse
|
stock_change_qty_reason/tests/test_stock_change_qty_reason.py
|
Python
|
agpl-3.0
| 3,032
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Silvina Faner (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
import netsvc
class report_receipt_print(report_sxw.rml_parse):
_name = 'report.receipt.print'
def __init__(self, cr, uid, name, context):
super(report_receipt_print, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'convert': self.convert,
})
    def convert(self, amount, currency):
        return self.pool.get('ir.translation').amount_to_text(amount, 'pe', currency or 'Pesos')
report_sxw.report_sxw(
'report.receipt.print',
'receipt.receipt',
'trunk/receipt_pay/report/receipt_pay_print.rml',
    parser=report_receipt_print, header="external"
)
|
pronexo-odoo/odoo-argentina
|
l10n_ar_receipt/report/receipt_print.py
|
Python
|
agpl-3.0
| 1,664
|
# -*- coding: utf-8 -*-
"""Calculate controller"""
from __future__ import division
import collections
import copy
import itertools
import os
import time
from openfisca_core.legislations import ParameterNotFound
from .. import conf, contexts, conv, environment, model, wsgihelpers
def N_(message):
return message
def build_output_variables(simulations, use_label, variables):
return [
{
variable: simulation.get_holder(variable).to_value_json(use_label = use_label)
for variable in variables
}
for simulation in simulations
]
def fill_test_cases_with_values(intermediate_variables, scenarios, simulations, use_label, variables):
output_test_cases = []
for scenario, simulation in itertools.izip(scenarios, simulations):
if intermediate_variables:
holders = []
for step in simulation.traceback.itervalues():
holder = step['holder']
if holder not in holders:
holders.append(holder)
else:
holders = [
simulation.get_holder(variable)
for variable in variables
]
test_case = scenario.to_json()['test_case']
for holder in holders:
variable_value_json = holder.to_value_json(use_label = use_label)
if variable_value_json is None:
continue
variable_name = holder.column.name
entity_members = test_case[holder.entity.key_plural]
if isinstance(variable_value_json, dict):
for entity_member_index, entity_member in enumerate(entity_members):
entity_member[variable_name] = {}
for period, array_or_dict_json in variable_value_json.iteritems():
if type(array_or_dict_json) == dict:
if len(array_or_dict_json) == 1:
entity_member[variable_name][period] = \
array_or_dict_json[array_or_dict_json.keys()[0]][entity_member_index]
else:
entity_member[variable_name][period] = {}
for key, array in array_or_dict_json.iteritems():
entity_member[variable_name][period][key] = array[entity_member_index]
else:
entity_member[variable_name][period] = array_or_dict_json[entity_member_index]
else:
for entity_member, cell_json in itertools.izip(entity_members, variable_value_json):
entity_member[variable_name] = cell_json
output_test_cases.append(test_case)
return output_test_cases
@wsgihelpers.wsgify
def api1_calculate(req):
def calculate_simulations(scenarios, variables, trace):
simulations = []
for scenario_index, scenario in enumerate(scenarios):
simulation = scenario.new_simulation(trace = trace)
for variable_name in variables:
try:
simulation.calculate_output(variable_name)
except ParameterNotFound as exc:
raise wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
context = inputs.get('context'),
error = collections.OrderedDict(sorted(dict(
code = 500,
errors = [{"scenarios": {scenario_index: exc.to_json()}}],
).iteritems())),
method = req.script_name,
params = inputs,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
simulations.append(simulation)
return simulations
total_start_time = time.time()
ctx = contexts.Ctx(req)
headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
assert req.method == 'POST', req.method
if conf['load_alert']:
try:
load_average = os.getloadavg()
except (AttributeError, OSError):
# When load average is not available, always accept request.
pass
else:
if load_average[0] / environment.cpu_count > 1:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 503, # Service Unavailable
message = ctx._(u'Server is overloaded: {} {} {}').format(*load_average),
).iteritems())),
method = req.script_name,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
content_type = req.content_type
if content_type is not None:
content_type = content_type.split(';', 1)[0].strip()
if content_type != 'application/json':
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
message = ctx._(u'Bad content-type: {}').format(content_type),
).iteritems())),
method = req.script_name,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
inputs, error = conv.pipe(
conv.make_input_to_json(object_pairs_hook = collections.OrderedDict),
conv.test_isinstance(dict),
conv.not_none,
)(req.body, state = ctx)
if error is not None:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
errors = [conv.jsonify_value(error)],
message = ctx._(u'Invalid JSON in request POST body'),
).iteritems())),
method = req.script_name,
params = req.body,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
str_list_to_reforms = conv.make_str_list_to_reforms()
data, errors = conv.struct(
dict(
base_reforms = str_list_to_reforms,
context = conv.test_isinstance(basestring), # For asynchronous calls
intermediate_variables = conv.pipe(
conv.test_isinstance((bool, int)),
conv.anything_to_bool,
conv.default(False),
),
labels = conv.pipe( # Return labels (of enumerations) instead of numeric values.
conv.test_isinstance((bool, int)),
conv.anything_to_bool,
conv.default(False),
),
output_format = conv.pipe(
conv.test_isinstance(basestring),
conv.test_in(['test_case', 'variables']),
conv.default('test_case'),
),
reforms = str_list_to_reforms,
scenarios = conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.not_none, # Real conversion is done once tax-benefit system is known.
),
conv.test(lambda scenarios: len(scenarios) >= 1, error = N_(u'At least one scenario is required')),
conv.test(lambda scenarios: len(scenarios) <= 100,
error = N_(u"There can't be more than 100 scenarios")),
conv.not_none,
),
time = conv.pipe(
conv.test_isinstance((bool, int)),
conv.anything_to_bool,
conv.default(False),
),
trace = conv.pipe(
conv.test_isinstance((bool, int)),
conv.anything_to_bool,
conv.default(False),
),
validate = conv.pipe(
conv.test_isinstance((bool, int)),
conv.anything_to_bool,
conv.default(False),
),
variables = conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.pipe(
conv.test_isinstance(basestring),
conv.empty_to_none,
# Remaining of conversion is done once tax-benefit system is known.
conv.not_none,
),
constructor = set,
),
conv.test(lambda variables: len(variables) >= 1, error = N_(u'At least one variable is required')),
conv.not_none,
),
),
)(inputs, state = ctx)
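    # Editor's note: an illustrative request body accepted by the validator
    # above (field names come from the conv.struct spec; the values are made up):
    #   {
    #     "scenarios": [{...}],               # 1 to 100 scenario JSON objects
    #     "variables": ["some_variable"],     # at least one variable name (placeholder)
    #     "output_format": "test_case",       # or "variables"
    #     "reforms": null, "base_reforms": null,
    #     "labels": false, "trace": false, "validate": false, "time": false
    #   }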
if errors is None:
compose_reforms_start_time = time.time()
country_tax_benefit_system = model.tax_benefit_system
base_tax_benefit_system = model.get_cached_composed_reform(
reform_keys = data['base_reforms'],
tax_benefit_system = country_tax_benefit_system,
) if data['base_reforms'] is not None else country_tax_benefit_system
if data['reforms'] is not None:
reform_tax_benefit_system = model.get_cached_composed_reform(
reform_keys = data['reforms'],
tax_benefit_system = base_tax_benefit_system,
)
compose_reforms_end_time = time.time()
compose_reforms_time = compose_reforms_end_time - compose_reforms_start_time
build_scenarios_start_time = time.time()
base_scenarios, base_scenarios_errors = conv.uniform_sequence(
base_tax_benefit_system.Scenario.make_json_to_cached_or_new_instance(
ctx = ctx,
repair = data['validate'],
tax_benefit_system = base_tax_benefit_system,
)
)(data['scenarios'], state = ctx)
errors = {'scenarios': base_scenarios_errors} if base_scenarios_errors is not None else None
if errors is None and data['reforms'] is not None:
reform_scenarios, reform_scenarios_errors = conv.uniform_sequence(
reform_tax_benefit_system.Scenario.make_json_to_cached_or_new_instance(
ctx = ctx,
repair = data['validate'],
tax_benefit_system = reform_tax_benefit_system,
)
)(data['scenarios'], state = ctx)
errors = {'scenarios': reform_scenarios_errors} if reform_scenarios_errors is not None else None
build_scenarios_end_time = time.time()
build_scenarios_time = build_scenarios_end_time - build_scenarios_start_time
if errors is None:
data, errors = conv.struct(
dict(
variables = conv.uniform_sequence(
conv.make_validate_variable(
base_tax_benefit_system = base_tax_benefit_system,
reform_tax_benefit_system = reform_tax_benefit_system if data['reforms'] else None,
reforms = data['reforms'],
),
),
),
default = conv.noop,
)(data, state = ctx)
if errors is not None:
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(dict(
apiVersion = 1,
context = inputs.get('context'),
error = collections.OrderedDict(sorted(dict(
code = 400, # Bad Request
errors = [conv.jsonify_value(errors)],
message = ctx._(u'Bad parameters in request'),
).iteritems())),
method = req.script_name,
params = inputs,
url = req.url.decode('utf-8'),
).iteritems())),
headers = headers,
)
scenarios = base_scenarios if data['reforms'] is None else reform_scenarios
suggestions = {}
for scenario_index, scenario in enumerate(scenarios):
if data['validate']:
original_test_case = scenario.test_case
scenario.test_case = copy.deepcopy(original_test_case)
suggestion = scenario.suggest() # This modifies scenario.test_case!
if data['validate']:
scenario.test_case = original_test_case
if suggestion is not None:
suggestions.setdefault('scenarios', {})[scenario_index] = suggestion
if not suggestions:
suggestions = None
if data['validate']:
# Only a validation is requested. Don't launch simulation
total_end_time = time.time()
total_time = total_end_time - total_start_time
response_data = dict(
apiVersion = 1,
context = inputs.get('context'),
method = req.script_name,
params = inputs,
repaired_scenarios = [
scenario.to_json()
for scenario in scenarios
],
suggestions = suggestions,
url = req.url.decode('utf-8'),
)
if data['time']:
response_data['time'] = collections.OrderedDict(sorted(dict(
build_scenarios = build_scenarios_time,
compose_reforms = compose_reforms_time,
total = total_time,
            ).iteritems()))
return wsgihelpers.respond_json(ctx,
collections.OrderedDict(sorted(response_data.iteritems())),
headers = headers,
)
calculate_simulation_start_time = time.time()
trace_simulations = data['trace'] or data['intermediate_variables']
base_simulations = calculate_simulations(scenarios, data['variables'], trace = trace_simulations)
if data['reforms'] is not None:
reform_simulations = calculate_simulations(reform_scenarios, data['variables'], trace = trace_simulations)
calculate_simulation_end_time = time.time()
calculate_simulation_time = calculate_simulation_end_time - calculate_simulation_start_time
if data['output_format'] == 'test_case':
base_value = fill_test_cases_with_values(
intermediate_variables = data['intermediate_variables'],
scenarios = base_scenarios,
simulations = base_simulations,
use_label = data['labels'],
variables = data['variables'],
)
if data['reforms'] is not None:
reform_value = fill_test_cases_with_values(
intermediate_variables = data['intermediate_variables'],
scenarios = reform_scenarios,
simulations = reform_simulations,
use_label = data['labels'],
variables = data['variables'],
)
else:
assert data['output_format'] == 'variables'
base_value = build_output_variables(
simulations = base_simulations,
use_label = data['labels'],
variables = data['variables'],
)
if data['reforms'] is not None:
reform_value = build_output_variables(
simulations = reform_simulations,
use_label = data['labels'],
variables = data['variables'],
)
if data['trace']:
simulations_variables_json = []
tracebacks_json = []
simulations = reform_simulations if data['reforms'] is not None else base_simulations
for simulation in simulations:
simulation_variables_json = {}
traceback_json = []
for (variable_name, period), step in simulation.traceback.iteritems():
holder = step['holder']
if variable_name not in simulation_variables_json:
variable_value_json = holder.to_value_json()
if variable_value_json is not None:
simulation_variables_json[variable_name] = variable_value_json
column = holder.column
input_variables_infos = step.get('input_variables_infos')
parameters_infos = step.get('parameters_infos')
traceback_json.append(collections.OrderedDict(sorted(dict(
cell_type = column.val_type, # Unification with OpenFisca Julia name.
default_input_variables = step.get('default_input_variables', False),
entity = column.entity,
input_variables = [
(input_variable_name, str(input_variable_period))
for input_variable_name, input_variable_period in input_variables_infos
] if input_variables_infos else None,
is_computed = step.get('is_computed', False),
label = column.label if column.label != variable_name else None,
name = variable_name,
parameters = parameters_infos or None,
period = str(period) if period is not None else None,
).iteritems())))
simulations_variables_json.append(simulation_variables_json)
tracebacks_json.append(traceback_json)
else:
simulations_variables_json = None
tracebacks_json = None
response_data = collections.OrderedDict(sorted(dict(
apiVersion = 1,
context = data['context'],
method = req.script_name,
params = inputs,
suggestions = suggestions,
tracebacks = tracebacks_json,
url = req.url.decode('utf-8'),
value = reform_value if data['reforms'] is not None else base_value,
variables = simulations_variables_json,
).iteritems()))
if data['reforms'] is not None:
response_data['base_value'] = base_value
total_end_time = time.time()
total_time = total_end_time - total_start_time
if data['time']:
response_data['time'] = collections.OrderedDict(sorted(dict(
build_scenarios = build_scenarios_time,
compose_reforms = compose_reforms_time,
calculate_simulation = calculate_simulation_time,
total = total_time,
).iteritems()))
return wsgihelpers.respond_json(ctx, response_data, headers = headers)
|
sgmap/openfisca-web-api
|
openfisca_web_api/controllers/calculate.py
|
Python
|
agpl-3.0
| 19,065
|
from ..extensions import celery, redis_store
from ..models.taxis import Taxi
import time
from flask import current_app
@celery.task
def clean_geoindex():
keys_to_clean = []
cursor = 0
taxi_id = set()
    # redis SCAN returns cursor 0 once the iteration is complete, so None is
    # used as a "not started yet" sentinel and mapped to 0 for the first call.
    cursor = None
    while cursor != 0:
        if cursor is None:
            cursor = 0
cursor, result = redis_store.scan(cursor, 'taxi:*')
pipe = redis_store.pipeline()
for key in result:
pipe.hvals(key)
values = pipe.execute()
lower_bound = int(time.time()) - 60 * 60
pipe = redis_store.pipeline()
for (key, l) in zip(result, values):
if any(map(lambda v: Taxi.parse_redis(v)['timestamp'] >= lower_bound, l)):
continue
pipe.zrem(current_app.config['REDIS_GEOINDEX'], key)
pipe.execute()
    # Maybe it would be more efficient to also delete some of the taxis from the
    # global map, but then we would lose the information of when each taxi was
    # last active; it would be good to log that in the database first.
|
odtvince/APITaxi
|
APITaxi/tasks/clean_geoindex.py
|
Python
|
agpl-3.0
| 1,030
|
"""
WSGI config for credentials.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from os.path import abspath, dirname
from sys import path
from django.core.wsgi import get_wsgi_application
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "credentials.settings.local")
application = get_wsgi_application() # pylint: disable=invalid-name
|
edx/credentials
|
credentials/wsgi.py
|
Python
|
agpl-3.0
| 559
|
"""
column 'instances' will be deleted later. Has to be nullable for transition
Revision ID: 266658781c00
Revises: 204aae05372a
Create Date: 2019-04-15 16:27:22.362244
"""
# revision identifiers, used by Alembic.
revision = '266658781c00'
down_revision = '204aae05372a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.alter_column('equipments_provider', 'instances', existing_type=postgresql.ARRAY(sa.TEXT()), nullable=True)
def downgrade():
op.alter_column(
'equipments_provider', 'instances', existing_type=postgresql.ARRAY(sa.TEXT()), nullable=False
)
|
pbougue/navitia
|
source/tyr/migrations/versions/266658781c00_instances_nullable_in_equipments_provider.py
|
Python
|
agpl-3.0
| 643
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys, urllib2
def main():
if len(sys.argv) < 2:
print("Error, usage: {0} <your url>".format(sys.argv[0]))
return 1
url = sys.argv[1]
print(urllib2.urlopen('http://t34.me/api/?u=' + url).read())
return 0
if __name__ == '__main__':
main()
|
z0rr0/t34.me
|
configs/api_python.py
|
Python
|
agpl-3.0
| 328
|
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.core.urlresolvers import reverse
import simplejson as json
from utils import test_factories
class TestEditor(object):
"""Simulates the editor widget for unit tests"""
def __init__(self, client, video, original_language_code=None,
base_language_code=None, mode=None):
"""Construct a TestEditor
:param client: django TestClient object for HTTP requests
:param video: Video object to edit
:param original_language_code: language code for the video audio.
Should be set if and only if the primary_audio_language_code hasn't
been set for the video.
:param base_language_code: base language code for to use for
translation tasks.
:param mode: one of ("review", "approve" or None)
"""
self.client = client
self.video = video
self.base_language_code = base_language_code
if original_language_code is None:
self.original_language_code = video.primary_audio_language_code
else:
if video.primary_audio_language_code is not None:
raise AssertionError(
"primary_audio_language_code is set (%r)" %
video.primary_audio_language_code)
self.original_language_code = original_language_code
self.mode = mode
self.task_approved = None
self.task_id = None
self.task_notes = None
self.task_type = None
def set_task_data(self, task, approved, notes):
"""Set data for the task that this edit is for.
:param task: Task object
:param approved: did the user approve the task. Should be one of the
values of Task.APPROVED_IDS.
:param notes: String to set for notes
"""
type_map = {
10: 'subtitle',
20: 'translate',
30: 'review',
40: 'approve',
}
self.task_id = task.id
self.task_type = type_map[task.type]
self.task_notes = notes
self.task_approved = approved
def _submit_widget_rpc(self, method, **data):
"""POST data to the widget:rpc view."""
url = reverse('widget:rpc', args=(method,))
post_data = dict((k, json.dumps(v)) for k, v in data.items())
response = self.client.post(url, post_data)
response_data = json.loads(response.content)
if 'error' in response_data:
raise AssertionError("Error calling widget rpc method %s:\n%s" %
(method, response_data['error']))
return response_data
def run(self, language_code, completed=True, save_for_later=False):
"""Make the HTTP requests to simulate the editor
We will use test_factories.dxfp_sample() for the subtitle data.
:param language_code: code for the language of these subtitles
:param completed: simulate the completed checkbox being set
:param save_for_later: simulate the save for later button
"""
self._submit_widget_rpc('fetch_start_dialog_contents',
video_id=self.video.video_id)
existing_language = self.video.subtitle_language(language_code)
if existing_language is not None:
subtitle_language_pk = existing_language.pk
else:
subtitle_language_pk = None
response_data = self._submit_widget_rpc(
'start_editing',
video_id=self.video.video_id,
language_code=language_code,
original_language_code=self.original_language_code,
base_language_code=self.base_language_code,
mode=self.mode,
subtitle_language_pk=subtitle_language_pk)
session_pk = response_data['session_pk']
self._submit_widget_rpc('finished_subtitles',
completed=completed,
save_for_later=save_for_later,
session_pk=session_pk,
subtitles=test_factories.dxfp_sample('en'),
task_approved=self.task_approved,
task_id=self.task_id,
task_notes=self.task_notes,
task_type=self.task_type)
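# --- Editor's note: a hypothetical usage sketch, not part of the module. ---
# Inside a unit test one might drive the widget like this, assuming `client`
# is a Django test client and `video` comes from test_factories:
#
#     editor = TestEditor(client, video, original_language_code='en')
#     editor.run('fr', completed=True)
#
# which issues the fetch_start_dialog_contents / start_editing /
# finished_subtitles RPC calls shown above.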
|
ujdhesa/unisubs
|
utils/testeditor.py
|
Python
|
agpl-3.0
| 5,102
|
from zou.app.models.search_filter import SearchFilter
from .base import BaseModelResource, BaseModelsResource
class SearchFiltersResource(BaseModelsResource):
def __init__(self):
BaseModelsResource.__init__(self, SearchFilter)
class SearchFilterResource(BaseModelResource):
def __init__(self):
BaseModelResource.__init__(self, SearchFilter)
|
cgwire/zou
|
zou/app/blueprints/crud/search_filter.py
|
Python
|
agpl-3.0
| 370
|
index_power=lambda a,n:a[n]**n if n<len(a)else-1
|
aureooms/checkio
|
elementary/02-index-power.py
|
Python
|
agpl-3.0
| 49
|
# -*- coding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# info@vauxoo.com
############################################################################
# Coded by: Rodo (rodo@vauxoo.com),Moy (moylop260@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class product_product(orm.Model):
_inherit = "product.product"
_columns = {
'product_customer_code_ids': fields.one2many('product.customer.code',
'product_id',
'Customer Codes'),
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default['product_customer_code_ids'] = False
res = super(product_product, self).copy(
cr, uid, id, default=default, context=context)
return res
def name_search(self, cr, user, name='', args=None, operator='ilike',
context=None, limit=80):
res = super(product_product, self).name_search(
cr, user, name, args, operator, context, limit)
if not context:
context = {}
product_customer_code_obj = self.pool.get('product.customer.code')
if not res:
ids = []
partner_id = context.get('partner_id', False)
if partner_id:
id_prod_code = \
product_customer_code_obj.search(cr, user,
[('product_code',
'=', name),
('partner_id', '=',
partner_id)],
limit=limit,
context=context)
# TODO: Search for product customer name
id_prod = id_prod_code and product_customer_code_obj.browse(
cr, user, id_prod_code, context=context) or []
for ppu in id_prod:
ids.append(ppu.product_id.id)
if ids:
res = self.name_get(cr, user, ids, context)
return res
|
cgstudiomap/cgstudiomap
|
main/parts/product-attribute/product_customer_code/product.py
|
Python
|
agpl-3.0
| 3,314
|
from flask_wtf import Form
from wtforms import HiddenField, StringField
from wtforms.validators import InputRequired, EqualTo
from flask_login import current_user, abort, login_required
from flask import request, flash, redirect, render_template
import random
import bcrypt
from models.user_model import User
from .. import blueprint
class ResetForm(Form):
who = HiddenField()
confirm_who = StringField('Confirm Username', validators=[InputRequired(),
EqualTo('who')])
@blueprint.route("/reset/<what>", methods=["POST"])
@login_required
def reset(what):
if not current_user.has_permission('reset.{}'.format(what)):
abort(403)
form = ResetForm(request.form)
user = User.objects(name=form.who.data).first()
if user is None:
abort(401)
if form.validate():
if what == 'password':
            password = ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(16))
user.hash = bcrypt.hashpw(password, bcrypt.gensalt())
user.save()
return render_template('profile_reset_password_successful.html', user=user, password=password)
elif what == 'tfa':
user.tfa = False
user.tfa_secret = ''
user.save()
return render_template('profile_reset_tfa_successful.html', user=user)
else:
abort(401)
flash('Error in reset form. Make sure you are typing the confirmation token correctly.', category='alert')
return redirect(user.get_profile_url()), 303
|
JunctionAt/JunctionWWW
|
blueprints/player_profiles/views/admin_reset.py
|
Python
|
agpl-3.0
| 1,619
|
from django.shortcuts import render
from rest_framework import generics, serializers
from beacon.models import Inquiry, Reply
class InquirySerializer(serializers.ModelSerializer):
class Meta:
model = Inquiry
class ReplySerializer(serializers.ModelSerializer):
class Meta:
model = Reply
class InquiryUpdateAPIView(generics.RetrieveUpdateAPIView):
serializer_class = InquirySerializer
queryset = Inquiry.objects.all()
def dispatch(self, request, *args, **kwargs):
print(request)
print(request.body)
return super().dispatch(request,*args,**kwargs)
class ReplyListAPIView(generics.RetrieveAPIView):
serializer_class = ReplySerializer
queryset = Reply.objects.all()
|
SorenOlegnowicz/tracker
|
tracker/api/views.py
|
Python
|
agpl-3.0
| 739
|
# -*- coding: utf-8 -*-
# Copyright (C) 2018 Compassion CH
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
import pytz
from odoo import models, fields, api
_logger = logging.getLogger(__name__)
class HrAttendanceDay(models.Model):
"""
    Instances of hr.attendance.day are created either at the first
    attendance of the day or by the method
    hr.employee._cron_create_attendance(), called by a daily cron.
"""
_name = "hr.attendance.day"
_description = "Attendance day"
_order = 'date DESC'
_sql_constraints = [('unique_attendance_day', 'unique(date, employee_id)',
'This "Attendance day" already exists for this '
'employee')]
##########################################################################
# FIELDS #
##########################################################################
date = fields.Date(required=True, default=fields.Date.today())
employee_id = fields.Many2one(
'hr.employee', "Employee", ondelete="cascade", required=True,
default=lambda self: self.env.user.employee_ids[0].id)
# Working schedule
working_schedule_id = fields.Many2one('resource.calendar', store=True,
string='Working schedule',
compute='_compute_working_schedule',
inverse='_inverse_working_schedule')
cal_att_ids = fields.Many2many('resource.calendar.attendance', store=True,
compute='_compute_cal_att_ids')
working_day = fields.Char(compute='_compute_working_day',
readonly=True, store=True)
name = fields.Char(compute='_compute_name', store=True)
# Leaves
leave_ids = fields.Many2many('hr.holidays', string='Leaves')
# todo replace by employee_id.is_absent_totay
in_leave = fields.Boolean('In leave', compute='_compute_in_leave',
store=True)
public_holiday_id = fields.Many2one('hr.holidays.public.line',
'Public holidays')
# Due hours
due_hours = fields.Float('Due hours', compute='_compute_due_hours',
readonly=True, store=True)
# Attendances
attendance_ids = fields.One2many('hr.attendance', 'attendance_day_id',
'Attendances', readonly=True)
has_change_day_request = fields.Boolean(
compute='_compute_has_change_day_request', store=True,
oldname='has_linked_change_day_request'
)
# Worked
paid_hours = fields.Float(
compute='_compute_paid_hours', store=True, readonly=True
)
free_breaks_hours = fields.Float(compute='_compute_free_break_hours')
total_attendance = fields.Float(
compute='_compute_total_attendance', store=True,
help='Sum of all attendances of the day'
)
coefficient = fields.Float(help='Worked hours coefficient')
# Break
due_break_min = fields.Float('Minimum break due',
compute='_compute_due_break')
due_break_total = fields.Float('Total break due',
compute='_compute_due_break')
break_ids = fields.One2many('hr.attendance.break',
'attendance_day_id',
'Breaks',
readonly=True)
break_total = fields.Float('Total break',
compute='_compute_break_total',
store=True)
rule_id = fields.Many2one('hr.attendance.rules', 'Rules',
compute='_compute_rule_id')
day_balance = fields.Float("Day balance",
compute='_compute_day_balance',
store=True)
##########################################################################
# FIELDS METHODS #
##########################################################################
@api.multi
@api.depends('attendance_ids')
def _compute_working_schedule(self):
for att_day in self:
            # Find the corresponding resource.calendar
            # First check if the attendance already has a resource.calendar
schedule = att_day.attendance_ids.mapped('working_schedule_id')
if schedule:
# if there is more than one resource.calendar take one...
schedule = schedule[0]
else:
# look for a valid contract...
# todo: check if att_day.employee_id.current_contract is enough
contracts = self.env['hr.contract'].search([
('employee_id', '=', att_day.employee_id.id),
('date_start', '<=', att_day.date),
'|', ('date_end', '=', False),
('date_end', '>=', att_day.date)
], order='date_start desc', limit=1)
# ...or take the resource.calendar of employee
schedule = contracts.working_hours or (
att_day.employee_id.calendar_id)
att_day.working_schedule_id = schedule
def _inverse_working_schedule(self):
for att_day in self:
for att in self.attendance_ids:
att.working_schedule_id = att_day.working_schedule_id
@api.multi
@api.depends('working_schedule_id', 'working_schedule_id.attendance_ids')
def _compute_cal_att_ids(self):
"""
        Find the resource.calendar.attendance records matching this day
"""
for att_day in self:
week_day = fields.Date.from_string(att_day.date).weekday()
# select the calendar attendance(s) that are valid today.
current_cal_att = att_day.working_schedule_id.mapped(
'attendance_ids').filtered(
lambda a: int(a.dayofweek) == week_day)
# Specific period
att_schedule = current_cal_att.filtered(
lambda r: r.date_from is not False and
r.date_to is not False and
r.date_from <= att_day.date <= r.date_to)
# Period with only date_to or date_from
if not att_schedule:
att_schedule = current_cal_att.filtered(
lambda r:
(r.date_from <= att_day.date and not r.date_to and r.date_from) or
(r.date_to >= att_day.date and not r.date_from and r.date_to))
# Default schedule
if not att_schedule:
att_schedule = current_cal_att.filtered(
lambda r: not r.date_from and not r.date_to)
att_day.cal_att_ids = att_schedule
@api.multi
def get_related_forced_due_hours(self):
self.ensure_one()
return self.env['hr.forced.due.hours'].search([
('employee_id', '=', self.employee_id.id),
('date', '=', self.date)])
@api.multi
@api.depends('due_hours')
def _compute_has_change_day_request(self):
for att_day in self:
res = att_day.get_related_forced_due_hours()
att_day.has_change_day_request = len(res) == 1
@api.multi
@api.depends('date')
def _compute_working_day(self):
for att_day in self:
att_day.working_day = fields.Date.from_string(
att_day.date).strftime('%A').title()
@api.multi
@api.depends('working_day')
def _compute_name(self):
for rd in self:
rd.name = rd.working_day + ' ' + rd.date
@api.multi
@api.depends('leave_ids', 'public_holiday_id')
def _compute_in_leave(self):
for att_day in self:
att_day.in_leave = att_day.leave_ids or att_day.public_holiday_id
@api.multi
@api.depends('working_schedule_id', 'leave_ids', 'public_holiday_id',
'cal_att_ids.due_hours')
def _compute_due_hours(self):
"""First search the due hours based on the contract and after remove
some hours if they are public holiday or vacation"""
for att_day in self:
            # Forced due hours (when a user changes work days)
forced_hours = att_day.get_related_forced_due_hours()
if forced_hours:
due_hours = forced_hours.forced_due_hours
else:
due_hours = sum(att_day.cal_att_ids.mapped('due_hours'))
# Public holidays
if att_day.public_holiday_id:
att_day.due_hours = 0
continue
# Leaves
due_hours -= att_day.get_leave_time(due_hours)
if due_hours < 0:
due_hours = 0
att_day.due_hours = due_hours
@api.multi
@api.depends('attendance_ids.worked_hours')
def _compute_total_attendance(self):
for att_day in self.filtered('attendance_ids'):
att_day.total_attendance = sum(
att_day.attendance_ids.mapped(
'worked_hours') or [0])
@api.multi
@api.depends('total_attendance', 'coefficient')
def _compute_paid_hours(self):
"""
Paid hours are the sum of the attendances minus the break time
added by the system and multiply by the coefficient.
"""
for att_day in self.filtered('attendance_ids'):
paid_hours = att_day.total_attendance
# Take only the breaks edited by the system
breaks = att_day.break_ids.filtered(
lambda r: r.system_modified and not r.is_offered)
paid_hours -= sum(breaks.mapped('additional_duration'))
att_day.paid_hours = paid_hours * att_day.coefficient
@api.multi
def _compute_free_break_hours(self):
for att_day in self:
att_day.free_breaks_hours = sum(att_day.break_ids.filtered(
'is_offered').mapped('total_duration') or [0])
@api.multi
@api.depends('attendance_ids')
def _compute_rule_id(self):
"""
To know which working rule is applied on the day, we deduce the
free break time offered from the paid hours.
"""
for att_day in self:
if att_day.paid_hours:
hours = int(att_day.paid_hours - att_day.free_breaks_hours)
else:
hours = int(att_day.due_hours - att_day.free_breaks_hours)
if hours < 0:
hours = 0
att_day.rule_id = self.env['hr.attendance.rules'].search([
('time_from', '<=', hours),
('time_to', '>', hours),
])
@api.multi
def _compute_due_break(self):
"""Calculation of the break duration due depending of
hr.attendance.rules (only for displaying it in the view)"""
for att_day in self:
if att_day.rule_id:
att_day.due_break_min = att_day.rule_id.due_break
att_day.due_break_total = att_day.rule_id.due_break_total
else:
att_day.due_break_min = 0
att_day.due_break_total = 0
@api.multi
@api.depends('break_ids', 'break_ids.total_duration')
def _compute_break_total(self):
for att_day in self:
att_day.break_total = sum(
att_day.break_ids.mapped('total_duration') or [0])
@api.multi
@api.depends('paid_hours', 'due_hours')
def _compute_day_balance(self):
for att_day in self:
att_day.day_balance = att_day.balance_computation()
def balance_computation(self):
self.ensure_one()
sick_leave = self.env.ref('hr_holidays.holiday_status_sl')
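        # Validated sick leave days are neutral: the day balance is zero
        # instead of paid hours minus due hours.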
if sick_leave in self.leave_ids. \
filtered(lambda r: r.state == 'validate'). \
mapped('holiday_status_id'):
return 0
else:
return self.paid_hours - self.due_hours
@api.multi
def validate_extend_breaks(self):
"""
This will extend the break time based on the break attendance rules
of the day. The paid hours will be recomputed after that.
"""
def extend_longest_break(extension_duration):
# Extend the break duration
att_breaks = att_day.break_ids.filtered(
lambda r: not r.is_offered)
if att_breaks:
att_break = att_breaks.sorted('total_duration')[-1]
            # otherwise create a new one
else:
att_break = self.env['hr.attendance.break'].create({
'employee_id': att_day.employee_id.id,
'attendance_day_id': att_day.id
})
att_break.write({
'additional_duration': extension_duration
})
def compute_break_time_to_add(rule):
breaks_total = sum(
att_day.break_ids.mapped('total_duration') or [0])
due_break_total = rule["due_break_total"]
due_break_min_length = rule["due_break"]
time_to_add = 0
break_max = max(
att_day.break_ids.mapped('total_duration') or [0])
if break_max < due_break_min_length:
                # We want to extend a non-offered break to at least the
                # minimum value.
break_max_non_offered = max(att_day.break_ids.filtered(
lambda b: not b.is_offered).mapped(
'total_duration') or [0])
time_to_add += due_break_min_length - break_max_non_offered
breaks_total += time_to_add
if breaks_total < due_break_total:
time_to_add += due_break_total - breaks_total
return time_to_add
for att_day in self:
logged_hours = att_day.total_attendance - att_day.free_breaks_hours
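            # Find the working rule that applies to the hours actually
            # logged (offered free breaks excluded).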
rule = self.env['hr.attendance.rules'].search([
('time_to', '>', logged_hours),
'|', ('time_from', '<=', logged_hours),
('time_from', '=', False),
])
time_to_add = compute_break_time_to_add(rule)
if time_to_add != 0:
# Ensure we don't fall under another working rule when removing
# hours from that day
new_logged_hours = logged_hours - time_to_add
new_rule = self.env['hr.attendance.rules'].search([
('time_to', '>', new_logged_hours),
'|', ('time_from', '<=', new_logged_hours),
('time_from', '=', False),
])
if new_rule != rule:
time_to_add = compute_break_time_to_add(new_rule)
time_to_add = max(time_to_add, logged_hours -
new_rule.time_to)
if time_to_add != 0:
extend_longest_break(time_to_add)
##########################################################################
# ORM METHODS #
##########################################################################
@api.model
def create(self, vals):
rd = super(HrAttendanceDay, self).create(vals)
att_date = fields.Date.from_string(rd.date)
        # link to leaves (hr.holidays)
date_str = fields.Date.to_string(att_date)
rd.leave_ids = self.env['hr.holidays'].search([
('employee_id', '=', rd.employee_id.id),
('type', '=', 'remove'),
('date_from', '<=', date_str),
('date_to', '>=', date_str)])
# find coefficient
week_day = att_date.weekday()
co_ids = self.env['hr.weekday.coefficient'].search([
('day_of_week', '=', week_day)]).filtered(
lambda r: r.category_ids & rd.employee_id.category_ids)
rd.coefficient = co_ids[0].coefficient if co_ids else 1
# check public holiday
if self.env['hr.holidays.public'].is_public_holiday(
rd.date, rd.employee_id.id):
holidays_lines = self.env['hr.holidays.public'].get_holidays_list(
att_date.year, rd.employee_id.id)
rd.public_holiday_id = holidays_lines.filtered(
lambda r: r.date == rd.date)
# find related attendance
rd.attendance_ids = self.env['hr.attendance'].search([
('employee_id', '=', rd.employee_id.id),
('date', '=', rd.date),
])
for leave in rd.leave_ids:
leave._compute_att_day()
# compute breaks
rd.compute_breaks()
rd.recompute_period_if_old_day()
return rd
@api.multi
def write(self, vals):
res = super(HrAttendanceDay, self).write(vals)
if 'paid_hours' in vals:
self.recompute_period_if_old_day()
return res
##########################################################################
# PUBLIC METHODS #
##########################################################################
@api.multi
def recompute_period_if_old_day(self):
for day in self:
employee_periods = day.employee_id.period_ids
period_of_day = employee_periods.search([
('start_date', '<=', day.date),
('end_date', '>=', day.date)
], limit=1)
if period_of_day:
period_of_day.update_period()
self.employee_id._compute_balance()
@api.multi
def open_attendance_day(self):
""" Used to bypass opening a attendance in popup mode"""
self.ensure_one()
return {
'type': 'ir.actions.act_window',
'name': 'Attendance day',
'view_type': 'form',
'view_mode': 'form',
'res_model': self._name,
'res_id': self.id,
'target': 'current',
}
def get_leave_time(self, due_hours):
"""
Compute leave duration for the day.
:return: deduction to due hours (in hours)
:rtype: float [0:24]
"""
deduction = 0
for leave in self.leave_ids:
if leave.state != 'validate' or \
leave.holiday_status_id.keep_due_hours:
continue
else:
utc_start = fields.Datetime.from_string(leave.date_from)
utc_end = fields.Datetime.from_string(leave.date_to)
# Convert UTC in local timezone
user_tz = self.employee_id.user_id.tz
if not user_tz:
user_tz = u'UTC'
local = pytz.timezone(user_tz)
utc = pytz.timezone('UTC')
local_start = utc.localize(utc_start).astimezone(local)
local_end = utc.localize(utc_end).astimezone(local)
leave_start_date = local_start.date()
leave_end_date = local_end.date()
date = fields.Date.from_string(self.date)
full_day = due_hours
if leave_start_date < date < leave_end_date:
deduction += full_day
elif date == leave_start_date:
                    # convert time to float
start = local_start.hour + local_start.minute / 60.
for att in self.cal_att_ids:
if att.hour_from <= start < att.hour_to:
deduction += att.hour_to - start
elif start < att.hour_from:
deduction += att.due_hours
elif date == leave_end_date:
                    # convert time to float
end = local_end.hour + local_end.minute / 60.
for att in self.cal_att_ids:
if att.hour_from < end <= att.hour_to:
deduction += end - att.hour_from
elif end > att.hour_to:
deduction += att.due_hours
else:
_logger.error(
"This day doesn't correspond to this leave"
)
return deduction
@api.multi
def compute_breaks(self):
"""
Given the attendance of the employee, check the break time rules
and compute the break time of the day. This will then trigger the
computation of the paid hours for the day
(total attendance - additional break time added)
:return: None
"""
att_day_ids = self.filtered('attendance_ids')
att_day_ids.mapped('break_ids').unlink()
for att_day in att_day_ids:
# add the offered break
free_break = self.env['base.config.settings'].get_free_break()
if free_break > 0:
self.env['hr.attendance.break'].create({
'employee_id': att_day.employee_id.id,
'attendance_day_id': att_day.id,
'is_offered': True,
'additional_duration': free_break
})
att_ids = att_day.attendance_ids
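            # Walk the attendances in chronological order and create a
            # break between each pair of consecutive attendances.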
iter_att = iter(att_ids.sorted(key=lambda r: r.check_in))
previous_att = iter_att.next()
while True:
try:
attendance = iter_att.next()
self.env['hr.attendance.break'].create(
{
'employee_id': att_day.employee_id.id,
'attendance_day_id': att_day.id,
'previous_attendance': previous_att.id,
'next_attendance': attendance.id,
})
previous_att = attendance
except StopIteration:
break
# Extend the break time if needed
att_day.validate_extend_breaks()
self._compute_paid_hours()
@api.multi
def recompute_due_hours(self):
self._compute_total_attendance()
self._compute_due_hours()
self._compute_paid_hours()
|
eicher31/compassion-modules
|
hr_attendance_management/models/hr_attendance_day.py
|
Python
|
agpl-3.0
| 22,442
|
# Generated by Django 2.0.4 on 2019-07-16 21:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0009_auto_20190607_1518'),
]
operations = [
migrations.RenameField(
model_name='smpost',
old_name='post_instagram',
new_name='post_newsletter',
),
]
|
daily-bruin/meow
|
meow/scheduler/migrations/0010_auto_20190716_1418.py
|
Python
|
agpl-3.0
| 382
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import res_company
# WARNING: Order of imports matters in this module. Don't put res_company
# below the other modules, since that leads to a missing column error when
# the module is initialized for the first time: there are fields with
# default values which refer to this new res.company field.
from . import event
from . import event_mail
from . import event_type
from . import res_config_settings
|
OCA/event
|
event_mail/models/__init__.py
|
Python
|
agpl-3.0
| 482
|
#!/usr/bin/env python3
# -*-coding:UTF-8 -*
import os
import re
import sys
import time
import redis
import datetime
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'packages'))
import Item
import Term
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/'))
import ConfigLoader
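# Replace the last `occurrence` occurrences of `old` with `new` in the string `s`.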
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
if __name__ == '__main__':
start_deb = time.time()
config_loader = ConfigLoader.ConfigLoader()
    r_serv_term_stats = config_loader.get_redis_conn("ARDB_Trending")
    r_serv_termfreq = config_loader.get_redis_conn("ARDB_TermFreq")
    # connection used at the end of the script to record the new AIL version
    # (the "ARDB_DB" database name is an assumption)
    r_serv = config_loader.get_redis_conn("ARDB_DB")
    config_loader = None
r_serv_term_stats.flushdb()
    # convert all tracked regex:
all_regex = r_serv_termfreq.smembers('TrackedRegexSet')
for regex in all_regex:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(regex)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(regex)) )
new_term = regex[1:-1]
res = Term.parse_json_term_to_add({"term": new_term, "type": 'regex', "tags": tags, "mails": mails, "level": 1}, 'admin@admin.test')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('regex_{}'.format(regex))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Regex Removed: {}'.format(regex))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedRegexSet', regex)
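    # convert all tracked words (tokens):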
all_tokens = r_serv_termfreq.smembers('TrackedSetTermSet')
for token in all_tokens:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(token)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(token)) )
res = Term.parse_json_term_to_add({"term": token, "type": 'word', "tags": tags, "mails": mails, "level": 1}, 'admin@admin.test')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('tracked_{}'.format(token))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Token Removed: {}'.format(token))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedSetTermSet', token)
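    # convert all tracked term sets: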
all_set = r_serv_termfreq.smembers('TrackedSetSet')
for curr_set in all_set:
tags = list( r_serv_termfreq.smembers('TrackedNotificationTags_{}'.format(curr_set)) )
mails = list( r_serv_termfreq.smembers('TrackedNotificationEmails_{}'.format(curr_set)) )
to_remove = ',{}'.format(curr_set.split(',')[-1])
new_set = rreplace(curr_set, to_remove, '', 1)
new_set = new_set[2:]
new_set = new_set.replace(',', '')
res = Term.parse_json_term_to_add({"term": new_set, "type": 'set', "nb_words": 1, "tags": tags, "mails": mails, "level": 1}, 'admin@admin.test')
if res[1] == 200:
term_uuid = res[0]['uuid']
list_items = r_serv_termfreq.smembers('tracked_{}'.format(curr_set))
for paste_item in list_items:
item_id = Item.get_item_id(paste_item)
item_date = Item.get_item_date(item_id)
Term.add_tracked_item(term_uuid, item_id, item_date)
# Invalid Tracker => remove it
else:
print('Invalid Set Removed: {}'.format(curr_set))
print(res[0])
# allow reprocess
r_serv_termfreq.srem('TrackedSetSet', curr_set)
r_serv_termfreq.flushdb()
    # Set current ail version
    r_serv.set('ail:version', 'v2.2')
    # Set the date of this update
    r_serv.hset('ail:update_date', 'v2.2', datetime.datetime.now().strftime("%Y%m%d"))
|
CIRCL/AIL-framework
|
update/v2.2/Update.py
|
Python
|
agpl-3.0
| 4,172
|
#Constants
PROD = 'prod'
LOCAL = 'local'
NOPI = 'nopi'
#Set configs here
ENV = PROD
loggingEnabled = True
|
tonitran/dvc-embedded
|
runConfigs.py
|
Python
|
agpl-3.0
| 107
|
#!/usr/bin/env python
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values,
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
'''
pipeline = [
{"$unwind" : "$entities.hashtags"},
{"$group" : {"_id" : "$user.screen_name",
"unique_hashtags" : {
"$addToSet" : "$entities.hashtags.text"
}}},
{"$sort" : {"_id" : -1}}]
'''
pipeline = [
{
"$group" :
{
"_id" : "$user.screen_name",
"tweet_texts" :{"$push" : "$text"},
"count" : {"$sum" : 1}
}
},
{
"$sort" :
{
"count" : -1
}
},
{
"$limit" : 5
}
]
return pipeline
def aggregate(db, pipeline):
result = db.tweets.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
assert len(result["result"]) == 5
assert result["result"][0]["count"] > result["result"][4]["count"]
import pprint
pprint.pprint(result)
|
dvu4/Data-Wrangling-with-MongoDB
|
Lesson_5_Analyzing_Data/14-Using_push/push.py
|
Python
|
agpl-3.0
| 2,726
|
# -*- coding: utf-8 *-*
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
import requests
import os
from optparse import make_option
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from django.conf import settings
from wger.exercises.models import Exercise, ExerciseImage
class Command(BaseCommand):
'''
    Download exercise images from wger.de and update the local database
The script assumes that the local IDs correspond to the remote ones, which
is the case if the user installed the exercises from the JSON fixtures.
Otherwise, the exercise is simply skipped
'''
option_list = BaseCommand.option_list + (
make_option('--remote-url',
action='store',
dest='remote_url',
default='https://wger.de',
help='Remote URL to fetch the exercises from (default: https://wger.de)'),
)
help = ('Download exercise images from wger.de and update the local database\n'
'\n'
'ATTENTION: The script will download the images from the server and add them\n'
' to your local exercises. The exercises are identified by\n'
' their UUID field, if you manually edited or changed it\n'
' the script will not be able to match them.')
def handle(self, *args, **options):
if not settings.MEDIA_ROOT:
raise ImproperlyConfigured('Please set MEDIA_ROOT in your settings file')
remote_url = options['remote_url']
try:
val = URLValidator()
val(remote_url)
except ValidationError:
raise CommandError('Please enter a valid URL')
exercise_api = "{0}/api/v2/exercise/?limit=999"
image_api = "{0}/api/v2/exerciseimage/?exercise={1}"
thumbnail_api = "{0}/api/v2/exerciseimage/{1}/thumbnails/"
# Get all exercises
result = requests.get(exercise_api.format(remote_url)).json()
for exercise_json in result['results']:
exercise_name = exercise_json['name'].encode('utf-8')
exercise_uuid = exercise_json['uuid']
exercise_id = exercise_json['id']
self.stdout.write('')
self.stdout.write(u"*** Processing {0} (ID: {1}, UUID: {2})".format(exercise_name,
exercise_id,
exercise_uuid))
try:
exercise = Exercise.objects.get(uuid=exercise_uuid)
except Exercise.DoesNotExist:
self.stdout.write(' Remote exercise not found in local DB, skipping...')
continue
# Get all images
images = requests.get(image_api.format(remote_url, exercise_id)).json()
if images['count']:
for image_json in images['results']:
image_id = image_json['id']
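                    # The thumbnail endpoint also exposes the URL of the
                    # original image file, which is what gets downloaded.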
result = requests.get(thumbnail_api.format(remote_url, image_id)).json()
image_name = os.path.basename(result['original'])
self.stdout.write(' Fetching image {0} - {1}'.format(image_id, image_name))
try:
image = ExerciseImage.objects.get(pk=image_id)
self.stdout.write(' --> Image already present locally, skipping...')
continue
except ExerciseImage.DoesNotExist:
self.stdout.write(' --> Image not found in local DB, creating now...')
image = ExerciseImage()
image.pk = image_id
# Save the downloaded image, see link for details
# http://stackoverflow.com/questions/1308386/programmatically-saving-image-to-
retrieved_image = requests.get(result['original'])
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(retrieved_image.content)
img_temp.flush()
image.exercise = exercise
image.is_main = image_json['is_main']
image.status = image_json['status']
image.image.save(
os.path.basename(image_name),
File(img_temp),
)
image.save()
else:
self.stdout.write(' No images for this exercise, nothing to do')
|
DeveloperMal/wger
|
wger/exercises/management/commands/download-exercise-images.py
|
Python
|
agpl-3.0
| 5,449
|
from django.utils.translation import ugettext_lazy as _
from keops.db import models
STATUS = (
('draft', _('Draft')),
('open', _('In Progress')),
('pending', _('Pending')),
('done', _('Done')),
('cancelled', _('Cancelled'))
)
class Category(models.Model):
name = models.CharField(null=False, unique=True)
class TaskType(models.Model):
name = models.CharField(unique=True, null=False)
description = models.TextField()
status = models.CharField(max_length=16, choices=STATUS)
class Meta:
db_table = 'project_task_type'
class Project(models.Model):
manager = models.ForeignKey('base.User')
class Task(models.Model):
name = models.CharField(max_length=128, db_index=True)
description = models.TextField()
status = models.CharField(max_length=16, choices=STATUS)
|
mrmuxl/keops
|
keops/modules/project/models.py
|
Python
|
agpl-3.0
| 832
|
# -*- encoding: utf-8 -*-
from openerp.osv import fields
from openerp.osv import osv
import lasso
import simplejson
class auth_saml_provider(osv.osv):
"""Class defining the configuration values of an Saml2 provider"""
_name = 'auth.saml.provider'
_description = 'SAML2 provider'
_order = 'name'
def _get_lasso_for_provider(self, cr, uid, provider_id, context=None):
"""internal helper to get a configured lasso.Login object for the
given provider id"""
provider = self.browse(cr, uid, provider_id, context=context)
# TODO: we should cache those results somewhere because it is
# really costly to always recreate a login variable from buffers
server = lasso.Server.newFromBuffers(
provider.sp_metadata,
provider.sp_pkey
)
server.addProviderFromBuffer(
lasso.PROVIDER_ROLE_IDP,
provider.idp_metadata
)
return lasso.Login(server)
def _get_matching_attr_for_provider(
self, cr, uid, provider_id, context=None
):
"""internal helper to fetch the matching attribute for this SAML
provider. Returns a unicode object.
"""
provider = self.browse(cr, uid, provider_id, context=context)
return provider.matching_attribute
def _get_auth_request(self, cr, uid, id_, state, context=None):
"""build an authentication request and give it back to our client
WARNING: this method cannot be used for multiple ids
"""
login = self._get_lasso_for_provider(cr, uid, id_, context=context)
# ! -- this is the part that MUST be performed on each call and
# cannot be cached
login.initAuthnRequest()
login.request.nameIdPolicy.format = None
login.request.nameIdPolicy.allowCreate = True
login.msgRelayState = simplejson.dumps(state)
login.buildAuthnRequestMsg()
# msgUrl is a fully encoded url ready for redirect use
# obtained after the buildAuthnRequestMsg() call
return login.msgUrl
_columns = {
        # Name of the SAML2 entity: authentic, xcg...
'name': fields.char('Provider name'),
'idp_metadata': fields.text('IDP Configuration'),
'sp_metadata': fields.text('SP Configuration'),
'sp_pkey': fields.text(
'Private key of our service provider (this openerpserver)'
),
'matching_attribute': fields.text('Matching Attribute', required=True),
'enabled': fields.boolean('Enabled'),
'css_class': fields.char('CSS class'),
'body': fields.char(
'Body',
required=True,
),
'sequence': fields.integer(),
}
_defaults = {
'enabled': False,
'matching_attribute': "subject.nameId",
'css_class': 'zocial saml',
'body': 'Authentic',
}
|
xcgd/auth_saml
|
model/auth_saml.py
|
Python
|
agpl-3.0
| 2,907
|
"""
Forms for the bug tracker app.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from apps.txtrender.forms import MarkupCharField
from apps.contentreport.forms import ContentReportCreationForm
from apps.tools.http_utils import get_client_ip_address
from .models import (IssueTicket,
IssueTicketSubscription,
IssueComment,
BugTrackerUserProfile)
from .notifications import (notify_of_new_comment,
notify_of_new_issue)
class IssueTicketCreationForm(forms.Form):
"""
``IssueTicket`` creation form for registered users only.
"""
title = forms.CharField(widget=forms.TextInput(),
max_length=255,
label=_('Title'))
description = MarkupCharField(label=_('Problem description'))
notify_of_reply = forms.BooleanField(widget=forms.CheckboxInput(),
label=_('Notify me of new reply'),
required=False)
def save(self, request, submitter):
"""
Save the form by creating a new ``IssueTicket``.
:param request: The current request.
:param submitter: The ticket's submitter.
        :return: The newly created ticket.
"""
new_obj = IssueTicket.objects.create(title=self.cleaned_data['title'],
description=self.cleaned_data['description'],
submitter=submitter,
submitter_ip_address=get_client_ip_address(request))
# Add subscriber if necessary
if self.cleaned_data['notify_of_reply']:
IssueTicketSubscription.objects.subscribe_to_issue(submitter, new_obj)
# Notify subscribers
notify_of_new_issue(new_obj, request, submitter)
# Return the newly created object
return new_obj
class IssueTicketEditionForm(forms.ModelForm):
"""
``IssueTicket`` edition form for registered users only.
"""
class Meta:
model = IssueTicket
fields = ('title',
'description')
class IssueCommentCreationForm(forms.Form):
"""
``IssueComment`` creation form for registered users only.
"""
comment_body = MarkupCharField(label=_('Comment text'))
notify_of_reply = forms.BooleanField(widget=forms.CheckboxInput(),
label=_('Notify me of new reply'),
required=False)
def save(self, request, issue, author):
"""
Save the form by creating a new ``IssueComment`` for the given ``IssueTicket``.
Drop a success flash message after saving.
:param request: The current request.
:param issue: The related issue instance.
:param author: The author of this comment.
"""
new_obj = IssueComment.objects.create(issue=issue,
author=author,
body=self.cleaned_data['comment_body'],
author_ip_address=get_client_ip_address(request))
# Add subscriber if necessary
if self.cleaned_data['notify_of_reply']:
IssueTicketSubscription.objects.subscribe_to_issue(author, new_obj.issue)
else:
IssueTicketSubscription.objects.unsubscribe_from_issue(author, new_obj.issue)
# Notify subscribers
notify_of_new_comment(issue, new_obj, request, author)
# Return the newly created object
return new_obj
class IssueCommentReportCreationForm(ContentReportCreationForm):
"""
``IssueCommentReport`` creation form for registered users only.
"""
def get_extra_notification_kwargs(self):
"""
Return extra arguments for the notification template.
"""
return {
'content_object_name': 'comment',
'title_template_name': "bugtracker/issue_comment_report_subject.txt",
'message_template_name': "bugtracker/issue_comment_report_body.txt",
'message_template_name_html': "bugtracker/issue_comment_report_body.html",
}
class BugTrackerProfileModificationForm(forms.ModelForm):
"""
Bug tracker user's account modification form.
"""
class Meta:
model = BugTrackerUserProfile
fields = ('notify_of_new_issue',
'notify_of_reply_by_default')
|
TamiaLab/carnetdumaker
|
apps/bugtracker/forms.py
|
Python
|
agpl-3.0
| 4,597
|
# -*- coding: utf-8 -*-
# © 2017 Comunitea
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
from datetime import timedelta
from pytz import timezone
from odoo.addons import decimal_precision as dp
class StockPicking(models.Model):
_inherit = 'stock.picking'
neutral_document = fields.Boolean('Neutral Document',
related='sale_id.neutral_document')
operator = fields.Char('Operator')
same_day_delivery = fields.Boolean(compute='_compute_same_day_delivery')
delivery_date = fields.Char(compute='_compute_delivery_date')
delivery_amount = fields.Monetary(compute='_compute_delivery_amount')
global_discount_amount = fields.Monetary(
compute='_compute_global_discount_amount')
min_date_date = fields.Date(compute='_compute_min_date_date')
date_done_date = fields.Date(compute='_compute_min_date_date')
sale_services = fields.Many2many(
'sale.order.line', 'stock_picking_sale_order_line_services_rel',
'picking_id', 'sale_id', compute='_compute_sale_services')
purchase_currency_id = fields.Many2one(
related='purchase_id.currency_id',
string='Currency')
@api.depends('min_date')
def _compute_min_date_date(self):
for pick in self:
pick.min_date_date = pick.min_date and \
pick.min_date.split(' ')[0] or False
if pick.date_done:
pick.date_done_date = pick.date_done.split(' ')[0]
else:
pick.date_done_date = pick.min_date_date
    # If the picking was completed before 17:30 on a weekday, it is shipped
    # the same day.
def _compute_same_day_delivery(self):
for pick in self:
if pick.date_done:
same_day_delivery = True
date_done = fields.Datetime.from_string(pick.date_done)\
.replace(tzinfo=timezone('Etc/UTC'))\
.astimezone(timezone(pick._context.get('tz', 'Etc/UTC')))
if date_done.hour > 17 or \
(date_done.hour == 17 and date_done.minute > 30) or \
date_done.isoweekday() in (6, 7):
same_day_delivery = False
pick.same_day_delivery = same_day_delivery
def _compute_delivery_date(self):
        # If it is not shipped the same day, make sure the shipping day is
        # neither a Saturday nor a Sunday
for pick in self:
if pick.date_done:
if pick.same_day_delivery:
pick.delivery_date = pick.date_done
else:
date_done = fields.Datetime.from_string(pick.date_done)
next_date = date_done + timedelta(days=1)
delivery_date = next_date
if next_date.isoweekday() == 6:
delivery_date = next_date + timedelta(days=2)
elif next_date.isoweekday() == 7:
delivery_date = next_date + timedelta(days=1)
pick.delivery_date = delivery_date
@api.multi
def _compute_delivery_amount(self):
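        # The delivery amount is read from the sale order line whose product
        # is flagged as a delivery cost.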
for picking in self:
delivery_line = picking.sale_id.order_line.filtered(
lambda x: x.product_id.delivery_cost)
if delivery_line:
picking.delivery_amount = delivery_line[0].price_subtotal
else:
picking.delivery_amount = 0.0
@api.multi
def _compute_global_discount_amount(self):
for picking in self:
global_discount_lines = picking.sale_id.order_line.filtered(
lambda x: x.promotion_line)
ep_disc = picking.sale_id.total_early_discount
if global_discount_lines or ep_disc:
picking.global_discount_amount = sum(
global_discount_lines.mapped('price_subtotal')) + ep_disc
else:
picking.global_discount_amount = 0.0
@api.multi
def _compute_amount_all(self):
res = super(StockPicking, self)._compute_amount_all()
for pick in self:
if pick.sale_id:
delivery_line = pick.sale_id.order_line.filtered(
lambda x: x.product_id.delivery_cost)
global_discount_lines = pick.sale_id.order_line.filtered(
lambda x: x.promotion_line)
if delivery_line:
amount_untaxed = sum(pick.pack_operation_ids.mapped(
'sale_price_subtotal')) + \
delivery_line[0].price_subtotal + \
sum(global_discount_lines.mapped('price_subtotal')) + \
sum(pick.sale_services.mapped('price_subtotal'))
amount_tax = sum(pick.pack_operation_ids.mapped(
'sale_price_tax')) + delivery_line[0].price_tax + \
sum(global_discount_lines.mapped('price_tax')) + \
sum(pick.sale_services.mapped('price_tax'))
pick.update({
'amount_untaxed': amount_untaxed,
'amount_tax': amount_tax,
'amount_total': amount_untaxed + amount_tax,
})
else:
amount_untaxed = sum(pick.pack_operation_ids.mapped(
'sale_price_subtotal')) + \
sum(global_discount_lines.mapped('price_subtotal')) + \
sum(pick.sale_services.mapped('price_subtotal'))
amount_tax = sum(pick.pack_operation_ids.mapped(
'sale_price_tax')) + \
sum(global_discount_lines.mapped('price_tax')) + \
sum(pick.sale_services.mapped('price_tax'))
pick.update({
'amount_untaxed': amount_untaxed,
'amount_tax': amount_tax,
'amount_total': amount_untaxed + amount_tax,
})
elif pick.purchase_id:
amount_tax = sum(pick.pack_operation_ids.mapped(
'purchase_price_tax'))
amount_total = sum(pick.pack_operation_ids.mapped(
'purchase_price_total'))
val = {
'amount_untaxed': amount_total - amount_tax,
'amount_tax': amount_tax,
'amount_total': amount_total,
}
pick.update(val)
return res
@api.depends('sale_id')
def _compute_sale_services(self):
for picking in self:
picking.sale_services = picking.sale_id.order_line.filtered(
lambda x: x.product_id.type == 'service' and not
x.product_id.delivery_cost)
@api.multi
def action_open_purchases_valued_ops(self):
action = self.env.ref(
'custom_documents.action_open_view_valued_stock_pack_op_tree').read()[0]
action['domain'] = [('id', 'in', self.pack_operation_ids.ids)]
action['context'] = {
'default_picking_id': self.id,
}
return action
class StockMove(models.Model):
_inherit = 'stock.move'
name_report = fields.Char(compute='_compute_name_report')
@api.multi
def _compute_name_report(self):
for line in self:
name_report = line.name
if '[%s]' % line.product_id.default_code in line.name:
name_report = line.name.replace(
'[%s]' % line.product_id.default_code, '')
line.name_report = name_report
@api.onchange('product_id')
def onchange_product_id(self):
"""Se hereda el onchange para establecer correctamente el nombre"""
res = super(StockMove, self).onchange_product_id()
product = self.product_id.with_context(lang=self.partner_id.lang or self.env.user.lang)
if product:
self.name = product.name_get()[0][1]
return res
class StockPackOperation(models.Model):
_inherit = "stock.pack.operation"
purchase_line = fields.Many2one(
comodel_name='purchase.order.line',
compute='_compute_purchase_order_line_fields',
string="Related order line")
purchase_tax_id = fields.Many2many(
comodel_name='account.tax',
compute='_compute_purchase_order_line_fields',
string="Taxes")
purchase_tax_description = fields.Char(
compute='_compute_purchase_order_line_fields',
string='Tax Description')
purchase_price_unit = fields.Float(
compute='_compute_purchase_order_line_fields',
digits=dp.get_precision('Product Price'),
string="purchase price unit")
purchase_discount = fields.Float(
compute='_compute_purchase_order_line_fields',
digits=dp.get_precision('Discount'),
string="purchase discount (%)")
purchase_price_subtotal = fields.Float(
compute='_compute_purchase_order_line_fields',
string="Price subtotal")
purchase_price_tax = fields.Float(
compute='_compute_purchase_order_line_fields',
string='Taxes')
purchase_price_total = fields.Float(
compute='_compute_purchase_order_line_fields',
string='Total')
qty_delivered = fields.Float('Delivered qty', default=0.0, digits=dp.get_precision('Product Unit of Measure'), compute="_get_qty_delivered")
@api.multi
def _get_qty_delivered(self):
for operation in self:
operation.qty_delivered = operation.qty_done or operation.product_qty
@api.multi
def _compute_purchase_order_line_fields(self):
for operation in self:
purchase_lines = operation.mapped(
'linked_move_operation_ids.move_id.purchase_line_id')
operation.update(operation.purchase_lines_values(purchase_lines))
@api.multi
def purchase_lines_values(self, purchase_lines):
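        # Build the valued purchase fields (unit price, taxes, subtotal and
        # total) from the first related purchase order line.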
if len(purchase_lines) <= 1:
price_unit = purchase_lines.price_unit
discount = 0.00
else:
price_unit = purchase_lines[0].price_unit
discount = 0.00
purchase_line = purchase_lines[:1]
purchase_tax = purchase_line.taxes_id
taxes = purchase_tax.compute_all(
price_unit=price_unit,
currency=purchase_line.currency_id,
quantity=self.qty_delivered,
product=purchase_line.product_id,
partner=purchase_line.order_id.partner_id)
if purchase_line.company_id.tax_calculation_rounding_method == (
'round_globally'):
price_tax = sum(
t.get('amount', 0.0) for t in taxes.get('taxes', []))
else:
price_tax = taxes['total_included'] - taxes['total_excluded']
val = {
'purchase_line': purchase_line,
'purchase_tax_id': purchase_tax,
'purchase_tax_description': ', '.join(map(lambda x: (
x.description or x.name), purchase_tax)),
'purchase_price_unit': price_unit,
'purchase_discount': discount,
'purchase_price_subtotal': taxes['total_excluded'],
'purchase_price_tax': price_tax,
'purchase_price_total': taxes['total_included'],
}
return val
|
Comunitea/CMNT_00098_2017_JIM_addons
|
custom_documents/models/stock_picking.py
|
Python
|
agpl-3.0
| 11,479
|
#encoding=utf-8
from django.utils.safestring import mark_safe
from django.db import models
from django.utils.six import python_2_unicode_compatible
from mptt.models import MPTTModel
from mptt.fields import TreeForeignKey, TreeManyToManyField
from datetime import date, timedelta
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import gettext_lazy as __
from decimal import Decimal
from itertools import chain
# Create your models here.
a_strG = "<a onclick='return showRelatedObjectLookupPopup(this);' href='/admin/general/"
a_strW = "<a onclick='return showRelatedObjectLookupPopup(this);' href='/admin/Welcome/"
#a_str2 = "?_popup=1&_changelist_filters=_popup=1&t=human' target='_blank' style='margin-left:-100px'>"
a_str2 = "?_popup=1&t=human' target='_blank' >"
a_str3 = "?_popup=1&t=human' target='_blank'>"
a_edit = '<b>Edit</b>'
ul_tag1 = '<ul style="margin-left:-10em;">'
ul_tag = '<ul>'
str_none = __('(none)')
str_remove = 'erase'
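# Build an admin-side HTML link that clears the given field/id pair via JavaScript.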
def erase_id_link(field, id):
out = '<a class="erase_id_on_box" name="'+str(field)+','+str(id)+'" href="javascript:;">'+str_remove+'</a>'
print(out)
return out
# C O N C E P T S - (Concepts, Ideas...)
@python_2_unicode_compatible
class Concept(MPTTModel): # Abstract
name = models.CharField(unique=True, verbose_name=_("Name"), max_length=200, help_text=_("The name of the Concept"), default="")
description = models.TextField(blank=True, verbose_name=_("Description"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', on_delete=models.CASCADE)
def __str__(self):
return self.name
class Meta:
abstract = True
verbose_name = _("Concept")
verbose_name_plural = _("c- Concepts")
@python_2_unicode_compatible
class Type(Concept): # Create own ID's (TREE)
#concept = models.OneToOneField('Concept', primary_key=True, parent_link=True, on_delete=models.CASCADE)
clas = models.CharField(blank=True, verbose_name=_("Class"), max_length=200,
help_text=_("Django model or python class associated to the Type"))
#types = TreeManyToManyField('self', through='rel_Type_Types', verbose_name=_(u"Related Types"), blank=True)
class Meta:
verbose_name = _("c- Type")
#verbose_name_plural = _(u"c- Types")
def __str__(self):
if self.clas is None or self.clas == '':
return self.name
else:
return self.name+' ('+self.clas+')'
def save(self, *args, **kwargs):
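        # name_ca / name_es look like per-language translation fields (e.g.
        # from django-modeltranslation); default them to the English name
        # when the translation is missing.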
if not self.name_ca:
print("save: name_ca:"+self.name_en)
self.name_ca = self.name_en
if not self.name_es:
print("save: name_es:"+self.name_en)
self.name_es = self.name_en
super(Type, self).save(*args, **kwargs)
"""
class rel_Type_Types(models.Model):
typ = TreeForeignKey('Type', on_delete=models.CASCADE)
typ2 = TreeForeignKey('Type', verbose_name=_(u"related Type"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='ty_typ+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"T_type")
verbose_name_plural = _(u"Types related the Type")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.typ2.__str__()
else:
return self.relation.gerund+' > '+self.typ2.__str__()
"""
# B E I N G S - (Beings, Entities, Projects...)
"""
@python_2_unicode_compatible
class Being(models.Model): # Abstract
name = models.CharField(verbose_name=_(u"Name"), max_length=200, help_text=_(u"The name of the Entity"))
#being_type = TreeForeignKey('Being_Type', blank=True, null=True, verbose_name="Type of entity", on_delete=models.SET_NULL)
birth_date = models.DateField(blank=True, null=True, verbose_name=_(u"Born date"), help_text=_(u"The day of starting existence"))
death_date = models.DateField(blank=True, null=True, verbose_name=_(u"Die date"), help_text=_(u"The day of ceasing existence"))
class Meta:
abstract = True
def __str__(self):
return self.name.encode("utf-8")
"""
class Being_Type(Type):
typ = models.OneToOneField('Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name= _("Type of entity")
verbose_name_plural = _("e--> Types of entities")
"""
@python_2_unicode_compatible
class Human(Being): # Create own ID's
    nickname = models.CharField(max_length=50, blank=True, verbose_name=_(u"Nickname"), help_text=_(u"The most commonly used nickname of the human entity"))
email = models.EmailField(max_length=100, blank=True, verbose_name=_(u"Email"), help_text=_(u"The main email address of the human entity"))
telephone_cell = models.CharField(max_length=20, blank=True, verbose_name=_(u"Mobile phone"), help_text=_(u"The main telephone of the human entity"))
telephone_land = models.CharField(max_length=20, blank=True, verbose_name=_(u"Land phone"))
website = models.CharField(max_length=100, blank=True, verbose_name=_(u"Web"), help_text=_(u"The main web url of the human entity"))
description = models.TextField(blank=True, null=True, verbose_name=_(u"Entity description"))
jobs = TreeManyToManyField('Job', through='rel_Human_Jobs', verbose_name=_(u"Activities, Jobs, Skills"), blank=True)
addresses = models.ManyToManyField('Address', through='rel_Human_Addresses', verbose_name=_(u"Addresses"), blank=True)
regions = models.ManyToManyField('Region', through='rel_Human_Regions', verbose_name=_(u"Regions"), blank=True)
records = models.ManyToManyField('Record', through='rel_Human_Records', verbose_name=_(u"Records"), blank=True)
materials = models.ManyToManyField('Material', through='rel_Human_Materials', verbose_name=_(u"Material artworks"), blank=True)
nonmaterials = models.ManyToManyField('Nonmaterial', through='rel_Human_Nonmaterials', verbose_name=_(u"Non-material artworks"), blank=True)
persons = models.ManyToManyField('Person', through='rel_Human_Persons', related_name='hum_persons', verbose_name=_(u"Persons"), blank=True)
projects = models.ManyToManyField('Project', through='rel_Human_Projects', related_name='hum_projects', verbose_name=_(u"Projects"), blank=True)
companies = models.ManyToManyField('Company', through='rel_Human_Companies', related_name='hum_companies', verbose_name=_(u"Companies"), blank=True)
class Meta:
verbose_name = _(u"Human")
verbose_name_plural = _(u"e- Humans")
def __str__(self):
if self.nickname is None or self.nickname == '':
return self.name
else:
return self.nickname+' ('+self.name+')'
def _my_accounts(self):
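        # Combine every kind of account (CES, crypto and bank) into a single
        # flat list.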
return list(chain(self.accountsCes.all(), self.accountsCrypto.all(), self.accountsBank.all()))
#_my_accounts.list = []
accounts = property(_my_accounts)
def _selflink(self):
if self.id:
if hasattr(self, 'person'):
return mark_safe( a_strG + "person/" + str(self.person.id) + a_str2 + a_edit + "</a>") # % str(self.id))
elif hasattr(self, 'project'):
return mark_safe( a_strG + "project/" + str(self.project.id) + a_str2 + a_edit + "</a>")# % str(self.id) )
else:
return "Not present"
_selflink.allow_tags = True
_selflink.short_description = ''
self_link = property (_selflink)
def _ic_membership(self):
try:
#print(self.ic_membership_set.all())
if hasattr(self, 'ic_person_membership_set'):
ic_ms = self.ic_person_membership_set.all()
out = ul_tag
for ms in ic_ms:
out += '<li>'+a_strW + "ic_person_membership/" + str(ms.id) + a_str3 + '<b>'+ms.name +"</b></a></li>"
return out+'</ul>'
elif hasattr(self, 'ic_project_membership_set'):
ic_ms = self.ic_project_membership_set.all()
out = ul_tag
for ms in ic_ms:
out += '<li>'+a_strW + "ic_project_membership/" + str(ms.id) + a_str3 + '<b>'+ms.name +"</b></a></li>"
if out == ul_tag:
return str_none
return out+'</ul>'
return str_none
except:
return str_none
_ic_membership.allow_tags = True
_ic_membership.short_description = _(u"IC Membership")
def _fees_to_pay(self):
try:
if self.out_fees.all().count() > 0:
out = ul_tag
for fe in self.out_fees.all():
if not fe.payed:
out += '<li>'+a_strW + "fee/" + str(fe.id) + a_str3 +'<b>'+ fe.name + "</b></a></li>"
if out == ul_tag:
return str_none
return out+'</ul>'
return str_none
except:
return str_none
_fees_to_pay.allow_tags = True
_fees_to_pay.short_description = _(u"Fees to pay")
def __init__(self, *args, **kwargs):
super(Human, self).__init__(*args, **kwargs)
if not 'rel_tit' in globals():
rel_tit = Relation.objects.get(clas='holder')
#print('I N I T H U M A N : '+self.name)
'''if hasattr(self, 'accountsCes') and self.accountsCes.count() > 0:
recrels = rel_Human_Records.objects.filter(human=self, record__in=self.accountsCes.all())
if recrels.count() == 0:
for acc in self.accountsCes.all():
newrec, created = rel_Human_Records.objects.get_or_create(human=self, record=acc, relation=rel_tit)
print('- new_REC acc_Ces: CREATED:' + str(created) + ' :: ' + str(newrec))
if hasattr(self, 'accountsBank') and self.accountsBank.count() > 0:
recrels = rel_Human_Records.objects.filter(human=self, record__in=self.accountsBank.all())
if recrels.count() == 0:
for acc in self.accountsBank.all():
newrec, created = rel_Human_Records.objects.get_or_create(human=self, record=acc, relation=rel_tit)
print('- new_REC acc_Bank: CREATED:' + str(created) + ' :: ' + str(newrec))
if hasattr(self, 'accountsCrypto') and self.accountsCrypto.count() > 0:
recrels = rel_Human_Records.objects.filter(human=self, record__in=self.accountsCrypto.all())
if recrels.count() == 0:
for acc in self.accountsCrypto.all():
newrec, created = rel_Human_Records.objects.get_or_create(human=self, record=acc, relation=rel_tit)
print('- new_REC acc_Crypto: CREATED:'+str(created)+' :: '+str(newrec))
'''
@python_2_unicode_compatible
class Person(Human):
human = models.OneToOneField('Human', primary_key=True, parent_link=True, on_delete=models.CASCADE)
surnames = models.CharField(max_length=200, blank=True, verbose_name=_(u"Surnames"), help_text=_(u"The surnames of the Person"))
id_card = models.CharField(max_length=9, blank=True, verbose_name=_(u"ID/DNI/NIE"))
email2 = models.EmailField(blank=True, verbose_name=_(u"Alternate email"))
nickname2 = models.CharField(max_length=50, blank=True, verbose_name=_(u"Nickname in FairNetwork"))
class Meta:
verbose_name= _(u'Person')
verbose_name_plural= _(u'e- Persons')
def __str__(self):
if self.nickname is None or self.nickname == '':
if self.surnames is None or self.surnames == '':
return self.name+' '+self.nickname2
else:
return self.name+' '+self.surnames
else:
#return self.nickname
if self.surnames is None or self.surnames == '':
return self.name+' ('+self.nickname+')'
else:
return self.name+' '+self.surnames+' ('+self.nickname+')'
@python_2_unicode_compatible
class Project(MPTTModel, Human):
human = models.OneToOneField('Human', primary_key=True, parent_link=True, on_delete=models.CASCADE)
project_type = TreeForeignKey('Project_Type', blank=True, null=True, verbose_name=_(u"Type of project"), on_delete=models.SET_NULL)
parent = TreeForeignKey('self', null=True, blank=True, related_name='subprojects', verbose_name=_(u"Parent project"), on_delete=models.SET_NULL)
socialweb = models.CharField(max_length=100, blank=True, verbose_name=_(u"Social website"))
email2 = models.EmailField(blank=True, verbose_name=_(u"Alternate email"))
ecommerce = models.BooleanField(default=False, verbose_name=_(u"E-commerce?"))
#images = models.ManyToManyField('Image', blank=True, null=True, verbose_name=_(u"Images"))
def _is_collective(self):
if self.persons.count() < 2 and self.projects.count() < 2:
return False
else:
return True
_is_collective.boolean = True
_is_collective.short_description = _(u"is collective?")
collective = property(_is_collective)
#ref_persons = models.ManyToManyField('Person', blank=True, null=True, verbose_name=_(u"Reference Persons"))
class Meta:
verbose_name= _(u'Project')
verbose_name_plural= _(u'e- Projects')
def _get_ref_persons(self):
return self.human_persons.filter(relation__clas='reference')
def _ref_persons(self):
prs = self._get_ref_persons()
if prs.count() > 0:
out = ul_tag
for pr in prs:
out += '<li>'+str(pr)+'</li>'
return out+'</ul>'
return str_none
_ref_persons.allow_tags = True
_ref_persons.short_description = _(u"Reference person?")
def __str__(self):
if self.nickname is None or self.nickname == '':
if self.project_type:
return self.name+' ('+self.project_type.name+')'
else:
return self.name
else:
return self.nickname+' ('+self.name+')'
"""
class Project_Type(Being_Type):
projectType_being_type = models.OneToOneField('Being_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type of Project")
verbose_name_plural = _("e-> Types of Projects")
"""
class Company(Human):
human = models.OneToOneField('Human', primary_key=True, parent_link=True, on_delete=models.CASCADE)
company_type = TreeForeignKey('Company_Type', null=True, blank=True, verbose_name=_(u"Type of company"), on_delete=models.SET_NULL)
legal_name = models.CharField(max_length=200, blank=True, null=True, verbose_name=_(u"Legal name"))
vat_number = models.CharField(max_length=20, blank=True, null=True, verbose_name=_(u"VAT/CIF"))
class Meta:
verbose_name = _(u"Company")
verbose_name_plural = _(u"e- Companies")
"""
class Company_Type(Being_Type):
companyType_being_type = models.OneToOneField('Being_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type of Company")
verbose_name_plural = _("e-> Types of Companies")
"""
@python_2_unicode_compatible
class rel_Human_Jobs(models.Model):
human = models.ForeignKey('Human', on_delete=models.CASCADE)
job = TreeForeignKey('Job', verbose_name=_(u"Job"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_job+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_job")
verbose_name_plural = _(u"Skills of the entity")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.job.__str__()
else:
return self.relation.gerund+' > '+self.job.__str__()
@python_2_unicode_compatible
class rel_Human_Addresses(models.Model):
human = models.ForeignKey('Human', on_delete=models.CASCADE)
address = models.ForeignKey('Address', related_name='rel_human', verbose_name=_(u"Address"), on_delete=models.CASCADE,
help_text=_(u"Once choosed the address, save the profile to see the changes."))
relation = TreeForeignKey('Relation', related_name='hu_adr+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.CASCADE)
main_address = models.BooleanField(default=False, verbose_name=_(u"Main address?"))
class Meta:
verbose_name = _(u"H_addr")
verbose_name_plural = _(u"Addresses of the entity")
def __str__(self):
if self.relation is None or self.relation.gerund is None or self.relation.gerund == '':
return self.address.__str__()
else:
return self.relation.gerund+' > '+self.address.__str__()
def _is_main(self):
return self.main_address
_is_main.boolean = True
is_main = property(_is_main)
def _selflink(self):
if self.address:
return self.address._selflink()
_selflink.allow_tags = True
_selflink.short_description = ''
@python_2_unicode_compatible
class rel_Human_Regions(models.Model):
human = models.ForeignKey('Human', on_delete=models.CASCADE)
region = TreeForeignKey('Region', verbose_name=_(u"Region"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_reg+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_reg")
verbose_name_plural = _(u"Related regions")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.region.__str__()
else:
return self.relation.gerund+' > '+self.region.__str__()
@python_2_unicode_compatible
class rel_Human_Records(models.Model):
human = models.ForeignKey('Human', on_delete=models.CASCADE)
record = models.ForeignKey('Record', verbose_name=_(u"Record"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_rec+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_rec")
verbose_name_plural = _(u"Related records")
def __str__(self):
if not hasattr(self.relation, 'gerund') or self.relation.gerund is None or self.relation.gerund == '':
return self.record.__str__()
else:
if not hasattr(self.record, 'record_type') or self.record.record_type is None or self.record.record_type == '':
return self.relation.gerund+' > '+self.record.__str__()
return self.record.record_type.name+': '+self.relation.gerund+' > '+self.record.__str__()
def _selflink(self):
return self.record._selflink()
_selflink.allow_tags = True
_selflink.short_description = ''
@python_2_unicode_compatible
class rel_Human_Materials(models.Model):
human = models.ForeignKey('Human', on_delete=models.CASCADE)
material = models.ForeignKey('Material', verbose_name=_(u"Material artwork"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_mat+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_mat")
verbose_name_plural = _(u"Material Artworks")
def __str__(self):
if not hasattr(self.relation, 'gerund') or self.relation.gerund is None or self.relation.gerund == '':
return self.material.__str__()
else:
return self.relation.gerund+' > '+self.material.__str__()
@python_2_unicode_compatible
class rel_Human_Nonmaterials(models.Model):
human = models.ForeignKey('Human', on_delete=models.CASCADE)
nonmaterial = models.ForeignKey('Nonmaterial', verbose_name=_(u"Non-material artwork"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_non+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_inm")
verbose_name_plural = _(u"Non-material Artworks")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.nonmaterial.__str__()
else:
return self.relation.gerund+' > '+self.nonmaterial.__str__()
@python_2_unicode_compatible
class rel_Human_Persons(models.Model):
human = models.ForeignKey('Human', related_name='human_persons', on_delete=models.CASCADE)
person = models.ForeignKey('Person', related_name='rel_humans', verbose_name=_(u"Related person"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_hum+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_per")
verbose_name_plural = _(u"Related persons")
def __str__(self):
if self.relation is None or self.relation.gerund is None or self.relation.gerund == '':
return self.person.__str__()
else:
return self.relation.gerund+' > '+self.person.__str__()
def _selflink(self):
return self.person._selflink()
_selflink.allow_tags = True
_selflink.short_description = ''
@python_2_unicode_compatible
class rel_Human_Projects(models.Model):
human = models.ForeignKey('Human', related_name='human_projects', on_delete=models.CASCADE)
project = TreeForeignKey('Project', related_name='rel_humans', verbose_name=_(u"Related project"), on_delete=models.CASCADE,
help_text=_(u"Once choosed the project, save the profile to see the changes."))
relation = TreeForeignKey('Relation', related_name='hu_hum+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_pro")
verbose_name_plural = _(u"Related projects")
def __str__(self):
if self.project.project_type is None or self.project.project_type == '':
if not self.relation or self.relation.gerund is None or self.relation.gerund == '':
return self.project.__str__()
else:
return self.relation.gerund+' > '+self.project.__str__()
else:
if not self.relation or self.relation.gerund is None or self.relation.gerund == '':
return '('+self.project.project_type.being_type.name+') rel? > '+self.project.name
else:
return '('+self.project.project_type.being_type.name+') '+self.relation.gerund+' > '+self.project.name
@python_2_unicode_compatible
class rel_Human_Companies(models.Model):
human = models.ForeignKey('Human', related_name='human_companies', on_delete=models.CASCADE)
company = models.ForeignKey('Company', verbose_name=_(u"related Company"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='hu_hum+', blank=True, null=True, verbose_name=_(u"relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"H_emp")
verbose_name_plural = _(u"Related companies")
def __str__(self):
if self.relation is None or self.relation.gerund is None or self.relation.gerund == '':
return self.company.__str__()
else:
return '('+self.company.company_type.being_type.name+') '+self.relation.gerund+' > '+self.company.__str__()
"""
'''
class rel_Address_Jobs(models.Model):
address = models.ForeignKey('Address')
job = models.ForeignKey('Job', verbose_name=_(u"related Art/Job"))
relation = TreeForeignKey('Relation', related_name='ad_job+', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"job")
verbose_name_plural = _(u"related Arts/Jobs")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.job.__str__()
else:
return self.relation.gerund+' > '+self.job.__str__()
'''
# A R T S - (Verbs, Relacions, Arts, Oficis, Sectors...)
@python_2_unicode_compatible
class Art(MPTTModel): # Abstract
name = models.CharField(unique=True, max_length=200, verbose_name=_("Name"), help_text=_("The name of the Art"))
verb = models.CharField(max_length=200, blank=True, verbose_name=_("Verb"), help_text=_("The verb of the action, infinitive"))
gerund = models.CharField(max_length=200, blank=True, verbose_name=_("Gerund"), help_text=_("The verb in gerund, present"))
description = models.TextField(blank=True, verbose_name=_("Description"))
parent = TreeForeignKey('self', null=True, blank=True, related_name='subarts', on_delete=models.SET_NULL)
def __str__(self):
if self.verb:
return self.name+', '+self.verb
else:
return self.name
class Meta:
abstract = True
verbose_name = _("Art")
verbose_name_plural = _("a- Arts")
@python_2_unicode_compatible
class Relation(Art): # Create own ID's (TREE)
#art = models.OneToOneField('Art', primary_key=True, parent_link=True, on_delete=models.CASCADE)
clas = models.CharField(blank=True, verbose_name=_("Class"), max_length=50,
help_text=_("Django model or python class associated to the Relation"))
class Meta:
verbose_name= _('Relation')
verbose_name_plural= _('a- Relations')
def __str__(self):
if self.verb:
if self.clas is None or self.clas == '':
return self.verb
else:
return self.verb+' ('+self.clas+')'
else:
if self.clas is None or self.clas == '':
return self.name
else:
return self.name+' ('+self.clas+')'
@python_2_unicode_compatible
class Job(Art): # Create own ID's (TREE)
#art = models.OneToOneField('Art', primary_key=True, parent_link=True, on_delete=models.CASCADE)
clas = models.CharField(blank=True, verbose_name=_("Class"), max_length=50, help_text=_("Django model or python class associated to the Job"))
jobs = models.ManyToManyField(
'self',
through='rel_Job_Jobs',
through_fields=('job1', 'job2'),
symmetrical=False,
blank=True,
verbose_name=_("related Skills"))
class Meta:
verbose_name= _('Skill')
verbose_name_plural= _('a- Skills')
def __str__(self):
if self.clas is None or self.clas == '':
return self.name#+', '+self.verb
else:
return self.name+' ('+self.clas+')'
@python_2_unicode_compatible
class rel_Job_Jobs(models.Model):
job1 = models.ForeignKey('Job', on_delete=models.CASCADE, related_name="rel_jobs1")
job2 = TreeForeignKey('Job', on_delete=models.CASCADE, related_name="rel_jobs2") #, verbose_name=_(u"related Jobs")
relation = TreeForeignKey('Relation', on_delete=models.SET_NULL, related_name='jo_job+', blank=True, null=True)
class Meta:
verbose_name = _("J_job")
verbose_name_plural = _("Related jobs")
def __str__(self):
if self.relation is None or self.relation.gerund is None or self.relation.gerund == '':
return self.job1.__str__()
else:
return '('+self.job1.name+') '+self.relation.gerund+' > '+self.job2.__str__()
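# Editor's note (illustrative sketch, not part of the original source): Job.jobs is an
# asymmetric self-referential ManyToMany routed through rel_Job_Jobs, so related skills
# are linked by creating the through row rather than calling .add(). The object names
# below (carpentry, woodworking, includes) are hypothetical examples.
#
#     carpentry = Job.objects.create(name='Carpentry')
#     woodworking = Job.objects.create(name='Woodworking')
#     includes = Relation.objects.create(name='includes', gerund='including')
#     rel_Job_Jobs.objects.create(job1=carpentry, job2=woodworking, relation=includes)
#     carpentry.jobs.all()  # now contains woodworking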
#rel_tit = Relation.objects.get(clas='holder')
# S P A C E S - (Regions, Places, Addresses...)
@python_2_unicode_compatible
class Space(models.Model): # Abstract
name = models.CharField(verbose_name=_("Name"), max_length=100, help_text=_("The name of the Space"))
#space_type = TreeForeignKey('Space_Type', blank=True, null=True, verbose_name=_(u"Type of space"), on_delete=models.SET_NULL)
#m2 = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
abstract = True
class Space_Type(Type):
typ = models.OneToOneField('Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name= _("Type of Space")
verbose_name_plural= _("s--> Types of Spaces")
@python_2_unicode_compatible
class Address(Space): # Create own ID's
#space = models.OneToOneField('Space', primary_key=True, parent_link=True, on_delete=models.CASCADE)
address_type = TreeForeignKey('Address_Type', blank=True, null=True, verbose_name=_("Type of address"), on_delete=models.SET_NULL)
p_address = models.CharField(max_length=200, verbose_name=_("Address"), help_text=_("Postal address where post can be received"))
town = models.CharField(max_length=150, verbose_name=_("Town"), help_text=_("Town or City"))
postalcode = models.CharField(max_length=5, blank=True, null=True, verbose_name=_("Postal/Zip code"))
region = TreeForeignKey('Region', blank=True, null=True, related_name='rel_addresses', verbose_name=_("Region"), on_delete=models.SET_NULL)
#telephone = models.CharField(max_length=20, blank=True, verbose_name=_(u"Telephone"))
ic_larder = models.BooleanField(default=False, verbose_name=_("Is a Larder?"))
#main_address = models.BooleanField(default=False, verbose_name=_(u"Main address?"))
size = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True, verbose_name=_('Size'), help_text=_("Number of units (accepts 2 decimals)"))
size_unit = models.ForeignKey('Unit', blank=True, null=True, verbose_name=_("Unit of measure"), on_delete=models.SET_NULL)
longitude = models.IntegerField(blank=True, null=True, verbose_name=_("Longitude (geo)"))
latitude = models.IntegerField(blank=True, null=True, verbose_name=_("Latitude (geo)"))
jobs = models.ManyToManyField('Job', related_name='addresses', blank=True, verbose_name=_("Related Jobs"))
description = models.TextField(blank=True, null=True, verbose_name=_("Description of the Address"), help_text=_("Exact location, directions for getting there, or comments"))
def _main_addr_of(self):
'''rel = rel_Human_Addresses.objects.filter(address=self, main_address=True).first() #TODO accept various and make a list
if rel:
return rel.human
else:'''
return _('nobody')
_main_addr_of.allow_tags = True
_main_addr_of.short_description = _("Main address of")
main_addr_of = property(_main_addr_of)
class Meta:
verbose_name= _('Address')
verbose_name_plural= _('s- Addresses')
def __str__(self):
return self.name+' ('+self.p_address+' - '+self.town+')'
def _jobs_list(self):
out = ul_tag
for jo in self.jobs.all():
out += '<li><b>'+jo.verb+'</b> - '+erase_id_link('jobs', str(jo.id))+'</li>'
if out == ul_tag:
return str_none
return out+'</ul>'
_jobs_list.allow_tags = True
_jobs_list.short_description = ''
def _selflink(self):
if self.id:
return a_strG + "address/" + str(self.id) + a_str2 + a_edit +"</a>"# % str(self.id)
else:
return "Not present"
_selflink.allow_tags = True
class Address_Type(Space_Type):
addrTypeSpace_type = models.OneToOneField('Space_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type of Address")
verbose_name_plural = _("s-> Types of Addresses")
class Region(MPTTModel, Space): # Create own ID's (TREE)
#space = models.OneToOneField('Space', primary_key=True, parent_link=True, on_delete=models.CASCADE)
region_type = TreeForeignKey('Region_Type', blank=True, null=True, verbose_name=_("Type of region"), on_delete=models.SET_NULL)
parent = TreeForeignKey('self', null=True, blank=True, related_name='subregions', on_delete=models.SET_NULL)
description = models.TextField(blank=True, null=True, verbose_name=_("Description of the Region"))
class Meta:
verbose_name= _('Region')
verbose_name_plural= _('s- Regions')
class Region_Type(Space_Type):
regionType_space_type = models.OneToOneField('Space_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type of Region")
verbose_name_plural = _("s-> Types of Regions")
# A R T W O R K S - (Obres, Coses, Registres, Documents...)
@python_2_unicode_compatible
class Artwork(models.Model): # Abstract
name = models.CharField(verbose_name=_("Name"), max_length=200, blank=True, null=True) #, help_text=_(u"The name of the artwork (Record, Unit, Thing)"))
#artwork_type = TreeForeignKey('Artwork_Type', blank=True, verbose_name=_(u"Type of Artwork"), on_delete=models.SET_NULL)
description = models.TextField(blank=True, null=True, verbose_name=_("Description"))
def __str__(self):
return self.name
class Meta:
abstract = True
class Artwork_Type(Type):
typ = models.OneToOneField('Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type of Artwork")
verbose_name_plural = _("o--> Types of Artworks")
# - - - - - N O N - M A T E R I A L
"""
@python_2_unicode_compatible
class rel_Nonmaterial_Records(models.Model):
nonmaterial = models.ForeignKey('Nonmaterial')
record = models.ForeignKey('Record', verbose_name=_(u"related Record"))
relation = TreeForeignKey('Relation', related_name='no_reg+', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"N_rec")
verbose_name_plural = _(u"Related records")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.record.__str__()
else:
return '('+self.record.record_type.name+') '+self.relation.gerund+' > '+self.record.__str__()
@python_2_unicode_compatible
class rel_Nonmaterial_Addresses(models.Model):
nonmaterial = models.ForeignKey('Nonmaterial')
address = models.ForeignKey('Address', verbose_name=_(u"related Address"))
relation = TreeForeignKey('Relation', related_name='no_adr+', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"N_adr")
verbose_name_plural = _(u"Related addresses")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.address.__str__()
else:
return '('+self.address.address_type.name+') '+self.relation.gerund+' > '+self.address.__str__()
@python_2_unicode_compatible
class rel_Nonmaterial_Jobs(models.Model):
nonmaterial = models.ForeignKey('Nonmaterial')
job = models.ForeignKey('Job', related_name='nonmaterials', verbose_name=_(u"related Arts/Jobs"))
relation = TreeForeignKey('Relation', related_name='no_job+', blank=True, null=True, verbose_name=_(u"Relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"N_ofi")
verbose_name_plural = _(u"Related Arts/Jobs")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.job.__str__()
else:
return self.relation.gerund+' > '+self.job.__str__()
@python_2_unicode_compatible
class rel_Nonmaterial_Nonmaterials(models.Model):
nonmaterial = models.ForeignKey('Nonmaterial')
nonmaterial2 = models.ForeignKey('Nonmaterial', related_name='subnonmaterials', verbose_name=_(u"related Non-material Artworks"))
relation = TreeForeignKey('Relation', related_name='ma_mat+', blank=True, null=True, verbose_name=_(u"Relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"N_mat")
verbose_name_plural = _(u"related Non-material artworks")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.nonmaterial2.__str__()
else:
return '('+self.nonmaterial2.material_type.name+') '+self.relation.gerund+' > '+self.nonmaterial2.__str__()
class Nonmaterial(Artwork): # Create own ID's
nonmaterial_type = TreeForeignKey('Nonmaterial_Type', blank=True, null=True, verbose_name=_(u"Type of non-material artwork"), on_delete=models.SET_NULL)
records = models.ManyToManyField('Record', through='rel_Nonmaterial_Records', blank=True, verbose_name=_(u"related Records"))
addresses = models.ManyToManyField('Address', through='rel_Nonmaterial_Addresses', blank=True, verbose_name=_(u"related Addresses"))
jobs = models.ManyToManyField('Job', through='rel_Nonmaterial_Jobs', blank=True, verbose_name=_(u"related Arts/Jobs"))
nonmaterials = models.ManyToManyField('self', through='rel_Nonmaterial_Nonmaterials', symmetrical=False, blank=True, verbose_name=_(u"related Non-material artworks"))
class Meta:
verbose_name = _(u"Non-material Artwork")
verbose_name_plural = _(u"o- Non-material Artworks")
"""
class Nonmaterial_Type(Artwork_Type):
nonmaterialType_artwork_type = models.OneToOneField('Artwork_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name= _("Type of Non-material artwork")
verbose_name_plural= _("o-> Types of Non-material artworks")
"""
class Image(Nonmaterial):
image_nonmaterial = models.OneToOneField('Nonmaterial', primary_key=True, parent_link=True, on_delete=models.CASCADE)
image_image = models.ImageField(upload_to='files/images', height_field='height', width_field='width',
blank=True, null=True,
verbose_name=_(u"Image (jpg/png)"))
#footer = models.TextField(blank=True, null=True, verbose_name=_(u"Image caption"))
url = models.URLField(blank=True, null=True, verbose_name=_(u"Url of the image"))
height = models.IntegerField(blank=True, null=True, verbose_name=_(u"Height"))
width = models.IntegerField(blank=True, null=True, verbose_name=_(u"Width"))
class Meta:
verbose_name = _(u"Image")
verbose_name_plural = _(u"o- Images")
"""
# - - - - - M A T E R I A L
"""
@python_2_unicode_compatible
class rel_Material_Nonmaterials(models.Model):
material = models.ForeignKey('Material', on_delete=models.CASCADE)
nonmaterial = models.ForeignKey('Nonmaterial', verbose_name=_(u"related Non-material"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='ma_non+', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"M_inm")
verbose_name_plural = _(u"related Non-materials")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.nonmaterial.__str__()
else:
return '('+self.nonmaterial.nonmaterial_type.name+') '+self.relation.gerund+' > '+self.nonmaterial.__str__()
@python_2_unicode_compatible
class rel_Material_Records(models.Model):
material = models.ForeignKey('Material', on_delete=models.CASCADE)
record = models.ForeignKey('Record', verbose_name=_(u"related Record"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='ma_reg+', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"M_rec")
verbose_name_plural = _(u"related Records")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.record.__str__()
else:
return '('+self.record.record_type.name+') '+self.relation.gerund+' > '+self.record.__str__()
@python_2_unicode_compatible
class rel_Material_Addresses(models.Model):
material = models.ForeignKey('Material', on_delete=models.CASCADE)
address = models.ForeignKey('Address', related_name='materials', verbose_name=_(u"related Address"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='ma_adr+', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"M_adr")
verbose_name_plural = _(u"related Addresses")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.address.__str__()
else:
return '('+self.address.address_type.name+') '+self.relation.gerund+' > '+self.address.__str__()
@python_2_unicode_compatible
class rel_Material_Materials(models.Model):
material = models.ForeignKey('Material', on_delete=models.CASCADE)
material2 = models.ForeignKey('Material', related_name='submaterials', verbose_name=_(u"related Material artworks"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='ma_mat+', blank=True, null=True, verbose_name=_(u"Relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"M_mat")
verbose_name_plural = _(u"related Material artworks")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.material2.__str__()
else:
return '('+self.material2.material_type.name+') '+self.relation.gerund+' > '+self.material2.__str__()
@python_2_unicode_compatible
class rel_Material_Jobs(models.Model):
material = models.ForeignKey('Material', on_delete=models.CASCADE)
job = models.ForeignKey('Job', related_name='materials', verbose_name=_(u"related Arts/Jobs"), on_delete=models.CASCADE)
relation = TreeForeignKey('Relation', related_name='ma_job+', blank=True, null=True, verbose_name=_(u"Relation"), on_delete=models.SET_NULL)
class Meta:
verbose_name = _(u"M_ofi")
verbose_name_plural = _(u"related Arts/Jobs")
def __str__(self):
if self.relation.gerund is None or self.relation.gerund == '':
return self.job.__str__()
else:
return self.relation.gerund+' > '+self.job.__str__()
class Material(Artwork): # Create own ID's
material_type = TreeForeignKey('Material_Type', blank=True, null=True, verbose_name=_(u"Type of physical artwork"), on_delete=models.SET_NULL)
nonmaterials = models.ManyToManyField('Nonmaterial', through='rel_Material_Nonmaterials', blank=True, verbose_name=_(u"related Non-materials"))
records = models.ManyToManyField('Record', through='rel_Material_Records', blank=True, verbose_name=_(u"related Records"))
addresses = models.ManyToManyField('Address', through='rel_Material_Addresses', blank=True, verbose_name=_(u"related Addresses"))
materials = models.ManyToManyField('self', through='rel_Material_Materials', symmetrical=False, blank=True, verbose_name=_(u"related Material artworks"))
jobs = models.ManyToManyField('Job', through='rel_Material_Jobs', blank=True, verbose_name=_(u"related Arts/Jobs"))
class Meta:
verbose_name = _(u"Material Artwork")
verbose_name_plural = _(u"o- Material Artworks")
def _addresses_list(self):
out = ul_tag
print(self.addresses.all())
if self.addresses.all().count() > 0:
for add in self.addresses.all():
rel = add.materials.filter(material=self).first().relation
out += '<li>'+rel.gerund+': <b>'+add.__str__()+'</b></li>'
return out+'</ul>'
return str_none
_addresses_list.allow_tags = True
_addresses_list.short_description = _(u"related Addresses?")
def _jobs_list(self):
out = ul_tag
print(self.jobs.all())
if self.jobs.all().count() > 0:
for job in self.jobs.all():
rel = job.materials.filter(material=self).first().relation
out += '<li>'+rel.gerund+': <b>'+job.__str__()+'</b></li>'
return out+'</ul>'
return str_none
_jobs_list.allow_tags = True
_jobs_list.short_description = _(u"related Arts/Jobs?")
"""
class Material_Type(Artwork_Type):
materialType_artwork_type = models.OneToOneField('Artwork_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name= _("Type of Material artwork")
verbose_name_plural= _("o-> Types of Material artworks")
"""
@python_2_unicode_compatible
class Asset(Material):
asset_material = models.OneToOneField('Material', primary_key=True, parent_link=True, on_delete=models.CASCADE)
asset_human = models.ForeignKey('Human', verbose_name=_(u"Entity"), on_delete=models.CASCADE)
reciprocity = models.TextField(blank=True, verbose_name=_(u"Reciprocity"))
class Meta:
verbose_name = _(u"Asset")
verbose_name_plural = _(u"o- Assets")
def __str__(self):
return '('+self.material_type.name+') '+self.material.name
def _selflink(self):
if self.id:
return a_strG + "asset/" + str(self.id) + a_str2 + a_edit +"</a>"# % str(self.id)
else:
return "Not present"
_selflink.allow_tags = True
_selflink.short_description = ''
"""
# - - - - - U N I T S
from valuenetwork.valueaccounting.models import Unit as Ocp_Unit
@python_2_unicode_compatible
class Unit(Artwork): # Create own ID's
unit_type = TreeForeignKey('Unit_Type', blank=True, null=True, verbose_name=_("Type of Unit"), on_delete=models.SET_NULL)
code = models.CharField(max_length=4, verbose_name=_("Code or Symbol"))
region = TreeForeignKey('Region', blank=True, null=True, verbose_name=_("related use Region"), on_delete=models.SET_NULL)
#human = models.ForeignKey('Human', blank=True, null=True, verbose_name=_(u"related Entity"))
ocp_unit = models.OneToOneField(Ocp_Unit, blank=True, null=True, verbose_name=_("OCP Unit"), related_name="gen_unit", on_delete=models.SET_NULL)
class Meta:
verbose_name= _('Unit')
verbose_name_plural= _('o- Units')
def __str__(self):
if hasattr(self, 'ocp_unit') and self.ocp_unit:
return self.name+' <'
else:
return self.unit_type.name+': '+self.name
class Unit_Type(Artwork_Type):
unitType_artwork_type = models.OneToOneField('Artwork_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name = _("Type of Unit")
verbose_name_plural = _("o-> Types of Units")
# - - - - - R E C O R D
@python_2_unicode_compatible
class Record(Artwork): # Create own ID's
record_type = TreeForeignKey('Record_Type', blank=True, null=True, verbose_name=_("Type of Record"), on_delete=models.SET_NULL)
changed_date = models.DateTimeField(auto_now=True, blank=True, null=True, editable=False)
class Meta:
verbose_name= _('Record')
verbose_name_plural= _('o- Records')
def __str__(self):
if self.record_type is None or self.record_type == '':
return self.name
else:
return self.record_type.name+': '+self.name
def _selflink(self):
if self.id:
return a_strG + "record/" + str(self.id) + a_str2 + a_edit +"</a>"# % str(self.id)
else:
return "Not present"
_selflink.allow_tags = True
class Record_Type(Artwork_Type):
recordType_artwork_type = models.OneToOneField('Artwork_Type', primary_key=True, parent_link=True, on_delete=models.CASCADE)
class Meta:
verbose_name= _('Type of Record')
verbose_name_plural= _('o-> Types of Records')
@python_2_unicode_compatible
class UnitRatio(Record):
record = models.OneToOneField('Record', primary_key=True, parent_link=True, on_delete=models.CASCADE)
in_unit = models.ForeignKey('Unit', related_name='ratio_in', verbose_name=_("in Unit"), on_delete=models.CASCADE)
rate = models.DecimalField(max_digits=50, decimal_places=9, verbose_name=_("Ratio multiplier"), default=Decimal("0.0"))
out_unit = models.ForeignKey('Unit', related_name='ratio_out', verbose_name=_("out Unit"), on_delete=models.CASCADE)
class Meta:
verbose_name = _("Equivalence between Units")
verbose_name_plural = _("o- Equivalences between Units")
def __str__(self):
return self.in_unit.name+' * '+str(self.rate)+' = '+self.out_unit.name
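# Editor's note (illustrative sketch, not part of the original source): a UnitRatio row
# reads as "1 in_unit equals `rate` out_unit", so converting an amount is a single
# multiplication. The unit objects below (hours, euro) are hypothetical examples.
#
#     ratio = UnitRatio.objects.get(in_unit=hours, out_unit=euro)
#     amount_in_euro = Decimal("2.5") * ratio.rate  # 2.5 hours expressed in euros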
"""
@python_2_unicode_compatible
class AccountCes(Record):
record = models.OneToOneField('Record', primary_key=True, parent_link=True, on_delete=models.CASCADE)
accCes_human = models.ForeignKey('Human', related_name='accountsCes', verbose_name=_(u"Owner human entity"), on_delete=models.CASCADE)
entity = models.ForeignKey('Project', verbose_name=_(u"Network of the account"), on_delete=models.CASCADE)
unit = models.ForeignKey('Unit', verbose_name=_(u"Unit (currency)"), on_delete=models.CASCADE)
code = models.CharField(max_length=10, blank=True, null=True, verbose_name=_(u"Network code"))
number = models.CharField(max_length=4, blank=True, null=True, verbose_name=_(u"Account number"))
class Meta:
verbose_name= _(u'CES Account')
verbose_name_plural= _(u'o- CES Accounts')
def __str__(self):
return '('+self.unit.code+') '+self.accCes_human.nickname + ' ' + self.code + self.number#+' '+self.name
@python_2_unicode_compatible
class AccountBank(Record):
record = models.OneToOneField('Record', primary_key=True, parent_link=True, on_delete=models.CASCADE)
accBnk_human = models.ForeignKey('Human', related_name='accountsBank', verbose_name=_(u"Owner human entity"), on_delete=models.CASCADE)
company = models.ForeignKey('Company', blank=True, null=True, verbose_name=_(u"Bank entity"), on_delete=models.SET_NULL)
unit = models.ForeignKey('Unit', blank=True, null=True, verbose_name=_(u"Unit (currency)"), on_delete=models.SET_NULL)
code = models.CharField(max_length=11, blank=True, null=True, verbose_name=_(u"SWIFT/BIC Code"))
number = models.CharField(max_length=34, blank=True, null=True, verbose_name=_(u"IBAN Account number"))
bankcard = models.BooleanField(default=False, verbose_name=_(u"with bank Card?"))
class Meta:
verbose_name= _(u'Bank Account')
verbose_name_plural= _(u'o- Bank Accounts')
def __str__(self):
try:
return '('+self.unit.code+') '+self.company.nickname+': '+self.accBnk_human.nickname + ' ' + self.number
except:
return "<empty project>"
@python_2_unicode_compatible
class AccountCrypto(Record):
record = models.OneToOneField('Record', primary_key=True, parent_link=True, on_delete=models.CASCADE)
accCrypt_human = models.ForeignKey('Human', related_name='accountsCrypto', verbose_name=_(u"Owner human entity"), on_delete=models.CASCADE)
unit = models.ForeignKey('Unit', verbose_name=_(u"Unit (currency)"), on_delete=models.CASCADE)
number = models.CharField(max_length=34, blank=True, verbose_name=_(u"Address of the wallet"))
class Meta:
verbose_name = _(u"Cryptocurrency Account")
verbose_name_plural = _(u"o- Cryptocurrency Accounts")
def __str__(self):
return '('+self.unit.code+') '+self.accCrypt_human.nickname + ' ' + self.number # +' '+self.name
"""
# B A S I C D B R E C O R D S ##
from django.db.models.signals import post_migrate
#from general.apps import GeneralAppConfig
def create_general_types(**kwargs):
sep = ", "
out = "Initial basic types created: <br>"
being, created = Type.objects.get_or_create(name_en='Being', clas='Being')
if created: out += str(being)+sep
artwork, created = Type.objects.get_or_create(name_en='Artwork', clas='Artwork')
if created: out += str(artwork)+sep
space, created = Type.objects.get_or_create(name_en='Space', clas='Space')
if created: out += str(space)+'<br>'
"""human, created = Being_Type.objects.get_or_create(name_en='Human', clas='Human', parent=being)
if created: out += str(human)+": "
persons = Being_Type.objects.filter(name_en="Person")
if persons:
if len(persons) > 1:
out += "ERROR there's more than one 'Person' as a Being_Type ?"+'<br>'
return out
else:
person = persons[0]
else:
person, created = Being_Type.objects.get_or_create(name_en='Person', parent=human)
if created: out += str(person)+sep
person.clas = 'Person'
person.parent = human
person.save()
projects = Being_Type.objects.filter(name_en="Project")
if projects:
if len(projects) > 1:
out += "ERROR there's more than one 'Project' as a Being_Type ?"+'<br>'
return out
else:
project = projects[0]
else:
project, created = Being_Type.objects.get_or_create(name_en='Project', parent=human)
if created: out += str(project)+sep
project.clas = 'Project'
project.parent = human
project.save()
companys = Being_Type.objects.filter(name_en="Company")
if companys:
if len(companys) > 1:
out += "ERROR there's more than one 'Company' as a Being_Type ?"+'<br>'
return out
else:
company = companys[0]
else:
company, created = Being_Type.objects.get_or_create(name_en='Company', parent=human)
if created: out += str(company)+'<br>'
company.clas = 'Company'
company.parent = human
company.save()
material, created = Artwork_Type.objects.get_or_create(name_en='Material', clas='Material', parent=artwork)
if created: out += str(material)+sep
nonmaterial, created = Artwork_Type.objects.get_or_create(name_en='Non-material', clas='Nonmaterial', parent=artwork)
if created: out += str(nonmaterial)+sep"""
record, created = Artwork_Type.objects.get_or_create(name_en='Record', clas='Record', parent=artwork)
if created: out += str(record)+sep
unit, created = Artwork_Type.objects.get_or_create(name_en='Unit', clas='Unit', parent=artwork)
if created: out += str(unit)+sep
"""currency, created = Unit_Type.objects.get_or_create(name_en='Currency', parent=unit)
if created: out += str(currency)+sep
social, created = Unit_Type.objects.get_or_create(name_en='MutualCredit currency', parent=currency)
if created: out += str(social)+sep
crypto, created = Unit_Type.objects.get_or_create(name_en='Cryptocurrency', parent=currency)
if created: out += str(crypto)+sep
fiat, created = Unit_Type.objects.get_or_create(name_en='Fiat currency', parent=currency)
if created: out += str(fiat)+'<br>'
"""
region, created = Space_Type.objects.get_or_create(name_en='Region', clas='Region', parent=space)
if created: out += str(region)+sep
address, created = Space_Type.objects.get_or_create(name_en='Address', clas='Address', parent=space)
if created: out += str(address)+'<br>'
unitratio, created = Record_Type.objects.get_or_create(name_en='Unit Ratio', clas='UnitRatio', parent=record)
if created: out += str(unitratio)+sep
"""ces, created = Record_Type.objects.get_or_create(name_en='Account Ces', clas='AccountCes', parent=record)
if created: out += str(ces)+sep
bank, created = Record_Type.objects.get_or_create(name_en='Account Bank', clas='AccountBank', parent=record)
if created: out += str(bank)+sep"""
print(out)
return out
#post_migrate.connect(create_general_types, sender=GeneralAppConfig)
|
FreedomCoop/valuenetwork
|
general/models.py
|
Python
|
agpl-3.0
| 55,292
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Authors: Stéphane Bidoul & Olivier Laurent
# Copyright (c) 2012 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from . import hr_utilization_print
|
acsone/acsone-addons
|
hr_utilization/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,429
|
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base import exceptions as exc
from django.apps import apps
from django.core import signing
from django.utils.translation import ugettext as _
def get_token_for_user(user, scope):
"""
Generate a new signed token for the given user,
limited to a scope (identified by a string).
"""
data = {"user_%s_id" % (scope): user.id}
return signing.dumps(data)
def get_user_for_token(token, scope, max_age=None):
"""
Given a self-contained token and a scope, try to parse and
unsign it.
If max_age is specified, token expiration is checked.
If the token passes validation, return the user instance
corresponding to the user_id stored in the incoming token.
"""
try:
data = signing.loads(token, max_age=max_age)
except signing.BadSignature:
raise exc.NotAuthenticated(_("Invalid token"))
model_cls = apps.get_model("users", "User")
try:
user = model_cls.objects.get(pk=data["user_%s_id" % (scope)])
except (model_cls.DoesNotExist, KeyError):
raise exc.NotAuthenticated(_("Invalid token"))
else:
return user
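# Editor's note (illustrative sketch, not part of the original module): typical round
# trip, assuming `user` is an existing users.User instance and the same scope string is
# used on both sides.
#
#     token = get_token_for_user(user, "authentication")
#     same_user = get_user_for_token(token, "authentication", max_age=3600)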
|
gam-phon/taiga-back
|
taiga/auth/tokens.py
|
Python
|
agpl-3.0
| 2,045
|
import json
import tempfile
import os
from werkzeug import secure_filename
from webant.util import send_attachment_file, routes_collector
from flask import request, current_app, url_for, jsonify
from archivant import Archivant
from archivant.exceptions import NotFoundException
from util import ApiError, make_success_response
routes = []
route = routes_collector(routes)
@route('/volumes/')
def get_volumes():
q = request.args.get('q', "*:*")
try:
from_ = int(request.args.get('from', 0))
except ValueError:
raise ApiError("Bad Request", 400, details="could not convert 'from' parameter to number")
try:
size = int(request.args.get('size', 10))
except ValueError:
raise ApiError("Bad Request", 400, details="could not convert 'size' parameter to number")
if size > current_app.config.get('MAX_RESULTS_PER_PAGE', 50):
raise ApiError("Request Entity Too Large", 413, details="'size' parameter is too high")
q_res = current_app.archivant._db.get_books_querystring(query=q, from_=from_, size=size)
volumes = map(Archivant.normalize_volume, q_res['hits']['hits'])
next_args = "?q={}&from={}&size={}".format(q, from_ + size, size)
prev_args = "?q={}&from={}&size={}".format(q, from_ - size if ((from_ - size) > -1) else 0, size)
base_url = url_for('.get_volumes', _external=True)
res = {'link_prev': base_url + prev_args,
'link_next': base_url + next_args,
'total': q_res['hits']['total'],
'data': volumes}
return jsonify(res)
@route('/volumes/', methods=['POST'])
def add_volume():
metadata = receive_volume_metadata()
try:
volumeID = current_app.archivant.insert_volume(metadata)
except ValueError, e:
raise ApiError("malformed metadata", 400, details=str(e))
link_self = url_for('.get_volume', volumeID=volumeID, _external=True)
response = jsonify({'data': {'id': volumeID, 'link_self': link_self}})
response.status_code = 201
response.headers['Location'] = link_self
return response
@route('/volumes/<volumeID>', methods=['PUT'])
def update_volume(volumeID):
metadata = receive_volume_metadata()
try:
current_app.archivant.update_volume(volumeID, metadata)
except NotFoundException, e:
raise ApiError("volume not found", 404, details=str(e))
except ValueError, e:
raise ApiError("malformed metadata", 400, details=str(e))
return make_success_response("volume successfully updated", 201)
@route('/volumes/<volumeID>', methods=['GET'])
def get_volume(volumeID):
try:
volume = current_app.archivant.get_volume(volumeID)
except NotFoundException, e:
raise ApiError("volume not found", 404, details=str(e))
return jsonify({'data': volume})
@route('/volumes/<volumeID>', methods=['DELETE'])
def delete_volume(volumeID):
try:
current_app.archivant.delete_volume(volumeID)
except NotFoundException, e:
raise ApiError("volume not found", 404, details=str(e))
return make_success_response("volume has been successfully deleted")
@route('/volumes/<volumeID>/attachments/', methods=['GET'])
def get_attachments(volumeID):
try:
atts = current_app.archivant.get_volume(volumeID)['attachments']
except NotFoundException, e:
raise ApiError("volume not found", 404, details=str(e))
return jsonify({'data': atts})
@route('/volumes/<volumeID>/attachments/', methods=['POST'])
def add_attachments(volumeID):
metadata = receive_metadata(optional=True)
if 'file' not in request.files:
raise ApiError("malformed request", 400, details="file not found under 'file' key")
upFile = request.files['file']
tmpFileFd, tmpFilePath = tempfile.mkstemp()
upFile.save(tmpFilePath)
fileInfo = {}
fileInfo['file'] = tmpFilePath
fileInfo['name'] = secure_filename(upFile.filename)
fileInfo['mime'] = upFile.mimetype
fileInfo['notes'] = metadata.get('notes', '')
# close fileDescriptor
os.close(tmpFileFd)
try:
attachmentID = current_app.archivant.insert_attachments(volumeID, attachments=[fileInfo])[0]
except NotFoundException, e:
raise ApiError("volume not found", 404, details=str(e))
finally:
# remove temp files
os.remove(fileInfo['file'])
link_self = url_for('.get_attachment', volumeID=volumeID, attachmentID=attachmentID, _external=True)
response = jsonify({'data': {'id': attachmentID, 'link_self': link_self}})
response.status_code = 201
response.headers['Location'] = link_self
return response
@route('/volumes/<volumeID>/attachments/<attachmentID>', methods=['GET'])
def get_attachment(volumeID, attachmentID):
try:
att = current_app.archivant.get_attachment(volumeID, attachmentID)
except NotFoundException, e:
raise ApiError("attachment not found", 404, details=str(e))
return jsonify({'data': att})
@route('/volumes/<volumeID>/attachments/<attachmentID>', methods=['DELETE'])
def delete_attachment(volumeID, attachmentID):
try:
current_app.archivant.delete_attachments(volumeID, [attachmentID])
except NotFoundException, e:
raise ApiError("attachment not found", 404, details=str(e))
return make_success_response("attachment has been successfully deleted")
@route('/volumes/<volumeID>/attachments/<attachmentID>', methods=['PUT'])
def update_attachment(volumeID, attachmentID):
metadata = receive_metadata()
try:
current_app.archivant.update_attachment(volumeID, attachmentID, metadata)
except ValueError, e:
raise ApiError("malformed request", 400, details=str(e))
return make_success_response("attachment has been successfully updated")
@route('/volumes/<volumeID>/attachments/<attachmentID>/file', methods=['GET'])
def get_file(volumeID, attachmentID):
try:
return send_attachment_file(current_app.archivant, volumeID, attachmentID)
except NotFoundException, e:
raise ApiError("file not found", 404, details=str(e))
def receive_volume_metadata():
metadata = receive_metadata()
# TODO check also for preset consistency?
requiredFields = ['_language']
for requiredField in requiredFields:
if requiredField not in metadata:
raise ApiError("malformed metadata", 400, details="Required field '{}' is missing in metadata".format(requiredField))
return metadata
def receive_metadata(optional=False):
if optional and 'metadata' not in request.values:
return {}
try:
metadata = json.loads(request.values['metadata'])
except KeyError:
raise ApiError("malformed request", 400, details="missing 'metadata' in request")
except Exception, e:
raise ApiError("malformed metadata", 400, details=str(e))
if not isinstance(metadata, dict):
raise ApiError("malformed metadata", 400, details="metadata value should be a json object")
return metadata
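# Editor's note (illustrative sketch, not part of the original module): the `routes`
# list collected by routes_collector is presumably registered on the application
# elsewhere in webant; once mounted, the listing endpoint can be exercised with a
# plain HTTP call (the URL prefix below is assumed, not taken from this file):
#
#     curl 'http://localhost:5000/api/v1/volumes/?q=*:*&from=0&size=10'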
|
ael-code/libreant
|
webant/api/archivant_api.py
|
Python
|
agpl-3.0
| 6,972
|
"""
MIT License
Copyright (c) 2017 Hajime Nakagami<nakagami@gmail.com>
Copyright (c) 2019 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# This is the adaptation of the program found on:
# https://gist.github.com/nakagami/7a7d799bd4bd4ad8fcea96135c4af179
import os, sys, random, itertools, time
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append("../../atlastk")
import atlastk
EMPTY = 0
BLACK = -1
WHITE = 1
# http://uguisu.skr.jp/othello/5-1.html
WEIGHT_MATRIX = [
[120, -20, 20, 5, 5, 20, -20, 120],
[-20, -40, -5, -5, -5, -5, -40, -20],
[20, -5, 15, 3, 3, 15, -5, 20],
[5, -5, 3, 3, 3, 3, -5, 5],
[5, -5, 3, 3, 3, 3, -5, 5],
[20, -5, 15, 3, 3, 15, -5, 20],
[-20, -40, -5, -5, -5, -5, -40, -20],
[120, -20, 20, 5, 5, 20, -20, 120],
]
class Reversi:
def reset(self):
self.board = []
for _ in range(8):
self.board.append([EMPTY] * 8)
self.board[3][3] = self.board[4][4] = BLACK
self.board[4][3] = self.board[3][4] = WHITE
def __init__(self, orig=None):
self.reset()
# copy constructor
if orig:
assert isinstance(orig, Reversi)
for i in range(8):
for j in range(8):
self.board[i][j] = orig.board[i][j]
def count(self, bwe):
"Count pieces of a given color, or empty squares, on the board"
assert bwe in (BLACK, WHITE, EMPTY)
n = 0
for i in range(8):
for j in range(8):
if self.board[i][j] == bwe:
n += 1
return n
def _has_my_piece(self, bw, x, y, delta_x, delta_y):
"Return True if one of my pieces is reached in the direction (delta_x, delta_y) from (x, y)."
assert bw in (BLACK, WHITE)
assert delta_x in (-1, 0, 1)
assert delta_y in (-1, 0, 1)
x += delta_x
y += delta_y
if x < 0 or x > 7 or y < 0 or y > 7 or self.board[x][y] == EMPTY:
return False
if self.board[x][y] == bw:
return True
return self._has_my_piece(bw, x, y, delta_x, delta_y)
def reversible_directions(self, bw, x, y):
"Can a piece be put on (x, y)? Return the list of reversible direction tuples."
assert bw in (BLACK, WHITE)
directions = []
if self.board[x][y] != EMPTY:
return directions
for d in itertools.product([-1, 1, 0], [-1, 1, 0]):
if d == (0, 0):
continue
nx = x + d[0]
ny = y + d[1]
if nx < 0 or nx > 7 or ny < 0 or ny > 7 or self.board[nx][ny] != bw * -1:
continue
if self._has_my_piece(bw, nx, ny, d[0], d[1]):
directions.append(d)
return directions
def _reverse_piece(self, bw, x, y, delta_x, delta_y):
"Reverse pieces in the direction of (delta_x, delta_y) from (x, y) until a piece of bw's color is reached."
assert bw in (BLACK, WHITE)
x += delta_x
y += delta_y
assert self.board[x][y] in (BLACK, WHITE)
if self.board[x][y] == bw:
return
self.board[x][y] = bw
return self._reverse_piece(bw, x, y, delta_x, delta_y)
def isAllowed(self, x, y, bw):
return len(self.reversible_directions(bw, x, y)) != 0
def put(self, x, y, bw):
"""
True: Put bw's piece on (x, y) and change board status.
False: Can't put bw's piece on (x, y)
"""
assert bw in (BLACK, WHITE)
directions = self.reversible_directions(bw, x, y)
if len(directions) == 0:
return False
self.board[x][y] = bw
for delta in directions:
self._reverse_piece(bw, x, y, delta[0], delta[1])
return True
def _calc_score(self, bw, weight_matrix):
assert bw in (BLACK, WHITE)
my_score = 0
against_score = 0
for i in range(8):
for j in range(8):
if self.board[i][j] == bw:
my_score += weight_matrix[i][j]
elif self.board[i][j] == bw * -1:
against_score += weight_matrix[i][j]
return my_score - against_score
def find_best_position(self, bw, weight_matrix):
"Return the best next position."
assert bw in (BLACK, WHITE)
next_positions = {}
for i in range(8):
for j in range(8):
reversi = Reversi(self)
if reversi.put(i, j, bw):
next_positions.setdefault(
reversi._calc_score(bw, weight_matrix), []
).append((i, j))
if next_positions:
next_position = random.choice(next_positions[max(next_positions)])
else:
next_position = None
return next_position
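# Editor's note (illustrative, not part of the original source): find_best_position is
# a greedy one-ply search: each legal move is simulated on a board copy, scored as
# (own weighted sum - opponent weighted sum) over WEIGHT_MATRIX, and one of the
# top-scoring moves is chosen at random. For example:
#
#     game = Reversi()
#     move = game.find_best_position(WHITE, WEIGHT_MATRIX)  # a coordinate tuple, or None if no legal move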
# -------------------------------------------------------------------------------
def drawBoard(reversi, dom, prefetch=False):
board = atlastk.createHTML("tbody")
for y, row in enumerate(reversi.board):
board.push_tag("tr")
for x, r in enumerate(row):
board.push_tag("td")
board.put_attribute("id", str(x) + str(y))
if (r == EMPTY) and (reversi.isAllowed(y, x, reversi.player)):
board.put_attribute("xdh:onevent", "Play")
if (prefetch == True):
r = reversi.player
board.put_attribute(
"style", "opacity: 0.1; background-color: white;")
board.put_attribute(
"class", {EMPTY: 'none', BLACK: 'black', WHITE: 'white'}[r])
board.pop_tag()
board.pop_tag()
dom.inner("board", board)
dom.set_values({
"black": reversi.count(BLACK),
"white": reversi.count(WHITE)
})
def acConnect(reversi, dom):
reversi.player = BLACK
reversi.weight_matrix = WEIGHT_MATRIX
dom.inner("", open("Main.html").read())
drawBoard(reversi, dom)
dom.alert("Welcome to this Reversi (aka Othello) game made with the Atlas toolkit.\n\nYou play against the computer with the black pieces.")
def acPlay(reversi, dom, id):
xy = [int(id[1]), int(id[0])]
player = reversi.player
weight_matrix = reversi.weight_matrix
if (reversi.put(xy[0], xy[1], player)):
drawBoard(reversi, dom, False)
xy = reversi.find_best_position(player * -1, weight_matrix)
if xy:
reversi.put(xy[0], xy[1], player * -1)
time.sleep(1)
drawBoard(reversi, dom)
if (reversi.count(EMPTY) == 0 or
reversi.count(BLACK) == 0 or
reversi.count(WHITE) == 0):
if reversi.count(player) > reversi.count(player * -1):
dom.alert('You win!')
elif reversi.count(player) < reversi.count(player * -1):
dom.alert('You lose!')
else:
dom.alert("It's a draw!")
def acNew(reversi, dom):
reversi.reset()
drawBoard(reversi, dom)
callbacks = {
"": acConnect,
"Play": acPlay,
"New": acNew
}
atlastk.launch(callbacks, Reversi, open("Head.html").read())
|
epeios-q37/epeios
|
tools/xdhq/examples/PYH/ReversiIMG/main.py
|
Python
|
agpl-3.0
| 7,631
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Resource'
db.create_table('inventory_resource', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('trainable', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('inventory', ['Resource'])
# Adding model 'Metadata'
db.create_table('inventory_metadata', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.IntegerField')()),
('value', self.gf('django.db.models.fields.TextField')()),
('resource', self.gf('django.db.models.fields.related.ForeignKey')(related_name='metadata', to=orm['inventory.Resource'])),
))
db.send_create_signal('inventory', ['Metadata'])
# Adding model 'TrainingLevel'
db.create_table('inventory_traininglevel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('member', self.gf('django.db.models.fields.related.ForeignKey')(related_name='trainings', to=orm['membership.Member'])),
('resource', self.gf('django.db.models.fields.related.ForeignKey')(related_name='trainings', to=orm['inventory.Resource'])),
('rank', self.gf('django.db.models.fields.IntegerField')()),
('comments', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('inventory', ['TrainingLevel'])
def backwards(self, orm):
# Deleting model 'Resource'
db.delete_table('inventory_resource')
# Deleting model 'Metadata'
db.delete_table('inventory_metadata')
# Deleting model 'TrainingLevel'
db.delete_table('inventory_traininglevel')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'inventory.metadata': {
'Meta': {'object_name': 'Metadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['inventory.Resource']"}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.TextField', [], {})
},
'inventory.resource': {
'Meta': {'object_name': 'Resource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'trainable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['membership.Member']", 'through': "orm['inventory.TrainingLevel']", 'symmetrical': 'False'})
},
'inventory.traininglevel': {
'Meta': {'object_name': 'TrainingLevel'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trainings'", 'to': "orm['membership.Member']"}),
'rank': ('django.db.models.fields.IntegerField', [], {}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trainings'", 'to': "orm['inventory.Resource']"})
},
'membership.field': {
'Meta': {'object_name': 'Field'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'membership.fieldvalue': {
'Meta': {'object_name': 'FieldValue'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.Field']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['membership.Member']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'membership.member': {
'Meta': {'object_name': 'Member'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['membership.Field']", 'through': "orm['membership.FieldValue']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastSeen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['inventory']
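# Editor's note (illustrative, not part of the original migration): with South installed,
# this initial schema migration is applied through the usual management command, e.g.:
#
#     python manage.py migrate inventory 0001_initial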
|
SYNHAK/spiff
|
spiff/inventory/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 9,068
|
# -*- coding: utf-8 -*-
'''
Created on 21 janv. 2016
@author: christian
'''
import falcon
import os
import config
import db
class Stat(object):
'''
Get global statistics
'''
def on_get(self, req, resp):
'''Return global statistics
'''
dbc = db.connect()
cur = dbc.cursor()
query = """select format('{"nb_maps":%s,"nb_addr":%s,"last_map":"%s"}',
count(*),
count(distinct(address)),
left(max(time)::text,19)) as stats from maps;"""
cur.execute(query)
stats = cur.fetchone()[0]
resp.set_header('X-Powered-By', 'OpenEvacMap')
if stats is None:
resp.status = falcon.HTTP_404
else:
resp.status = falcon.HTTP_200
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Headers',
'X-Requested-With')
resp.body = (stats)
cur.close()
dbc.close()
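# Editor's note (illustrative sketch, not part of the original module): the resource is
# mounted on the falcon application elsewhere in the project; minimal wiring, with the
# route path assumed, would look like:
#
#     app = falcon.API()
#     app.add_route('/stat', Stat())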
|
openevacmap/openevac-back
|
src/routes/stat.py
|
Python
|
agpl-3.0
| 1,126
|
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from base64 import b64decode, b64encode
from werkzeug.datastructures import FileStorage
from odoo import models, fields, _
_logger = logging.getLogger(__name__)
try:
import magic
except ImportError:
_logger.warning("Please install magic in order to use Muskathlon module")
class OrderMaterialForm(models.AbstractModel):
_name = "cms.form.order.material.mixin"
_inherit = "cms.form"
_form_model = "crm.lead"
_form_model_fields = ["partner_id", "description"]
_form_required_fields = ["flyer_german", "flyer_french"]
partner_id = fields.Many2one("res.partner", readonly=False)
event_id = fields.Many2one("crm.event.compassion", readonly=False)
form_id = fields.Char()
flyers_select = [(i, str(i)) for i in (0, 5, 10, 15, 20, 30)]
flyer_german = fields.Selection(flyers_select, string="Number of flyers in german", default=0)
flyer_french = fields.Selection(flyers_select, string="Number of flyers in french", default=0)
@property
def _form_fieldsets(self):
return [
{"id": "flyers", "fields": ["flyer_german", "flyer_french", "form_id"]},
]
@property
def form_msg_success_created(self):
return _(
"Thank you for your request. You will hear back from us "
"within the next days."
)
@property
def form_widgets(self):
# Hide fields
res = super(OrderMaterialForm, self).form_widgets
res.update(
{
"form_id": "cms_form_compassion.form.widget.hidden",
"partner_id": "cms_form_compassion.form.widget.hidden",
"event_id": "cms_form_compassion.form.widget.hidden",
"description": "cms_form_compassion.form.widget.hidden",
}
)
return res
@staticmethod
def create_description(material, values, languages=["french", "german"]):
lines = []
for lang in languages:
if int(values[f'flyer_{lang}']) > 0:
lines.append(f"<li>{values[f'flyer_{lang}']} <b>{material}</b> in {lang}</li>")
description = f"<ul>{''.join(lines)}</ul>"
return description
def form_init(self, request, main_object=None, **kw):
form = super(OrderMaterialForm, self).form_init(request, main_object, **kw)
# Set default values
registration = kw.get("registration")
form.partner_id = registration and registration.partner_id
form.event_id = registration and registration.compassion_event_id
return form
def form_before_create_or_update(self, values, extra_values):
""" Dismiss any pending status message, to avoid multiple
messages when multiple forms are present on same page.
"""
super(OrderMaterialForm, self).form_before_create_or_update(
values, extra_values
)
self.o_request.website.get_status_message()
staff_id = (
self.env["res.config.settings"]
.sudo()
.get_param("muskathlon_order_notify_id")
)
values.update(
{
"name": f"Muskathlon flyer order - {self.partner_id.name}",
"description": self.create_description("flyer", extra_values),
"user_id": staff_id,
"event_ids": [(4, self.event_id.id, None)],
"partner_id": self.partner_id.id,
}
)
def form_check_empty_value(self, fname, field, value, **req_values):
"""Invalidate the form if they order 0 flyers"""
is_valid = super().form_check_empty_value(fname, field, value, **req_values)
is_valid |= int(req_values["flyer_french"]) + int(req_values["flyer_german"]) <= 0
return is_valid
def _form_create(self, values):
""" Run as Muskathlon user to authorize lead creation,
and prevents default mail notification to staff
(a better one is sent just after)."""
uid = self.env.ref("muskathlon.user_muskathlon_portal").id
self.main_object = self.form_model\
.sudo(uid).with_context(tracking_disable=True).create(values.copy())
def form_after_create_or_update(self, values, extra_values):
super(OrderMaterialForm, self).form_after_create_or_update(
values, extra_values
)
# Update contact fields on lead
self.main_object._onchange_partner_id()
# Send mail
email_template = self.env.ref("muskathlon.order_material_mail_template")
email_template.sudo().send_mail(
self.main_object.id,
raise_exception=False,
force_send=True,
email_values={
"attachments": [("picture.jpg", self.main_object.partner_id.image)],
"email_to": self.main_object.user_email,
},
)
return True
class OrderMaterialFormFlyer(models.AbstractModel):
_name = "cms.form.order.material"
_inherit = "cms.form.order.material.mixin"
form_id = fields.Char(default="order_material")
class OrderMaterialFormChildpack(models.AbstractModel):
_name = "cms.form.order.muskathlon.childpack"
_inherit = "cms.form.order.material.mixin"
form_id = fields.Char(default="muskathlon_childpack")
flyer_german = fields.Selection(string="Number of childpacks in german", default=0)
flyer_french = fields.Selection(string="Number of childpacks in french", default=0)
def form_before_create_or_update(self, values, extra_values):
super(OrderMaterialFormChildpack, self).form_before_create_or_update(
values, extra_values
)
values.update(
{
"name": f"Muskathlon childpack order - {self.partner_id.name}",
"description": self.create_description("childpack", extra_values),
}
)
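# Illustrative note (added; not part of the original module): for a
# hypothetical submission of 5 French flyers and none in German,
#   OrderMaterialForm.create_description(
#       "flyer", {"flyer_french": "5", "flyer_german": "0"})
# returns '<ul><li>5 <b>flyer</b> in french</li></ul>'; languages with a
# zero quantity are simply omitted from the list.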
|
CompassionCH/compassion-switzerland
|
muskathlon/forms/order_material_form.py
|
Python
|
agpl-3.0
| 6,229
|
#!/usr/bin/env python3
from math import pi, atan
class ScanSetting(object):
"""docstring for ScanSetting"""
def __init__(self):
super(ScanSetting, self).__init__()
# for scan
self.scan_step = 400 # steps
self.theta_a = pi / 6 # radius between center and laser
self.img_width = 640
self.img_height = 480
self.sensorWidth = 3.67
self.sensorHeight = 2.74 + 0.08
self.focalLength = 3.6
# ######### mockup 2, measure by solidwork###
self.cab_m = self.img_width / 2
self.cab_l = self.img_width / 2
self.cab_r = self.img_width / 2
self.cameraX = 0.0
self.cameraY = 22.28 + 8
self.cameraZ = -174.70
self.laserX_L = -53.61
self.laserY_L = 31.62
self.laserZ_L = -76.47
self.laserX_R = 53.61
self.laserY_R = 31.62
self.laserZ_R = -76.47
self.theta_a = atan(self.laserX_L / self.laserZ_L)
self.MAXLaserRange = 65
self.LaserRangeMergeDistance = 65
self.MINLaserRange = 3
self.MagnitudeThreshold = 3
self.LLaserAdjustment = 0
self.RLaserAdjustment = 0
# for modeling
self.NoiseNeighbors = 50
self.NeighborhoodDistance = 10
self.SegmentationDistance = 2
self.CloseBottom = -1000
self.CloseTop = 1000
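if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original module): build the
    # settings and derive the horizontal field of view from the sensor width
    # and focal length via the standard pinhole-camera relation. Purely
    # illustrative; no scanner hardware is required.
    s = ScanSetting()
    hfov = 2 * atan(s.sensorWidth / (2 * s.focalLength))
    print("camera/laser angle theta_a: %.4f rad" % s.theta_a)
    print("approx. horizontal FOV:     %.4f rad" % hfov)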
|
blesscat/flux_line_bot
|
fluxclient/scanner/scan_settings.py
|
Python
|
agpl-3.0
| 1,386
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2008 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Fabian Jakobs (fjakobs)
# * Thomas Herchenroeder (thron7)
#
################################################################################
import re, sys
from ecmascript.frontend import tree, treeutil
##
# Run through all the qx.*.define nodes of a tree. This will cover multiple
# classes defined in a single file, as well as nested calls to qx.*.define.
#
# - interface function
def patch(node):
patchCount = 0
classDefNodes = list(treeutil.findQxDefineR(node))
for classDefNode in classDefNodes:
patchCount += optimize(classDefNode, classDefNodes,)
return patchCount
##
# Optimize a single class definition; treats 'construct' and 'members' sections
def optimize(classDefine, classDefNodes):
patchCount = 0
# get class map
try:
classMap = treeutil.getClassMap(classDefine)
except tree.NodeAccessException: # this might happen when the second param is not a map literal
return 0
if not "extend" in classMap:
return 0
if classMap["extend"].type == "variable":
superClass = treeutil.assembleVariable(classMap["extend"])[0]
else:
return 0 # interfaces can have a list-valued "extend", but we currently don't optimize those
if "construct" in classMap:
patchCount = optimizeConstruct(classMap["construct"], superClass, "construct", classDefNodes)
if not "members" in classMap:
return patchCount
members = classMap["members"]
for methodName, methodNode in members.items():
patchCount += optimizeConstruct(methodNode, superClass, methodName, classDefNodes)
return patchCount
##
# Optimize calls to this.base in a tree (e.g. a method); this will skip nested
# calls to qx.*.define, as they are handled on a higher level
def optimizeConstruct(node, superClass, methodName, classDefNodes):
patchCount = 0
# Need to run through all the nodes, to skip embedded qx.*.define(),
# which will be treated separately
# Handle Node
# skip embedded qx.*.define()
if node in classDefNodes:
return 0
elif node.type == "variable" and node.hasParentContext("call/operand"):
varName, complete = treeutil.assembleVariable(node)
if not (complete and varName == "this.base"):
return 0
call = node.parent.parent
try:
firstArgName = treeutil.selectNode(call, "params/1/identifier/@name")
except tree.NodeAccessException:
return 0
if firstArgName != "arguments":
return 0
# "construct"
if methodName == "construct":
newCall = treeutil.compileString("%s.call()" % superClass)
# "member"
else:
newCall = treeutil.compileString("%s.prototype.%s.call()" % (superClass, methodName))
newCall.replaceChild(newCall.getChild("params"), call.getChild("params")) # replace with old arglist
treeutil.selectNode(newCall, "params/1/identifier").set("name", "this") # arguments -> this
call.parent.replaceChild(call, newCall)
patchCount += 1
# Handle Children
if node.hasChildren():
for child in node.children:
patchCount += optimizeConstruct(child, superClass, methodName, classDefNodes)
return patchCount
if __name__ == "__main__":
cls = """qx.Class.define("qx.Car", {
extend: qx.core.Object,
construct : function() {
this.base(arguments, "2")
},
members : {
foo : function() {
return this.base(arguments)
}
}
})"""
node = treeutil.compileString(cls)
patch(node)
print node.toJavascript()
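    # Expected result for the sample above (added note, not in the original):
    # the optimizer rewrites the this.base() calls into
    #   construct : qx.core.Object.call(this, "2")
    #   foo       : qx.core.Object.prototype.foo.call(this)
    # following the templates built in optimizeConstruct().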
|
Seldaiendil/meyeOS
|
devtools/qooxdoo-1.5-sdk/tool/pylib/ecmascript/transform/optimizer/basecalloptimizer.py
|
Python
|
agpl-3.0
| 4,225
|
# -*- encoding: utf-8 -*-
__name__ = "Remove useless IDENTIFIER column in table MEMBERSHIP_REQUEST"
def migrate(cr, version):
if not version:
return
cr.execute('SELECT 1 '
'FROM pg_class c,pg_attribute a '
'WHERE c.relname=%s '
'AND c.oid=a.attrelid '
'AND a.attname = %s', ('membership_request', 'identifier'))
if cr.fetchone():
cr.execute("ALTER TABLE membership_request "
"DROP COLUMN identifier")
|
acsone/mozaik
|
mozaik_membership/migrations/1.0.1/post-migration.py
|
Python
|
agpl-3.0
| 511
|
from django.conf import settings
from django.db import models as django_models
from django.utils.translation import ugettext_lazy as _
from cms import models as cms_models
from djangocms_utils import fields as cms_fields
from shop import models as shop_models
from shop.util import fields as shop_fields
from simple_translation import actions
CMSPLUGIN_BLOG_PLACEHOLDERS = getattr(settings, 'CMSPLUGIN_BLOG_PLACEHOLDERS', ('excerpt', 'content'))
class Product(shop_models.Product):
placeholders = cms_fields.M2MPlaceholderField(actions=actions.SimpleTranslationPlaceholderActions(), placeholders=CMSPLUGIN_BLOG_PLACEHOLDERS)
class Meta:
pass
def get_price(self):
if self.price_set.count() > 0:
return self.price_set.aggregate(django_models.Sum('price')).get('price__sum')
return self.unit_price
class ProductTitle(django_models.Model):
product = django_models.ForeignKey(Product)
language = django_models.CharField(max_length=2, choices=settings.LANGUAGES)
name = django_models.CharField(max_length=255)
slug = django_models.SlugField()
def __unicode__(self):
return self.name
class Meta:
unique_together = ('language', 'slug')
class Item(django_models.Model):
product = django_models.ForeignKey(Product)
item = django_models.CharField(max_length=255)
quantity = django_models.IntegerField(default=1)
has_nodewatcher_firmware = django_models.BooleanField()
class Price(django_models.Model):
product = django_models.ForeignKey(Product)
price = shop_fields.CurrencyField()
price_type = django_models.CharField(max_length=255, choices=((_('Purchase price'), _('Purchase price')), (_('Import tax'), _('Import tax')), ))
class ProductPlugin(cms_models.CMSPlugin):
product = django_models.ForeignKey(Product)
|
matevzmihalic/wlansi-store
|
wlansi_store/models.py
|
Python
|
agpl-3.0
| 1,854
|
from django.conf.urls.defaults import *
urlpatterns = patterns('contrib.karma.views',
(r'^$', 'index'),
(r'index', 'index'),
)
|
danigm/sweetter
|
sweetter/contrib/karma/urls.py
|
Python
|
agpl-3.0
| 136
|
from rest_framework import status
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from sigma_core.views.sigma_viewset import SigmaViewSet
from sigma_core.importer import Sigma, load_ressource
User = load_ressource("User")
from django.core.mail import send_mail
from rest_framework.permissions import AllowAny
import random
reset_mail = {
'from_email': 'support@sigma.fr',
'subject': 'Mot de passe Sigma',
'message': u"""
Bonjour,
Ton mot de passe Sigma a été réinitialisé.
C'est maintenant "{password}".
Cordialement,
L'équipe Sigma.
"""
}
class UserViewSet(SigmaViewSet):
serializer_class = User.serializer
queryset = User.model.objects.all()
#*********************************************************************************************#
#** Read actions **#
#*********************************************************************************************#
def retrieve(self, request, pk=None):
"""
Retrieve an User according to its id.
"""
return self.handle_action('retrieve', request, pk)
@list_route(methods=['get'])
def me(self, request):
"""
Retrieve the data of the current user.
"""
return self.serialized_response(request.user)
#*********************************************************************************************#
#** Write actions **#
#*********************************************************************************************#
# def perform_create(self, serializer):
# from sigma_core.models.cluster import Cluster
# from sigma_core.models.group import Group
# serializer.save()
# # Create related GroupMember associations
# # TODO: Looks like a hacky-way to do this.
# # But how to do it properly ?
# memberships = [GroupMember(group=Group(id=c), user=User(id=serializer.data['id']),) for c in serializer.data['clusters_ids']]
# GroupMember.objects.bulk_create(memberships)
# def update(self, request, pk=None):
# """
# Update the data of the specified user.
# """
# try:
# user = User.objects.prefetch_related('clusters').get(pk=pk)
# except User.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# # I can update my own profile, or another's profile if I'm a sigma/cluster admin
# if not (request.user.is_sigma_admin() or int(pk) == request.user.id or request.user.is_admin_of_one_cluster(user.clusters.all())):
# return Response(status=status.HTTP_403_FORBIDDEN)
# # Names edition is allowed to sigma/clusters admins only
# if (request.data['lastname'] != user.lastname or request.data['firstname'] != user.firstname) and not request.user.is_sigma_admin() and not request.user.is_admin_of_one_cluster(user.clusters.all()):
# return Response('You cannot change your lastname or firstname', status=status.HTTP_400_BAD_REQUEST)
# return super(UserViewSet, self).update(request, pk)
# def destroy(self, request, pk=None):
# if not request.user.is_sigma_admin() and int(pk) != request.user.id:
# return Response(status=status.HTTP_403_FORBIDDEN)
# super().destroy(request, pk)
@list_route(methods=['put'])
def change_password(self, request):
"""
Allow current user to change his password.
---
omit_serializer: true
parameters_strategy:
form: replace
parameters:
- name: old_password
type: string
- name: password
type: string
"""
# PASSWORD_MIN_LENGTH = 8
# user = request.user
# data = request.data
# if not user.check_password(data['old_password']):
# return Response("Wrong password", status=status.HTTP_403_FORBIDDEN)
# if len(data['password']) < PASSWORD_MIN_LENGTH:
# return Response("'password' must be at least %d characters long" % PASSWORD_MIN_LENGTH, status=status.HTTP_400_BAD_REQUEST)
# user.set_password(data['password'])
# user.save()
return Response('Password successfully changed', status=status.HTTP_200_OK)
#Dangerous to send a password in clear...
@list_route(methods=['post'], permission_classes=[AllowAny])
def reset_password(self, request):
"""
Reset current user's password and send him an email with the new one.
---
omit_serializer: true
parameters_strategy:
form: replace
parameters:
- name: email
type: string
"""
# email = request.data.get('email')
# if email == '':
# return Response("'email' field cannot be empty", status=status.HTTP_400_BAD_REQUEST)
# try:
# user = User.objects.get(email=email)
# except User.DoesNotExist:
# return Response('No user found with this email', status=status.HTTP_404_NOT_FOUND)
# password = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(10))
# mail = reset_mail.copy()
# mail['recipient_list'] = [user.email]
# mail['message'] = mail['message'].format(email=user.email, password=password, name=user.get_full_name())
# send_mail(**mail)
# user.set_password(password)
# user.save()
return Response('Password reset', status=status.HTTP_200_OK)
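    # Wiring sketch (added note, not part of the original file): with Django
    # REST framework this viewset is typically exposed through a router, e.g.
    #   router = routers.DefaultRouter()
    #   router.register(r'user', UserViewSet)
    # The exact URL prefix used by sigma-backend is an assumption here.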
|
SRLKilling/sigma-backend
|
data-server/django_app/sigma_core/views/user.py
|
Python
|
agpl-3.0
| 5,766
|
import sys
sys.path.append("./m1")
import div
sum=div.div(1,200000)
print(sum)
|
40423118/2017springcd_hw
|
w10/appdiv.py
|
Python
|
agpl-3.0
| 78
|
from __future__ import unicode_literals
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('user', '0018_auto_20160922_1258'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='activity_cantons',
field=multiselectfield.db.fields.MultiSelectField(default='', verbose_name='Défi Vélo mobile', choices=[('BS', 'Basel-Stadt'), ('BE', 'Berne'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VS', 'Valais'), ('VD', 'Vaud'), ('ZH', 'Zurich')], max_length=29),
preserve_default=False,
),
migrations.AlterField(
model_name='userprofile',
name='affiliation_canton',
field=models.CharField(verbose_name="Canton d'affiliation", choices=[('', '---------'), ('BS', 'Basel-Stadt'), ('BE', 'Berne'), ('FR', 'Fribourg'), ('GE', 'Geneva'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VS', 'Valais'), ('VD', 'Vaud'), ('ZH', 'Zurich')], max_length=2),
),
]
|
defivelo/db
|
apps/user/migrations/0019_auto_20160922_1342.py
|
Python
|
agpl-3.0
| 1,161
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@pexego.es>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
class product_product(models.Model):
_inherit = 'product.product'
is_outlet = fields.Boolean('Is outlet', compute='_is_outlet')
normal_product_id = fields.Many2one('product.product', 'normal product')
outlet_product_ids = fields.One2many('product.product',
'normal_product_id',
'Outlet products')
@api.one
def _is_outlet(self):
outlet_cat = self.env.ref('product_outlet.product_category_outlet')
if self.categ_id == outlet_cat or \
self.categ_id.parent_id == outlet_cat:
self.is_outlet = True
else:
self.is_outlet = False
@api.model
def cron_update_outlet_price(self):
outlet_categ_ids = []
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o1').id)
outlet_categ_ids.append(self.env.ref('product_outlet.product_category_o2').id)
outlet_products = self.env['product.product'].search([('categ_id', 'in', outlet_categ_ids),
('normal_product_id.list_price', '!=', 0)],
order="id desc")
for product_o in outlet_products:
origin_product = product_o.normal_product_id
price_outlet = origin_product.list_price * (1 - product_o.categ_id.percent / 100)
price_outlet2 = origin_product.list_price2 * (1 - product_o.categ_id.percent / 100)
price_outlet3 = origin_product.list_price3 * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd = origin_product.pvd1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd2 = origin_product.pvd2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvd3 = origin_product.pvd3_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi = origin_product.pvi1_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi2 = origin_product.pvi2_price * (1 - product_o.categ_id.percent / 100)
price_outlet_pvi3 = origin_product.pvi3_price * (1 - product_o.categ_id.percent / 100)
if round(product_o.list_price, 2) != round(price_outlet, 2) or \
round(product_o.list_price2, 2) != round(price_outlet2, 2) or \
round(product_o.list_price3, 2) != round(price_outlet3, 2) or \
round(product_o.pvd1_price, 2) != round(price_outlet_pvd, 2) or \
round(product_o.pvd2_price, 2) != round(price_outlet_pvd2, 2) or \
round(product_o.pvd3_price, 2) != round(price_outlet_pvd3, 2) or \
round(product_o.pvi1_price, 2) != round(price_outlet_pvi, 2) or \
round(product_o.pvi2_price, 2) != round(price_outlet_pvi2, 2) or \
round(product_o.pvi3_price, 2) != round(price_outlet_pvi3, 2) or \
round(product_o.commercial_cost, 2) != round(origin_product.commercial_cost, 2):
# update all prices
values = {
'standard_price': price_outlet,
'list_price': price_outlet,
'list_price2': price_outlet2,
'list_price3': price_outlet3,
'pvd1_price': price_outlet_pvd,
'pvd2_price': price_outlet_pvd2,
'pvd3_price': price_outlet_pvd3,
'pvi1_price': price_outlet_pvi,
'pvi2_price': price_outlet_pvi2,
'pvi3_price': price_outlet_pvi3,
'commercial_cost': origin_product.commercial_cost,
}
product_o.write(values)
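    # Worked example (added note, not part of the original module): for an
    # outlet category configured with percent = 30 and an origin product whose
    # list_price is 100.0, the formula above gives
    #   price_outlet = 100.0 * (1 - 30 / 100.0) = 70.0
    # and the same reduction is applied to every other price field.
    # Values are hypothetical.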
|
jgmanzanas/CMNT_004_15
|
project-addons/product_outlet/product.py
|
Python
|
agpl-3.0
| 4,831
|
'''
cloudminingstatus.py
@summary: Show selected API data from cloudhasher and miningpool.
@author: Andreas Krueger
@since: 12 Feb 2017
@contact: https://github.com/drandreaskrueger
@copyright: @author @since @license
@license: Donationware, see README.md. Plus see LICENSE.
@version: v0.1.0
@status: It is working well.
@todo: Make it into webservice?
'''
from __future__ import print_function
import time
import sys
import pprint
import requests # pip install requests
SLEEP_SECONDS= 5*60
SHOW_COMPOSITE_RESULTS = True
try:
from credentials_ME import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY
except:
from credentials import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY
POOL_API_URL="http://soil.miners-zone.net/apisoil/accounts/%s"
HASHER_ORDERS_API_URL="https://www.nicehash.com/api?method=orders.get&my&algo=20&location=0&id=%s&key=%s"
HASHER_BALANCE_API_URL="https://www.nicehash.com/api?method=balance&id=%s&key=%s" # unused
def humanTime(epoch):
return time.strftime("GMT %H:%M:%S %a %d %b %Y", time.gmtime(epoch))
POOL_JSON=[('currentHashrate', (lambda x: "%6.2f MHash/s 30m average" % (x/1000000.0))),
('hashrate' , (lambda x: "%6.2f MHash/s 3h average" % (x/1000000.0))),
('paymentsTotal' , (lambda x:x)),
('stats' , (lambda x: "%10.4f SOIL paid" % (float(x['paid'])/1000000000))),
('stats' , (lambda x: "%10.4f SOIL balance" % (float(x['balance'])/1000000000))),
('24hreward',(lambda x: "%10.4f SOIL" % (float(x)/1000000000))),
('stats' , (lambda x: "%d blocksFound" % (x['blocksFound']))),
('stats' , (lambda x: "%s lastShare" % (humanTime(x['lastShare'])))),
('workers' , (lambda x: "%s last beat" % (humanTime(x['0']['lastBeat'])))),
('workers' , (lambda x: "%s Online" % (not bool(x['0']['offline'])))),
('workersTotal', (lambda x:x)),
]
HASHER_JSON_PATH=('result', 'orders', 0)
HASHER_JSON=[
('alive', (lambda x: x)),
('workers', (lambda x: x)),
('id', (lambda x: x)),
('pool_host', (lambda x: x)),
('pool_user', (lambda x: x)),
('limit_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))),
('accepted_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))),
('btc_paid', (lambda x: x)),
('btc_avail', (lambda x: x)),
('price', (lambda x: "%s BTC/GH/Day" % x)),
('end', (lambda x: "%4.2f days order lifetime" % (x/1000.0/60/60/24))),
]
def getJsonData(url):
"""
get url, check for status_code==200, return as json
"""
try:
r=requests.get(url)
except Exception as e:
print ("no connection: ", e)
return False
if r.status_code != 200:
print ("not answered OK==200, but ", r.status_code)
return False
try:
j=r.json()
except Exception as e:
print ("no json, text:")
print (r.text)
# raise e
return False
return j
def showPoolData(url):
"""
gets all json data from pool, but shows only what is in POOL_JSON
"""
print ("Pool:")
j=getJsonData(url)
if not j:
return False
# pprint.pprint (j)
for Jkey, Jfn in POOL_JSON:
print (Jfn(j[Jkey]), "(%s)" % Jkey)
return j
def showHasherData(url):
"""
gets all json data from cloudhasher, but shows only what is in HASHER_JSON
"""
print ("CloudHasher:")
j=getJsonData(url)
if not j:
return False
# pprint.pprint (j)
# climb down into the one branch with all the interesting data:
j=j [HASHER_JSON_PATH[0]] [HASHER_JSON_PATH[1]] [HASHER_JSON_PATH[2]]
# pprint.pprint (j)
for Jkey, Jfn in HASHER_JSON:
print (Jfn(j[Jkey]), "(%s)" % Jkey)
estimate = (float(j['btc_avail']) / ( float(j['price'])*float(j['accepted_speed'])) )
print ("%.2f days" % estimate, end='')
print ("(remaining btc / order price / hashrate)")
return j
def showCompositeResults(pooldata, hasherdata):
"""
    Estimates a coin price from money spent versus money mined.
    N.B.: In this form it is probably only roughly correct
    during the first buy order? We'll see.
"""
coinsMined = float(pooldata['stats']['paid'])
coinsMined += float(pooldata['stats']['balance'])
coinsMined /= 1000000000
hashingCostsBtc = float(hasherdata['btc_paid'])
satoshiPrice = hashingCostsBtc / coinsMined * 100000000
print ("%.1f Satoshi/SOIL (mining price approx)" % satoshiPrice)
return satoshiPrice
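# Worked example (added note, not part of the original script): if the pool
# reports 2.0 SOIL mined in total (paid + balance) and the cloud hasher has
# spent 0.001 BTC so far, the estimate above is
#   0.001 / 2.0 * 100000000 = 50000.0 Satoshi/SOIL
# Numbers are hypothetical.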
def loop(sleepseconds):
"""
Shows both, then sleeps, the repeats.
"""
while True:
print ()
pooldata=showPoolData(url=POOL_API_URL%POOL_API_USERNAME)
print ()
hasherdata=showHasherData(url=HASHER_ORDERS_API_URL%(HASHER_API_ID, HASHER_API_KEY))
print ()
if SHOW_COMPOSITE_RESULTS and pooldata and hasherdata:
showCompositeResults(pooldata, hasherdata)
print ()
print (humanTime(time.time()), end='')
print ("... sleep %s seconds ..." % sleepseconds)
time.sleep(sleepseconds)
def checkCredentials():
"""
See credentials.py
"""
yourCredentials=(POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY)
if "" in yourCredentials:
print ("You must fill in credentials.py first.")
print (yourCredentials)
return False
else:
return True
if __name__ == '__main__':
if not checkCredentials():
sys.exit()
try:
loop(sleepseconds=SLEEP_SECONDS)
except KeyboardInterrupt:
print ("Bye.")
sys.exit()
|
drandreaskrueger/cloudminingstatus
|
cloudminingstatus.py
|
Python
|
agpl-3.0
| 5,999
|
from .messages import I18nMessages
|
astrobin/astrobin
|
astrobin_apps_json_api/i18n/views/__init__.py
|
Python
|
agpl-3.0
| 35
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>) and the
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
# ########################################################################
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from osv import osv, fields
from tools.translate import _
from datetime import datetime, timedelta
_logger = logging.getLogger(__name__)
month_list= [('01','January'),
('02','February'),
('03','March'),
('04','April'),
             ('05','May'),
('06','June'),
('07','July'),
('08','August'),
('09','September'),
('10','October'),
('11','November'),
('12','December'),
]
# WIZARD INTERVENT REPORT ######################################################
class contract_report_intervent_wizard(osv.osv_memory):
''' Middle window to choose intervent report parameter
'''
_name = 'contract.report.intervent.wizard'
_description = 'Intervent report wizard'
# Button events:
def print_invoice(self, cr, uid, ids, context=None):
''' Redirect to intervent print passing parameters
'''
wiz_proxy = self.browse(cr, uid, ids)[0]
datas = {}
if wiz_proxy.all:
datas['department_id'] = False
datas['department_name'] = 'All'
else:
datas['department_id'] = wiz_proxy.department_id.id
datas['department_name'] = wiz_proxy.department_id.name
if wiz_proxy.absence_account_id:
datas['absence_account_id'] = wiz_proxy.absence_account_id.id
datas['absence_account_name'] = wiz_proxy.absence_account_id.name
datas['month'] = wiz_proxy.month
datas['year'] = wiz_proxy.year
# not_work report:
datas['user_id'] = wiz_proxy.user_id.id
datas['user_name'] = wiz_proxy.user_id.name
datas['from_date'] = wiz_proxy.from_date
datas['to_date'] = wiz_proxy.to_date
datas['detailed'] = wiz_proxy.detailed
if wiz_proxy.mode == 'intervent':
report_name = 'intervent_report'
elif wiz_proxy.mode == 'absence':
report_name = 'absence_report'
else:
report_name = 'not_work_report'
return {
'type': 'ir.actions.report.xml',
'report_name': report_name,
'datas': datas,
}
_columns = {
'all': fields.boolean('All department', required=False),
'department_id': fields.many2one('hr.department', 'Department',
required=False),
'year': fields.integer('Year'),
'month': fields.selection(month_list, 'Month', select=True),
# For not_work:
'user_id': fields.many2one('res.users', 'Employee / User'),
'from_date': fields.date('From date >='),
'to_date': fields.date('To date <='),
'detailed': fields.boolean('Detailed'),
'mode': fields.selection([
('intervent','Intervent'),
('absence','Absence'),
('not_work','Not work status'), # statistic on absence
], 'Mode', select=True, readonly=False, required=True),
'absence_account_id': fields.many2one('account.analytic.account',
'Absence type', required=False,
help="If absence report is only for one type of account"),
}
_defaults = {
'all': lambda *a: True,
'month': lambda *a: datetime.now().strftime('%m'),
'year': lambda *a: datetime.now().strftime('%Y'),
'mode': lambda *a: 'intervent',
}
contract_report_intervent_wizard()
# WIZARD CONTRACT DEPT. REPORT ################################################
class contract_department_report_wizard(osv.osv_memory):
''' Middle window to choose intervent report parameter
'''
_name = 'contract.department.report.wizard'
_description = 'Contract dept. report wizard'
# Button events:
def print_invoice(self, cr, uid, ids, context=None):
''' Redirect to contract dept. print passing parameters
'''
wiz_proxy = self.browse(cr, uid, ids)[0]
datas = {}
if wiz_proxy.mode == 'detailed': # Detailed report ####################
# block:
datas['hour'] = wiz_proxy.hour
datas['cost'] = wiz_proxy.cost
datas['invoice'] = wiz_proxy.invoice
datas['balance'] = wiz_proxy.balance
datas['supplier'] = wiz_proxy.supplier
# date:
datas['start_date'] = wiz_proxy.start_date
datas['end_date'] = wiz_proxy.end_date
datas['active_contract'] = wiz_proxy.active_contract
datas['date_summary'] = (wiz_proxy.end_date or wiz_proxy.start_date) and wiz_proxy.date_summary # True if there's one date and set to true
# report name
report='contracts_report'
elif wiz_proxy.mode == 'list': # Simple list report ##################
datas['active'] = wiz_proxy.active
report = 'dept_contract_list_report'
else: # Summary report ################################################
#datas['department_id'] = wiz_proxy.department_id.id if wiz_proxy.department_id else False
datas['start_date'] = wiz_proxy.start_date
datas['end_date'] = wiz_proxy.end_date
report='dept_contract_summary' # TODO create report
if wiz_proxy.all_contract:
datas['contract_id'] = False
if wiz_proxy.all:
datas['department_id'] = False
else:
datas['department_id'] = wiz_proxy.department_id.id
datas['department_name'] = wiz_proxy.department_id.name
else: # contract selected:
datas['contract_id'] = wiz_proxy.contract_id.id
datas['contract_name'] = wiz_proxy.contract_id.name
datas['department_id'] = wiz_proxy.contract_id.department_id.id if wiz_proxy.contract_id.department_id else False
datas['department_name'] = wiz_proxy.department_id.name
return {
'type': 'ir.actions.report.xml',
'report_name': report, #'dept_contract_list_report',
'datas': datas,
}
_columns = {
'all_contract': fields.boolean('All contract',),
'active_contract': fields.boolean('Active contract',),
'contract_id': fields.many2one('account.analytic.account', 'Contract',
required=False,
help="All 'working' contract in contract list (absence fake contract not visible)"),
'all':fields.boolean('All department',),
'active':fields.boolean('Only active', help='In open state'),
'department_id':fields.many2one('hr.department', 'Department',
required=False),
'mode':fields.selection([
('list','Short list'),
('detailed','Detailed'),
('summary','Summary'),
],'Mode', select=True, required=True),
# Blocks:
'hour':fields.boolean('With hours'),
'cost':fields.boolean('With cost'),
'invoice':fields.boolean('With invoice'),
'balance':fields.boolean('With balance'),
'supplier':fields.boolean('With supplier invoice'),
'date_summary':fields.boolean('With date summary', required=False),
'start_date': fields.date('Start date', help="Start date of period, for evaluate costs, intervent, invoice"),
'end_date': fields.date('End Date', help="End date of period, for evaluate cost, intervent, invoice"),
}
_defaults = {
'mode': lambda *a: 'list',
'all': lambda *a: True,
'active': lambda *a: False,
'all_contract': lambda *a: True,
'hour': lambda *a: True,
'cost': lambda *a: True,
'invoice': lambda *a: True,
'balance': lambda *a: True,
'supplier': lambda *a: True,
'date_summary': lambda *a: True,
}
contract_department_report_wizard()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Micronaet/micronaet-contract
|
contract_manage_report/wizard/wizard_report.py
|
Python
|
agpl-3.0
| 9,417
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of fiware-cygnus (FI-WARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
import json
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'
import time
from lettuce import world
from decimal import Decimal
from tools import general_utils
from tools import http_utils
from tools.notification_utils import Notifications
from tools.fabric_utils import FabricSupport
from tools.cygnus_agent_config import Agent
from tools.cygnus_instance_config import Cygnus_Instance
from tools.cygnus_krb5_config import Krb5
from tools.cygnus_grouping_rules_config import Grouping_Rules
from tools.remote_log_utils import Remote_Log
# notification constants
VERSION = u'version'
# general constants
ROW_MODE = u'row'
COL_MODE = u'column'
CKAN_SINK = u'ckan'
MYSQL_SINK = u'mysql'
HDFS_SINK = u'hdfs'
MONGO_SINK = u'mongo'
STH_SINK = u'sth'
DEFAULT = u'default'
RANDOM = u'random'
EMPTY = u''
# ckan constants
MAX_TENANT_LENGTH = u'abcde678901234567890123456789019'
MAX_SERVICE_PATH_LENGTH = u'/abcdefghij1234567890abcdefghij1234567890abcdefgh'
MAX_RESOURCE_LENGTH = u'123456789012345678901234567890123456789012345678901234567890123'
WITH_MAX_LENGTH_ALLOWED = u'with max length allowed'
ORGANIZATION_MISSING = u'organization is missing'
ORGANIZATION_WITHOUT_DATASET = u'organization_without_dataset'
RESOURCE_MISSING = u'resource_missing'
RESULT = u'result'
RECORDS = u'records'
VALUE = u'value'
CONTENT_VALUE = u'contextValue'
NAME = u'name'
METADATA = u'metadata'
CONTEXT_METADATA = u'contextMetadata'
METADATAS = u'metadatas'
ATTR_NAME = u'attrName'
ATTR_TYPE = u'attrType'
ATTR_VALUE = u'attrValue'
ATTR_MD = u'attrMd'
TYPE = u'type'
# mysql constants
DATABASE_WITHOUT_TABLE = u'database_without_table'
DATABASE_MISSING = u'database_missing'
MAX_TABLE_LENGTH = 64
# mongo/sth
STH_DATABASE_PREFIX = u'sth'
STH_COLLECTION_PREFIX = u'sth'
AGGR = u'aggr'
class Cygnus:
"""
    cygnus class with general features
"""
# ---------------------------------------- Configuration -----------------------------------------------------------
def __init__(self, **kwargs):
"""
constructor
:param protocol: protocol used in cygnus requests
:param host: host used in cygnus, in multi-instances, will be incremental
        :param port: port used in cygnus
:param management_port : management port to know the version
:param version : cygnus version
:param verify_version : determine if verify cygnus version or not (True | False)
:param ttl: Number of channel re-injection retries before a Flume event is definitely discarded (-1 means infinite retries)
:param user: user used to connect by fabric
:param password: password used to connect by fabric, if use cert file, password will be None
:param cert_file: cert_file used to connect by fabric, if use password, cert_file will be None
:param error_retry: Number of times Fabric will attempt to connect when connecting to a new server
:param source_path: source path where are templates files
:param target_path: target path where are copied config files
:param sudo_run: operations in cygnus with superuser privileges (True | False)
:param log_level: log level used in cygnus (log4j.properties)
:param log_file: log file used in cygnus
:param log_owner: log file's owner of cygnus
:param log_group: log file's group of cygnus
:param log_mod: log file's mod of cygnus
"""
self.cygnus_protocol = kwargs.get("protocol", "http")
self.cygnus_host = kwargs.get("host", "localhost")
self.cygnus_port = kwargs.get("port", "5050")
self.cygnus_url = "%s://%s:%s" % (self.cygnus_protocol, self.cygnus_host, self.cygnus_port)
self.management_port = kwargs.get("management_port", "8081")
self.version = kwargs.get("version", "0.1.0_test")
self.verify_version = kwargs.get("verify_version", "false")
self.ttl = kwargs.get("ttl", "10")
#fabric
self.fabric_user = kwargs.get("user", None)
self.fabric_password = kwargs.get("password", None)
self.fabric_cert_file = kwargs.get("cert_file", None)
self.fabric_error_retry = kwargs.get("error_retry", 1)
self.fabric_source_path = kwargs.get("source_path", "/tmp")
self.fabric_target_path = kwargs.get("target_path", "/tmp")
        self.fabric_sudo_cygnus = str(kwargs.get("sudo_run", "false")).lower() == "true"
# log file
self.log_level = kwargs.get("log_level", "INFO")
self.log_file = kwargs.get("log_file", "/var/log/cygnus/cygnus.log")
self.log_owner = kwargs.get("log_owner", "cygnus")
self.log_group = kwargs.get("log_group", "cygnus")
self.log_mod = kwargs.get("log_mod", "775")
self.dataset_id = None
self.resource_id = None
def configuration(self, service, service_path, entity_type, entity_id, attributes_number, attributes_name, attribute_type):
"""
general configuration
table (max 64 chars) = service_path + "_" + resource
:param service: service used in scenario
:param service_path: service path used in scenario
:param entity_type: entity type used in scenario
:param entity_id: entity id used in scenario
:param attributes_number: number of attributes used in scenario
:param attributes_name: name of attributes used in scenario
:param attribute_type: type of attributes used in scenario
"""
self.dataset = None
self.table = None
if service == WITH_MAX_LENGTH_ALLOWED: self.service = MAX_TENANT_LENGTH.lower()
elif service != DEFAULT:
self.service = service.lower()
if service_path == WITH_MAX_LENGTH_ALLOWED: self.service_path = MAX_SERVICE_PATH_LENGTH.lower()
elif service_path != DEFAULT:
self.service_path = service_path.lower()
            if not (self.sink.find("mongo") >= 0 or self.sink.find("sth") >= 0):  # unless the sink is mongo or sth, strip a leading "/" from the service path if present
if self.service_path[:1] == "/": self.service_path = self.service_path[1:]
self.entity_type = entity_type
self.entity_id = entity_id
if (entity_type == WITH_MAX_LENGTH_ALLOWED): self.entity_type = MAX_RESOURCE_LENGTH[0:(len(MAX_RESOURCE_LENGTH)-len(self.service_path)-1)-len(entity_id)-2].lower()
if (entity_id == WITH_MAX_LENGTH_ALLOWED): self.entity_id = MAX_RESOURCE_LENGTH[0:(len(MAX_RESOURCE_LENGTH)-len(self.service_path)-1)-len(entity_type)-2].lower()
self.resource = str(self.entity_id+"_"+self.entity_type).lower()
self.attributes_number = int(attributes_number)
self.attributes_name = attributes_name
self.attributes_type = attribute_type
self.dataset = self.service
self.table = self.resource
if self.service_path != EMPTY:
self.dataset = self.dataset+"_"+self.service_path
self.table = self.service_path+ "_" +self.table
def __get_port (self, port, inc, different_port):
"""
get port value incremented to multi-instances
:param port: port value
:param inc: increment
:return port string
"""
try:
if different_port.lower() == "true":
return str(int(port)+inc)
else:
return port
        except Exception as e:
            assert False, "ERROR - port %s is not in numeric format \n %s" % (str(port), str(e))
def __get_channels(self, sinks):
"""
return the channel used by each sink
:param sinks: sinks list
:return: channels (string)
"""
sink_list = sinks.split (" ") # sink ex: ckan-sink mysql-sink hdfs-sink
channels = ""
for i in range(len(sink_list)):
channels = channels + sink_list[i].split("-")[0]+"-channel " # channel ex: ckan-channel mysql-channel hdfs-channel
return channels[:len(channels)-1]
def config_instances(self, id, quantity, sinks, persistence, different_port="true"):
"""
initialize instance files
        In case of multi-instances, and if different_port is true, the port is incremented from the initial port. ex: 5050, 5051, 5052, 5053, etc.
:param id: postfix used in instances name
:param quantity: number of instances
:param sinks: sinks string list
:param persistence: determine the mode of persistence by cygnus (row | column)
:param different_port: determine if the port is different or not
"""
self.instance_id = id
self.sink = sinks
self.instances_number = quantity
self.persistence = persistence
self.different_port = different_port
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True)
cygnus_instance = Cygnus_Instance(source_path=self.fabric_source_path, target_path=self.fabric_target_path, sudo=self.fabric_sudo_cygnus)
cygnus_agent = Agent(source_path=self.fabric_source_path, target_path=self.fabric_target_path, sudo=self.fabric_sudo_cygnus)
for i in range(int(self.instances_number)):
# generate cygnus_instance_<id>.conf ex: cygnus_instance_test_0.conf
port = self.__get_port(self.cygnus_port, i, self.different_port)
management_port = self.__get_port(self.management_port, i, self.different_port)
myfab.runs(cygnus_instance.append_id(admin_port=str(management_port), id=self.instance_id+"_"+str(i)))
# generate agent_<id>.conf ex: agent_test_0.conf
ops_list = cygnus_agent.append_id(id=self.instance_id+"_"+str(i))
ops_list = cygnus_agent.source(sink=sinks, channel=self.__get_channels(sinks), port=port, grouping_rules_file=self.fabric_target_path+"/grouping_rules.conf",)
sinks_list = sinks.split(" ")
for i in range(len(sinks_list)):
if sinks_list[i].find(HDFS_SINK)>=0:
hdfs_host = world.config['hadoop']['hadoop_namenode_url'].split(":")[1][2:]
hdfs_port = world.config['hadoop']['hadoop_namenode_url'].split(":")[2]
ops_list = cygnus_agent.config_hdfs_sink(sink=sinks_list[i], channel=self.__get_channels(sinks_list[i]), host=hdfs_host, port=hdfs_port, user=world.config['hadoop']['hadoop_user'], password=world.config['hadoop']['hadoop_password'], api=world.config['hadoop']['hadoop_api'], persistence=self.persistence, hive_host=hdfs_host, hive_port=hdfs_port, krb5_auth=world.config['hadoop']['hadoop_krb5_auth'], krb5_user=world.config['hadoop']['hadoop_krb5_user'], krb5_password=world.config['hadoop']['hadoop_krb5_password'], krb5_login_file=self.fabric_target_path+"/krb5_login.conf", krb5_conf_file=self.fabric_target_path+"/krb5.conf")
ops_list = cygnus_agent.config_channel (self.__get_channels(sinks_list[i]), capacity=world.config['hadoop']['hadoop_channel_capacity'], transaction_capacity=world.config['hadoop']['hadoop_channel_transaction_capacity'])
elif sinks_list[i].find(CKAN_SINK)>=0:
ops_list = cygnus_agent.config_ckan_sink(sink=sinks_list[i], channel=self.__get_channels(sinks_list[i]),api_key=world.config['ckan']['ckan_authorization'], host=world.config['ckan']['ckan_host'], port=world.config['ckan']['ckan_port'], orion_url=world.config['ckan']['ckan_orion_url'], persistence=self.persistence, ssl=world.config['ckan']['ckan_ssl'])
ops_list = cygnus_agent.config_channel (self.__get_channels(sinks_list[i]), capacity=world.config['ckan']['ckan_channel_capacity'], transaction_capacity=world.config['ckan']['ckan_channel_transaction_capacity'])
elif sinks_list[i].find(MYSQL_SINK)>=0:
ops_list = cygnus_agent.config_mysql_sink(sink=sinks_list[i], channel=self.__get_channels(sinks_list[i]), host=world.config['mysql']['mysql_host'], port=world.config['mysql']['mysql_port'], user=world.config['mysql']['mysql_user'], password=world.config['mysql']['mysql_pass'], persistence=self.persistence)
ops_list = cygnus_agent.config_channel (self.__get_channels(sinks_list[i]), capacity=world.config['mysql']['mysql_channel_capacity'], transaction_capacity=world.config['mysql']['mysql_channel_transaction_capacity'])
elif sinks_list[i].find(MONGO_SINK)>=0:
ops_list = cygnus_agent.config_mongo_sink(sink=sinks_list[i], channel=self.__get_channels(sinks_list[i]), host_port="%s:%s" % (world.config['mongo']['mongo_host'], world.config['mongo']['mongo_port']), user=world.config['mongo']['mongo_user'], password=world.config['mongo']['mongo_password'])
ops_list = cygnus_agent.config_channel (self.__get_channels(sinks_list[i]), capacity=world.config['mongo']['mongo_channel_capacity'], transaction_capacity=world.config['mongo']['mongo_channel_transaction_capacity'])
elif sinks_list[i].find(STH_SINK)>=0:
ops_list = cygnus_agent.config_sth_sink(sink=sinks_list[i], channel=self.__get_channels(sinks_list[i]), host_port="%s:%s" % (world.config['sth']['sth_host'], world.config['sth']['sth_port']), user=world.config['sth']['sth_user'], password=world.config['sth']['sth_password'])
ops_list = cygnus_agent.config_channel (self.__get_channels(sinks_list[i]), capacity=world.config['sth']['sth_channel_capacity'], transaction_capacity=world.config['sth']['sth_channel_transaction_capacity'])
# create and modify values in agent_<id>.conf
myfab.runs(ops_list)
def another_files (self, grouping_rules_file_name=DEFAULT):
"""
copy another configuration files used by cygnus
- flume-env.sh
- grouping_rules.conf
- log4j.properties
- krb5.conf
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
myfab.current_directory(self.fabric_target_path)
myfab.run("cp -R flume-env.sh.template flume-env.sh")
# grouping_rules.conf configuration
if grouping_rules_file_name == DEFAULT:
myfab.run("cp -R grouping_rules.conf.template grouping_rules.conf")
elif grouping_rules_file_name != EMPTY:
Grouping_Rules(fab_driver=myfab, file=grouping_rules_file_name, target_path=self.fabric_target_path, sudo=self.fabric_sudo_cygnus)
else:
myfab.run("rm -f grouping_rules.conf")
# change to DEBUG mode in log4j.properties
myfab.current_directory(self.fabric_target_path)
myfab.run("cp -R log4j.properties.template log4j.properties", target_path=self.fabric_target_path, sudo=self.fabric_sudo_cygnus)
myfab.run(' sed -i "s/flume.root.logger=INFO,LOGFILE/flume.root.logger=%s,LOGFILE /" log4j.properties.template' % (self.log_level))
# krb5.conf configuration
krb5 = Krb5(self.fabric_target_path, self.fabric_sudo_cygnus)
myfab.runs(krb5.config_kbr5(default_realm=world.config['hadoop']['hadoop_krb5_default_realm'], kdc=world.config['hadoop']['hadoop_krb5_kdc'], admin_server=world.config['hadoop']['hadoop_krb5_admin_server'], dns_lookup_realm=world.config['hadoop']['hadoop_krb5_dns_lookup_realm'], dns_lookup_kdc=world.config['hadoop']['hadoop_krb5_dns_lookup_kdc'], ticket_lifetime=world.config['hadoop']['hadoop_krb5_ticket_lifetime'], renew_lifetime=world.config['hadoop']['hadoop_krb5_renew_lifetime'], forwardable=world.config['hadoop']['hadoop_krb5_forwardable']))
def cygnus_service(self, operation):
"""
cygnus service (status | stop | start | restart)
:param operation:
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
myfab.warn_only(True)
myfab.run("service cygnus %s" % operation, sudo=self.fabric_sudo_cygnus)
def verify_cygnus (self):
"""
verify if cygnus is installed correctly and its version
"""
self.cygnus_mode = world.persistence
with open("configuration.json") as config_file:
try:
configuration = json.load(config_file)
if configuration["jenkins"].lower() == "true":
self.cygnus_host = "127.0.0.1"
self.cygnus_url = "%s://%s:%s" % (self.cygnus_protocol, self.cygnus_host, self.cygnus_port)
            except Exception as e:
assert False, 'Error parsing configuration.json file: \n%s' % (e)
if self.verify_version.lower() == "true":
management_url = "%s://%s:%s/%s" % (self.cygnus_protocol, self.cygnus_host, self.management_port, VERSION)
resp = http_utils.request(http_utils.GET, url= management_url)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - in management operation (version)")
body_dict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
assert str(body_dict[VERSION]) == self.version, \
"Wrong cygnus version verified: %s. Expected: %s. \n\nBody content: %s" % (str(body_dict[VERSION]), str(self.version), str(resp.text))
return True
def init_log_file(self):
"""
reinitialize log file
delete and create a new log file (empty)
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
log = Remote_Log (fabric=myfab)
log.delete_log_file()
log.create_log_file()
def verify_log(self, label, text):
"""
Verify in log file if a label with a text exists
:param label: label to find
:param text: text to find (begin since the end)
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
log = Remote_Log (fabric=myfab)
line = log.find_line(label, text)
assert line != None, "ERROR - label %s and text %s is not found. \n - %s" % (label, text, line)
def delete_grouping_rules_file(self, grouping_rules_file_name):
"""
delete grouping rules file in cygnus conf remotely
used the file name "grouping_rules_name" stored in configuration.json file
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
myfab.current_directory(self.fabric_target_path)
Grouping_Rules(fab_driver=myfab, file=grouping_rules_file_name, target_path=self.fabric_target_path, sudo=self.fabric_sudo_cygnus)
grouping_rules = Grouping_Rules()
myfab.run("rm -f %s" % grouping_rules.get_grouping_rules_file_name())
def delete_cygnus_instances_files(self):
"""
delete all cygnus instances files (cygnus_instance_%s_*.conf and agent_test_*.conf)
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
myfab.current_directory(self.fabric_target_path)
myfab.run("rm -f cygnus_instance_%s_*.conf" % self.instance_id)
myfab.run("rm -f agent_%s_*.conf" % self.instance_id)
myfab.run("rm -f agent_%s_*.conf" % self.instance_id)
# --------------------------------------------- general action -----------------------------------------------------
def __split_resource (self, resource_name):
"""
split resource in identity Id and identity Type
"""
res = resource_name.split ("_")
return res [0], res [1] # identity Id , identity Type
def get_timestamp_remote(self):
"""
return date-time in timestamp from sth server
:return float
"""
myfab = FabricSupport(host=self.cygnus_host, user=self.fabric_user, password=self.fabric_password, cert_file=self.fabric_cert_file, retry=self.fabric_error_retry, hide=True, sudo=self.fabric_sudo_cygnus)
return float(myfab.run("date +%s")) # get timestamp
def received_notification(self, attribute_value, metadata_value, content):
"""
notifications
:param attribute_value: attribute value
:param metadata_value: metadata value (true or false)
:param content: xml or json
"""
if self.sink == "mysql-sink": # limitation in lettuce change ' by " in mysql
attribute_value = general_utils.mappingQuotes(attribute_value)
self.content = content
self.metadata_value = metadata_value
metadata_attribute_number = 1
cygnus_notification_path = u'/notify'
notification = Notifications (self.cygnus_url+cygnus_notification_path,tenant=self.service, service_path=self.service_path, content=self.content)
if self.metadata_value:
notification.create_metadatas_attribute(metadata_attribute_number, RANDOM, RANDOM, RANDOM)
notification.create_attributes (self.attributes_number, self.attributes_name, self.attributes_type, attribute_value)
resp = notification.send_notification(self.entity_id, self.entity_type)
self.date_time = self.get_timestamp_remote()
self.attributes = notification.get_attributes()
self.attributes_name = notification.get_attributes_name()
self.attributes_value = notification.get_attributes_value()
self.attributes_metadata = notification.get_attributes_metadata_number()
self.attributes_number = int(notification.get_attributes_number())
return resp
def receives_n_notifications(self, notif_number, attribute_value_init):
"""
receives N notifications with consecutive values, without metadatas and json content
:param attribute_value_init: attribute value for all attributes in each notification increment in one
hint: "random number=X" per attribute_value_init is not used in this function
:param notif_number: number of notification
"""
self.notifications_number = notif_number
for i in range(int(notif_number)):
temp_value = Decimal(attribute_value_init) + i
resp = world.cygnus.received_notification(str(temp_value), "False", "json")
self.attributes_value = attribute_value_init
return resp
def __change_port(self, port):
"""
change a port used by notifications, update url variables
:param port: new port
"""
temp = self.cygnus_url.split(":")
self.cygnus_url = "%s:%s:%s" % (temp[0], temp[1], port)
def received_multiples_notifications(self, attribute_value, metadata_value, content):
"""
receive several notifications by each instance, but changing port
:param attribute_value:
:param metadata_value:
:param content:
:return: response
"""
self.attrs_list = []
for i in range(int(self.instances_number)):
self.__change_port(self.__get_port(self.cygnus_port, i, self.different_port))
resp = self.received_notification(attribute_value, metadata_value, content)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - in multi-notifications")
self.attrs_list.append(self.attributes) # used by verify if dates are stored
# ------------------------------------------- CKAN Column Mode -----------------------------------------------------
def create_organization_and_dataset(self):
"""
Create a new organization and a dataset associated
"""
self.dataset = self.service
if self.service_path != EMPTY: self.dataset = self.dataset+"_"+self.service_path
if self.service != ORGANIZATION_MISSING:
world.ckan.create_organization (self.service)
if self.service != ORGANIZATION_WITHOUT_DATASET:
self.dataset_id = world.ckan.create_dataset (self.dataset)
def create_resource_and_datastore (self, attribute_data_type, metadata_data_type):
"""
create a new resource and its datastore associated if it does not exists
:param attribute_data_type: attribute data type
:param metadata_data_type: metadata data type
"""
self.dataset_id = world.ckan.verify_if_dataset_exist(self.dataset)
if (self.service != ORGANIZATION_MISSING and \
self.service != ORGANIZATION_WITHOUT_DATASET and \
self.dataset_id != False) and \
self.entity_id != RESOURCE_MISSING:
fields = world.ckan.generate_field_datastore_to_resource(self.attributes_number, self.attributes_name, attribute_data_type, metadata_data_type)
self.resource_id = world.ckan.create_resource(self.resource, self.dataset_id, fields)
def retry_in_datastore_search_sql_column (self, resource_name, dataset_name, attributes_name, value):
"""
        retry getting data from ckan in column mode
:return: record from ckan
"""
c=0
row=1
for i in range(int(world.ckan.retries_number)):
resp=world.ckan.datastore_search_last_sql(row, resource_name, dataset_name)
temp_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
if len(temp_dict[RESULT][RECORDS])>0:
if str(temp_dict[RESULT][RECORDS][0][attributes_name+"_0"]) == str(value):
return temp_dict
c+=1
print " WARN - Retry in get data from ckan. No: ("+ str(c)+")"
time.sleep(world.ckan.retry_delay)
return u'ERROR - Attributes are missing....'
def retry_in_datastore_search_sql_row (self, position, resource_name, dataset_name, attributes_name):
"""
        retry getting data from ckan in row mode
:return: record from ckan
"""
c=0
for i in range(int(world.ckan.retries_number)):
resp=world.ckan.datastore_search_last_sql(self.attributes_number, resource_name, dataset_name)
if resp != False:
temp_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
if len(temp_dict[RESULT][RECORDS]) == int (self.attributes_number):
for i in range(0, self.attributes_number):
if str(temp_dict[RESULT][RECORDS][i][ATTR_NAME]) == str(attributes_name+"_"+str(position)):
return temp_dict[RESULT][RECORDS][i]
c+=1
print " WARN - Retry in get data from ckan. No: ("+ str(c)+")"
time.sleep(world.ckan.retry_delay)
return u'ERROR - Attributes are missing....'
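    # --- Editor-added illustrative sketch; not part of the original test suite ---
    # Both retry_in_datastore_search_sql_* helpers above follow the same polling
    # pattern: query ckan, check for the expected value, back off, and give up after
    # a fixed number of retries. The names fetch/matches below are hypothetical.
    def _example_retry_sketch(self, fetch, matches, retries, delay):
        c = 0
        for i in range(int(retries)):
            result = fetch()
            if result is not None and matches(result):
                return result
            c += 1
            print " WARN - Retrying ckan query. Attempt: (" + str(c) + ")"
            time.sleep(delay)
        return u'ERROR - Attributes are missing....'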
# ---------------------------------------------- CKAN Row Mode -----------------------------------------------------
# ------------------------------------------ MySQL Column Mode -----------------------------------------------------
def create_database (self):
"""
        create a new database in column mode, named after the tenant (self.service)
"""
if self.service != DATABASE_MISSING:
world.mysql.create_database (self.service)
def create_table (self, attribute_data_type, metadata_data_type):
"""
        create a new table in column mode
"""
self.table = self.resource
if self.service_path != EMPTY: self.table = self.service_path+ "_" +self.table
if self.service != DATABASE_MISSING and \
self.service != DATABASE_WITHOUT_TABLE and \
self.entity_id != RESOURCE_MISSING:
fields = world.mysql.generate_field_datastore_to_resource (self.attributes_number, self.attributes_name, attribute_data_type, metadata_data_type)
world.mysql.create_table (self.table, self.service, fields)
def retry_in_table_search_sql_column (self, table_name, database_name, value):
"""
        retry getting data from mysql in column mode
:return: record in mysql
"""
c = 0
for i in range(int(world.mysql.retries_number)):
row=world.mysql.table_search_one_row(database_name, table_name)
if row != None and str(row[1]) == value:
return row
c += 1
print " WARN - Retry in get data from mysql. No: ("+ str(c)+")"
time.sleep(world.mysql.retry_delay)
return u'ERROR - Attributes are missing....'
def retry_in_table_search_sql_row(self, position, database_name, table_name, attributes_name):
"""
        retry getting data from mysql in row mode
:return: record in mysql
"""
c = 0
for i in range(int(world.mysql.retries_number)):
rows=world.mysql.table_search_several_rows(self.attributes_number, database_name, table_name)
if rows != False:
if len(rows) == self.attributes_number:
for line in rows:
for j in range(len(line)):
if str(line[4]) == str(attributes_name+"_"+str(position)):
return line
c += 1
print " WARN - Retry in get data from mysql. No: ("+ str(c)+")"
time.sleep(world.mysql.retry_delay)
return u'ERROR - Attributes are missing....'
# ---------------------------------------------- Mysql Row Mode ----------------------------------------------------
# ------------------------------------------ Hadoop Row Mode -------------------------------------------------------
def hadoop_configuration(self, tenant, service_path, resource_name, attributes_number, attributes_name, attribute_type):
"""
hadoop configuration
"""
if tenant == WITH_MAX_LENGTH_ALLOWED: self.service = MAX_TENANT_LENGTH.lower()
elif tenant != DEFAULT:
self.service = tenant.lower()
if service_path != DEFAULT: self.service_path = service_path.lower()
if resource_name == WITH_MAX_LENGTH_ALLOWED:
self.resource = MAX_RESOURCE_LENGTH[0:(len(MAX_RESOURCE_LENGTH)-len(self.service_path)-1)].lower() # file (max 64 chars) = service_path + "_" + resource
elif resource_name == DEFAULT:
self.resource = str(self.entity_id+"_"+self.entity_type).lower()
else:
self.resource = resource_name.lower()
self.entity_id, self.entity_type = self.__split_resource (self.resource)
if attributes_number != DEFAULT: self.attributes_number = int(attributes_number)
if attributes_name != DEFAULT: self.attributes_name = attributes_name
self.attributes_type = attribute_type
# ------------------------------------------ mongo raw ------------------------------------------------------------
def verify_mongo_version(self, driver):
"""
verify mongo version
        if the version is incorrect, an error is shown with both versions, the one used and the expected one
:param driver: mongo driver
"""
driver.connect()
resp = driver.eval_version()
driver.disconnect()
assert resp == u'OK', resp
def verify_values_in_mongo(self):
"""
verify attribute value and type from mongo
:return document dict (cursor)
"""
find_dict = { "attrName": {'$regex':'%s.*' % (self.attributes_name)}, #the regular expression is because in multi attribute the name is with postfix <_value>. ex: temperature_0
"attrType" : self.attributes_type,
"attrValue" : str(self.attributes_value)
}
world.mongo.connect("%s_%s" % (STH_DATABASE_PREFIX, self.service))
world.mongo.choice_collection("%s_%s_%s_%s" % (STH_COLLECTION_PREFIX, self.service_path, self.entity_id, self.entity_type))
cursor = world.mongo.find_with_retry(find_dict)
        assert cursor.count() != 0, " ERROR - the attributes with prefix %s have not been stored in mongo successfully" % (self.attributes_name)
world.mongo.disconnect()
# --------------------------------------- mongo aggregated---------------------------------------------------------
def verify_aggregates_in_mongo(self, resolution):
"""
        verify aggregates stored in mongo:
           - origin, max, min, sum, sum2
:param resolution: resolutions type ( month | day | hour | minute | second )
"""
time_zone = 2
time.sleep(int(self.instances_number)) # delay to process all notifications and calculate aggregates
find_dict = {"_id.attrName": {'$regex':'%s.*' % (self.attributes_name)}, #the regular expression is because in multi attribute the name is with postfix + <_value>. ex: temperature_0
"_id.resolution": str(resolution)}
origin_year = general_utils.get_date_only_one_value(self.date_time, "year")
origin_month = general_utils.get_date_only_one_value(self.date_time, "month")
origin_day = general_utils.get_date_only_one_value(self.date_time, "day")
origin_hour = int(general_utils.get_date_only_one_value(self.date_time, "hour"))-time_zone
if origin_hour < 10: origin_hour = u'0' + str(origin_hour)
origin_minute = general_utils.get_date_only_one_value(self.date_time, "minute")
origin_second = general_utils.get_date_only_one_value(self.date_time, "second")
world.sth.connect("%s_%s" % (STH_DATABASE_PREFIX, self.service))
world.sth.choice_collection("%s_%s_%s_%s.%s" % (STH_COLLECTION_PREFIX, self.service_path, self.entity_id, self.entity_type, AGGR))
cursor = world.sth.find_with_retry(find_dict)
assert cursor.count() != 0, " ERROR - the aggregated has not been stored in mongo successfully "
doc_list = world.sth.get_cursor_value(cursor) # get all dictionaries into a cursor, return a list
for doc in doc_list:
offset = int(general_utils.get_date_only_one_value(self.date_time, resolution))
if resolution == "month":
offset=offset-1
origin_by_resolution = "%s-01-01 00:00:00" % (origin_year)
elif resolution == "day":
offset=offset-1
origin_by_resolution = "%s-%s-01 00:00:00" % (origin_year, origin_month)
elif resolution == "hour":
offset=offset-time_zone
origin_by_resolution = "%s-%s-%s 00:00:00" % (origin_year, origin_month, origin_day)
elif resolution == "minute":
origin_by_resolution = "%s-%s-%s %s:00:00" % (origin_year, origin_month, origin_day, origin_hour)
elif resolution == "second":
c = 0
MAX_SECS = 20
while (c < MAX_SECS):
if float(doc["points"][offset]["min"]) == float(self.attributes_value):
break
offset = offset - 1
if offset < 0: offset = 59
c = c + 1
if (origin_second < c): origin_minute = origin_minute - 1
origin_by_resolution = "%s-%s-%s %s:%s:00" % (origin_year, origin_month, origin_day, origin_hour, origin_minute)
else:
assert False, " ERROR - resolution type \"%s\" is not allowed, review your tests in features..." % (resolution)
sum_value = 0
sum2_value = 0
for i in range(int(self.instances_number)):
sum_value = sum_value + float(self.attributes_value)
sum2_value = sum2_value + (float(self.attributes_value)*float(self.attributes_value))
assert str(doc["_id"]["origin"]) == origin_by_resolution, " ERROR -- in origin field by the %s resolution in %s attribute" % (resolution, str(doc["_id"]["attrName"]))
assert float(doc["points"][offset]["min"]) == float(self.attributes_value), " ERROR -- in minimun value into offset %s in %s attribute" % (str(offset), str(doc["_id"]["attrName"]))
assert float(doc["points"][offset]["max"]) == float(self.attributes_value), " ERROR -- in maximun value into offset %s in %s attribute" % (str(offset), str(doc["_id"]["attrName"]))
assert float(doc["points"][offset]["sum"]) == float(sum_value), " ERROR -- in sum value into offset %s in %s attribute" % (str(offset), str(doc["_id"]["attrName"]))
assert float(doc["points"][offset]["sum2"]) == float(sum2_value), " ERROR -- in sum2 value into offset %s in %s attribute" % (str(offset), str(doc["_id"]["attrName"]))
world.sth.disconnect()
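    # Editor-added worked example for the origin/offset logic above, with a
    # hypothetical timestamp: for date_time "2015-06-17 14:23:05" and resolution
    # "hour", the expected origin is "2015-06-17 00:00:00" and the offset is
    # 14 - time_zone = 12.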
def verify_aggregates_is_not_in_mongo(self, resolution):
"""
        verify that aggregates are not stored in mongo:
:param resolution: resolutions type ( month | day | hour | minute | second )
"""
find_dict = {"_id.attrName" : {'$regex':'%s.*' % (self.attributes_name)}, #the regular expression is because in multi attribute the name is with postfix + <_value>. ex: temperature_0
"_id.entityId" : self.entity_id,
"_id.entityType" : self.entity_type,
"_id.resolution" : resolution }
world.mongo.connect("%s_%s" % (STH_DATABASE_PREFIX, self.service))
world.mongo.choice_collection("%s_%s.%s" % (STH_COLLECTION_PREFIX, self.service_path, AGGR))
cursor = world.mongo.find_data(find_dict)
        assert cursor.count() == 0, " ERROR - the aggregates have been stored in mongo."
world.mongo.disconnect()
def validate_that_the_aggregated_is_calculated_successfully(self, resolution):
"""
validate that the aggregated is calculated successfully
"""
sum = 0
sum2 = 0
offset = 0
find_dict = {"_id.attrName": {'$regex':'%s.*' % (self.attributes_name)}, #the regular expression is because in multi attribute the name is with postfix + <_value>. ex: temperature_0
"_id.resolution": str(resolution)}
world.sth.connect("%s_%s" % (STH_DATABASE_PREFIX, self.service))
world.sth.choice_collection("%s_%s_%s_%s.%s" % (STH_COLLECTION_PREFIX, self.service_path, self.entity_id, self.entity_type, AGGR))
cursor = world.sth.find_with_retry(find_dict)
assert cursor.count() != 0, " ERROR - the aggregated has not been stored in mongo successfully "
doc= world.sth.get_cursor_value(cursor)[0] # get all dictionaries into a cursor, return a list
offset = int(general_utils.get_date_only_one_value(self.date_time, resolution))
if resolution == "month":
offset=offset-1
elif resolution == "day":
offset=offset-1
elif resolution == "hour":
offset =offset-2
assert float(doc["points"][offset]["min"]) == float(self.attributes_value), \
" ERROR - in aggregated with min %s" % (str(doc["points"][offset]["min"]))
assert float(doc["points"][offset]["max"]) == float(self.attributes_value) + int(self.notifications_number)-1, \
" ERROR - in aggregated with max %s" % (str(doc["points"][offset]["max"]))
for i in range(int(self.notifications_number)):
v = int(self.attributes_value) + i
sum = sum + v
assert float(doc["points"][offset]["sum"]) == float(sum), \
" ERROR - in aggregated with sum %s" % (str(doc["points"][offset]["sum"]))
for i in range(int(self.notifications_number)):
v = int(self.attributes_value) + i
sum2 = sum2 + (v*v)
assert float(doc["points"][offset]["sum2"]) == float(sum2), \
" ERROR - in aggregated with sum2 %s" % (str(doc["points"][offset]["sum2"]))
def drop_database_in_mongo(self, driver):
"""
delete database and collections in mongo
:param driver: mongo instance
"""
driver.connect("%s_%s" % (STH_DATABASE_PREFIX, self.service))
driver.drop_database()
driver.disconnect()
# ------------------------------------------ Validations ----------------------------------------------------------
def verify_response_http_code (self, http_code_expected, response):
"""
validate http code in response
"""
http_utils.assert_status_code(http_utils.status_codes[http_code_expected], response, "ERROR - in http code received: ")
def change_destination_to_pattern (self, destination, new_service_path):
"""
        change the destination used for verification
        :param destination: new resource name
        :param new_service_path: new service path, also used to build the dataset and table names
"""
self.resource = destination
if destination != EMPTY:
self.entity_id, self.entity_type = self.__split_resource (self.resource)
self.service_path = new_service_path # used in notification request
self.dataset = self.service+"_"+new_service_path # used in ckan validation request
self.table = new_service_path +"_"+destination # used in mysql validation
# ------------------------------------------ ckan validations -----------------------------------------------------
def verify_dataset_search_values_by_column(self):
"""
Verify that the attribute contents (value) are stored in ckan
"""
if self.content == general_utils.XML:
VALUE_TEMP = CONTENT_VALUE
else:
VALUE_TEMP = VALUE
self.temp_dict=self.retry_in_datastore_search_sql_column (self.resource, self.dataset, self.attributes_name, self.attributes_value)
assert self.temp_dict != u'ERROR - Attributes are missing....', u'\nERROR - Attributes %s are missing, value expected: %s \n In %s >>> %s ' %(self.attributes_name, self.attributes_value, self.dataset, self.resource)
for i in range(0,self.attributes_number-1):
temp_attr = str(self.attributes_name+"_"+str(i))
assert str(self.temp_dict[RESULT][RECORDS][0][temp_attr]) == str(self.attributes[i][VALUE_TEMP]),\
"The "+self.attributes[i][NAME]+" value does not match..."
def verify_dataset_search_metadata_values_by_column(self):
"""
Verify that the attribute metadata contents (value) are stored in ckan
"""
if self.metadata_value:
for i in range(0, self.attributes_number-1):
if self.content == general_utils.XML:
assert str(self.temp_dict[RESULT][RECORDS][0][self.attributes_name+"_"+str(i)+"_md"][0][VALUE]) == str(self.attributes[i][METADATA][CONTEXT_METADATA][0][VALUE]),\
"The "+self.attributes[i][NAME]+" metadata value does not match..."
else:
assert str(self.temp_dict[RESULT][RECORDS][0][self.attributes_name+"_"+str(i)+"_md"][0][VALUE]) == str(self.attributes[i][METADATAS][0][VALUE]),\
"The "+self.attributes[i][NAME]+" metadata value does not match..."
def verify_dataset_search_without_data(self, error_msg):
"""
        Verify that nothing is stored in ckan when a field is missing (attribute value or metadata)
"""
row=1
resp= world.ckan.datastore_search_last_sql (row, self.resource, self.dataset)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - Ckan sql query")
dict_temp = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
assert len(dict_temp[RESULT][RECORDS]) == 0, "ERROR - " + error_msg
def verify_dataset_search_without_element(self, error_msg):
"""
        Verify that nothing is stored in ckan when an element is missing (organization, dataset, resource)
"""
row=1
resp= world.ckan.datastore_search_last_sql (row, self.resource, self.dataset)
assert not (resp), "ERROR - " + error_msg
def verify_dataset_search_values_by_row(self, metadata="true"):
"""
Verify that the attribute contents (value, metadata and type) are stored in ckan in row mode
"""
if self.content == general_utils.XML:
VALUE_TEMP = CONTENT_VALUE
else:
VALUE_TEMP = VALUE
for i in range(0,self.attributes_number):
self.temp_dict=self.retry_in_datastore_search_sql_row (i, self.resource, self.dataset, self.attributes_name)
assert self.temp_dict != u'ERROR - Attributes are missing....', \
u'\nERROR - Attributes %s are missing, value expected: %s \n In %s >>> %s ' %(self.attributes_name, self.attributes_value, self.dataset, self.resource)
assert str(self.temp_dict[ATTR_VALUE]) == str(self.attributes[i][VALUE_TEMP]),\
"The "+self.attributes[i][NAME]+" value does not match..."
assert str(self.temp_dict[ATTR_TYPE]) == str(self.attributes[i][TYPE]),\
"The "+self.attributes[i][NAME]+" type does not match..."
if metadata.lower() == "true":
assert self.verify_dataset_search_metadata_by_row (i), \
"The "+self.attributes[i][NAME]+" metadata value does not match..."
def verify_dataset_search_metadata_by_row (self, position):
"""
        Verify that the attribute metadata contents (value) are stored in ckan in row mode
"""
if self.metadata_value:
if self.content == general_utils.XML:
if (self.temp_dict[ATTR_MD][0][VALUE]) != str(self.attributes[position][METADATA][CONTEXT_METADATA][0][VALUE]):
return False
else:
if str(self.temp_dict[ATTR_MD][0][VALUE]) != str(self.attributes[position][METADATAS][0][VALUE]):
return False
return True
# ------------------------------------------ mysql validations ----------------------------------------------------
def close_connection (self):
"""
close mysql connection and delete the database used
"""
world.mysql.set_database(self.service)
world.mysql.disconnect()
def verify_table_search_values_by_column(self):
"""
Verify that the attribute contents (value) are stored in mysql in column mode
"""
if self.content == general_utils.XML:
VALUE_TEMP = CONTENT_VALUE
else:
VALUE_TEMP = VALUE
self.row= self.retry_in_table_search_sql_column (self.table, self.service, self.attributes_value)
assert self.row != u'ERROR - Attributes are missing....', u'ERROR - Attributes are missing....'
for i in range(len (self.attributes)):
if str(self.row [((i+1)*2)-1]) != str(self.attributes[i][VALUE_TEMP]): # verify the value
return "The "+self.attributes[i][NAME]+" value does not match..."
def verify_table_search_metadatas_values_by_column(self):
"""
Verify that the attribute metadata (value) are stored in mysql in column mode
"""
if self.attributes_metadata > 0:
if self.row != None:
for i in range(len (self.attributes)):
self.metadata = general_utils.convert_str_to_dict(self.row [(i+1)*2], general_utils.JSON)
if self.content == general_utils.XML:
if self.metadata[0][VALUE] != self.attributes[i][METADATA][CONTEXT_METADATA][0][VALUE]:
return "The "+self.attributes[i][NAME]+" metatada value does not match..."
else:
if self.metadata[0][VALUE] != self.attributes[i][METADATAS][0][VALUE]:
return "The "+self.attributes[i][NAME]+" metatada value does not match..."
self.metadata = None
def verify_table_search_without_data (self, error_msg):
"""
        Verify that nothing is stored in mysql
"""
row=world.mysql.table_search_one_row(self.service, self.table)
assert row == None or row == False, u'ERROR - ' + error_msg
def verify_table_search_values_by_row(self, metadata = "true"):
"""
Verify that the attribute contents (value, metadata and type) are stored in mysql in row mode
"""
if self.content == general_utils.XML:
VALUE_TEMP = CONTENT_VALUE
else:
VALUE_TEMP = VALUE
for i in range(0,self.attributes_number):
self.temp_dict=self.retry_in_table_search_sql_row (i, self.service, self.table, self.attributes_name)
assert self.temp_dict != u'ERROR - Attributes are missing....', \
u'\nERROR - Attributes %s are missing, value expected: %s \n In %s >>> %s ' %(self.attributes_name, self.attributes_value, self.table, )
assert str(self.temp_dict[6]) == str(self.attributes[i][VALUE_TEMP]),\
"The "+self.attributes[i][NAME]+" value does not match..."
assert str(self.temp_dict[5]) == str(self.attributes[i][TYPE]),\
"The "+self.attributes[i][NAME]+" type does not match..."
if metadata.lower() == "true":
assert self.verify_table_search_metadata_by_row (i), \
"The "+self.attributes[i][NAME]+" metadata value does not match..."
def verify_table_search_metadata_by_row (self, position):
"""
        Verify that the attribute metadata contents (value) are stored in mysql in row mode
"""
metadata_remote = general_utils.convert_str_to_dict(self.temp_dict[7], general_utils.JSON)[0][VALUE]
if self.metadata_value:
if self.content == general_utils.XML:
if (metadata_remote) != str(self.attributes[position][METADATA][CONTEXT_METADATA][0][VALUE]):
return False
else:
if str(metadata_remote) != str(self.attributes[position][METADATAS][0][VALUE]):
return False
return True
# ------------------------------------------ hadoop validations ---------------------------------------------------
def verify_file_search_values_and_type(self):
"""
        Verify that the attribute contents (type and value) are stored in hadoop in row mode
"""
directory = "%s/%s/%s" %(self.service, self.service_path, self.resource)
file_name = self.resource
for i in range (int(self.attributes_number)):
resp=world.hadoop.retry_in_file_search_data (directory, file_name, self.attributes_name+"_"+str(i), self.attributes_value)
assert resp != u'ERROR - Attributes are missing....', u'ERROR - Attributes are missing.... (%s)' % (self.attributes_name)
def verify_file_search_metadata(self):
"""
        Verify that the attribute metadata contents (value) are stored in hadoop in row mode
"""
directory = "%s/%s/%s" %(self.service, self.service_path, self.resource)
file_name = self.resource
for i in range (int(self.attributes_number)):
resp=world.hadoop.retry_in_file_search_data (directory, file_name, self.attributes_name+"_"+str(i), self.attributes_value)
if self.content == general_utils.XML:
assert resp[ATTR_MD][0][VALUE] == self.attributes[i][METADATA][CONTEXT_METADATA][0][VALUE],\
"The "+self.attributes[i][NAME]+" metatada value does not match..."
else:
assert resp[ATTR_MD][0][VALUE] == self.attributes[i][METADATAS][0][VALUE], \
"The "+self.attributes[i][NAME]+" metatada value does not match..."
# ------------------------------------------ mongo validations ---------------------------------------------------
|
jmcanterafonseca/fiware-cygnus
|
test/acceptance/tools/cygnus.py
|
Python
|
agpl-3.0
| 54,276
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import event_mass_edit
|
CLVsol/clvsol_odoo_addons
|
clv_event_history/wizard/__init__.py
|
Python
|
agpl-3.0
| 205
|
#!/usr/bin/env python
"""
Copyright (c) 2015-2017 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
Version 3 as published by the Free Software Foundation; either
or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
This example illustrates using callbacks to toggle an LED. Each time the
button switch is pressed the LED state will toggle to the opposite state.
The latch is rearmed within the callback routine.
"""
import time
import signal
import sys
from PyMata.pymata import PyMata
# Digital pins
GREEN_LED = 6
PUSH_BUTTON = 12
# Switch states
ON = 1
OFF = 0
# Default state of the LED
led_state = OFF
def get_led_state():
global led_state
return led_state
def set_led_state(state):
global led_state
led_state = state
# Callback function
# Set the LED to current state of the pushbutton switch
def cb_push_button(data):
print(data)
if get_led_state() == OFF:
board.digital_write(GREEN_LED, ON)
set_led_state(ON)
else:
board.digital_write(GREEN_LED, OFF)
set_led_state(OFF)
# Re-arm the latch to fire on the next transition to high
board.set_digital_latch(PUSH_BUTTON, board.DIGITAL_LATCH_HIGH, cb_push_button)
def signal_handler(sig, frame):
print('You pressed Ctrl+C')
if board is not None:
board.reset()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Create a PyMata instance
board = PyMata("/dev/ttyACM0", verbose=True)
# Set pin modes
# Set the pin to digital output to light the green LED
board.set_pin_mode(GREEN_LED, board.OUTPUT, board.DIGITAL)
# Set the pin to digital input to receive button presses
board.set_pin_mode(PUSH_BUTTON, board.INPUT, board.DIGITAL)
# Arm the digital latch to detect when the button is pressed
board.set_digital_latch(PUSH_BUTTON, board.DIGITAL_LATCH_HIGH, cb_push_button)
# A forever loop until user presses Ctrl+C
while 1:
pass
|
MrYsLab/PyMata
|
examples/digital_analog_io/callback_buttonLed_toggle.py
|
Python
|
agpl-3.0
| 2,453
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestScale(TestCase):
def testRegression(self):
inputSize = 1024
input = range(inputSize)
factor = 0.5
expected = [factor * n for n in input]
output = Scale(factor=factor, clipping=False)(input)
self.assertEqualVector(output, expected)
def testZero(self):
inputSize = 1024
input = [0] * inputSize
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testEmpty(self):
input = []
expected = input[:]
output = Scale()(input)
self.assertEqualVector(output, input)
def testClipping(self):
inputSize = 1024
maxAbsValue= 10
factor = 1
input = [n + maxAbsValue for n in range(inputSize)]
expected = [maxAbsValue] * inputSize
output = Scale(factor=factor, clipping=True, maxAbsValue=maxAbsValue)(input)
self.assertEqualVector(output, expected)
def testInvalidParam(self):
self.assertConfigureFails(Scale(), { 'maxAbsValue': -1 })
suite = allTests(TestScale)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
arseneyr/essentia
|
test/src/unittest/standard/test_scale.py
|
Python
|
agpl-3.0
| 2,054
|
"""
Each store has slightly different semantics wrt draft v published. XML doesn't officially recognize draft
but does hold it in a subdir. Old mongo has a virtual but not physical draft for every unit in published state.
Split mongo has a physical for every unit in every state.
Given that, here's a table of semantics and behaviors where - means no record and letters indicate values.
For xml, (-, x) means the item is published and can be edited. For split, it means the item's
been deleted from draft and will be deleted from published the next time it gets published. old mongo
can't represent that virtual state (2nd row in table)
In the table body, the tuples represent virtual modulestore result. The row headers represent the pre-import
modulestore state.
Modulestore virtual | XML physical (draft, published)
(draft, published) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
----------------------+--------------------------------------------
(-, -) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
(-, a) | (-, a) | (x, a) | (x, x) | (x, y) | (-, x) : deleted from draft before import
(a, -) | (a, -) | (x, -) | (x, x) | (x, y) | (a, x)
(a, a) | (a, a) | (x, a) | (x, x) | (x, y) | (a, x)
(a, b) | (a, b) | (x, b) | (x, x) | (x, y) | (a, x)
"""
import logging
import os
import mimetypes
from path import path
import json
import re
from lxml import etree
from .xml import XMLModuleStore, ImportSystem, ParentTracker
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor
from opaque_keys.edx.keys import UsageKey
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
from xmodule.tabs import CourseTabList
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.mongo.base import MongoRevisionKey
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
log = logging.getLogger(__name__)
def import_static_content(
course_data_path, static_content_store,
target_course_id, subpath='static', verbose=False):
remap_dict = {}
# now import all static assets
static_dir = course_data_path / subpath
try:
with open(course_data_path / 'policies/assets.json') as f:
policy = json.load(f)
except (IOError, ValueError) as err:
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
policy = {}
verbose = True
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
mimetypes_list = mimetypes.types_map.values()
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
content_path = os.path.join(dirname, filename)
if re.match(ASSET_IGNORE_REGEX, filename):
if verbose:
log.debug('skipping static content %s...', content_path)
continue
if verbose:
log.debug('importing static content %s...', content_path)
try:
with open(content_path, 'rb') as f:
data = f.read()
except IOError:
if filename.startswith('._'):
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
continue
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
fullname_with_subpath = content_path.replace(static_dir, '')
if fullname_with_subpath.startswith('/'):
fullname_with_subpath = fullname_with_subpath[1:]
asset_key = StaticContent.compute_location(target_course_id, fullname_with_subpath)
policy_ele = policy.get(asset_key.path, {})
displayname = policy_ele.get('displayname', filename)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=fullname_with_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
static_content_store.save(content)
except Exception as err:
log.exception(u'Error importing {0}, error={1}'.format(
fullname_with_subpath, err
))
# store the remapping information which will be needed
            # to substitute in the module data
remap_dict[fullname_with_subpath] = asset_key
return remap_dict
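# --- Editor-added illustrative sketch; not part of edx-platform ---
# import_static_content above returns a mapping from each file's course-relative
# path to the asset key it was saved under; callers later use it to rewrite
# /static/ links in module data. The helper below only illustrates the shape of
# that mapping; the function name and file names are hypothetical.
def _example_remap_shape(target_course_id, filenames=('images/logo.png', 'handouts/syllabus.pdf')):
    return {
        name: StaticContent.compute_location(target_course_id, name)
        for name in filenames
    }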
def import_from_xml(
store, user_id, data_dir, course_dirs=None,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True, static_content_store=None,
target_course_id=None, verbose=False,
do_import_static=True, create_course_if_not_present=False,
raise_on_failure=False):
"""
Import xml-based courses from data_dir into modulestore.
Returns:
list of new course objects
Args:
store: a modulestore implementing ModuleStoreWriteBase in which to store the imported courses.
data_dir: the root directory from which to find the xml courses.
course_dirs: If specified, the list of data_dir subdirectories to load. Otherwise, load
all course dirs
target_course_id: is the CourseKey that all modules should be remapped to
after import off disk. NOTE: this only makes sense if importing only
        one course. If more than one course is loaded from data_dir/course_dirs and you
        supply this id, this method will raise an AssertionError.
static_content_store: the static asset store
do_import_static: if True, then import the course's static files into static_content_store
This can be employed for courses which have substantial
unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
create_course_if_not_present: If True, then a new course is created if it doesn't already exist.
Otherwise, it throws an InvalidLocationError if the course does not exist.
default_class, load_error_modules: are arguments for constructing the XMLModuleStore (see its doc)
"""
xml_module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
)
# If we're going to remap the course_id, then we can only do that with
# a single course
if target_course_id:
assert(len(xml_module_store.modules) == 1)
new_courses = []
for course_key in xml_module_store.modules.keys():
if target_course_id is not None:
dest_course_id = target_course_id
else:
dest_course_id = store.make_course_key(course_key.org, course_key.course, course_key.run)
runtime = None
# Creates a new course if it doesn't already exist
if create_course_if_not_present and not store.has_course(dest_course_id, ignore_case=True):
try:
new_course = store.create_course(dest_course_id.org, dest_course_id.course, dest_course_id.run, user_id)
runtime = new_course.runtime
except DuplicateCourseError:
# course w/ same org and course exists
log.debug(
"Skipping import of course with id, %s,"
"since it collides with an existing one", dest_course_id
)
continue
with store.bulk_operations(dest_course_id):
source_course = xml_module_store.get_course(course_key)
# STEP 1: find and import course module
course, course_data_path = _import_course_module(
store, runtime, user_id,
data_dir, course_key, dest_course_id, source_course,
do_import_static, verbose
)
new_courses.append(course)
# STEP 2: import static content
_import_static_content_wrapper(
static_content_store, do_import_static, course_data_path, dest_course_id, verbose
)
# Import asset metadata stored in XML.
_import_course_asset_metadata(store, course_data_path, dest_course_id, raise_on_failure)
# STEP 3: import PUBLISHED items
# now loop through all the modules depth first and then orphans
with store.branch_setting(ModuleStoreEnum.Branch.published_only, dest_course_id):
all_locs = set(xml_module_store.modules[course_key].keys())
all_locs.remove(source_course.location)
def depth_first(subtree):
"""
Import top down just so import code can make assumptions about parents always being available
"""
if subtree.has_children:
for child in subtree.get_children():
try:
all_locs.remove(child.location)
except KeyError:
# tolerate same child occurring under 2 parents such as in
# ContentStoreTest.test_image_import
pass
if verbose:
log.debug('importing module location {loc}'.format(loc=child.location))
_import_module_and_update_references(
child,
store,
user_id,
course_key,
dest_course_id,
do_import_static=do_import_static,
runtime=course.runtime
)
depth_first(child)
depth_first(source_course)
for leftover in all_locs:
if verbose:
log.debug('importing module location {loc}'.format(loc=leftover))
_import_module_and_update_references(
xml_module_store.get_item(leftover), store,
user_id,
course_key,
dest_course_id,
do_import_static=do_import_static,
runtime=course.runtime
)
# STEP 4: import any DRAFT items
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_course_id):
_import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
course_key,
dest_course_id,
course.runtime
)
return new_courses
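# --- Editor-added usage sketch; not part of edx-platform ---
# A minimal illustration of how import_from_xml might be invoked, based only on
# its docstring above; the store, user id and paths are hypothetical placeholders.
def _example_import_call(store, user_id):
    return import_from_xml(
        store, user_id, '/tmp/course-data',
        course_dirs=['sample_course'],
        create_course_if_not_present=True,
        verbose=True,
    )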
def _import_course_asset_metadata(store, data_dir, course_id, raise_on_failure):
"""
Read in assets XML file, parse it, and add all asset metadata to the modulestore.
"""
asset_dir = path(data_dir) / AssetMetadata.EXPORTED_ASSET_DIR
assets_filename = AssetMetadata.EXPORTED_ASSET_FILENAME
asset_xml_file = asset_dir / assets_filename
def make_asset_id(course_id, asset_xml):
"""
Construct an asset ID out of a complete asset XML section.
"""
asset_type = None
asset_name = None
for child in asset_xml.iterchildren():
if child.tag == AssetMetadata.ASSET_TYPE_ATTR:
asset_type = child.text
elif child.tag == AssetMetadata.ASSET_BASENAME_ATTR:
asset_name = child.text
return course_id.make_asset_key(asset_type, asset_name)
all_assets = []
try:
xml_data = etree.parse(asset_xml_file).getroot()
assert(xml_data.tag == AssetMetadata.ALL_ASSETS_XML_TAG)
for asset in xml_data.iterchildren():
if asset.tag == AssetMetadata.ASSET_XML_TAG:
# Construct the asset key.
asset_key = make_asset_id(course_id, asset)
asset_md = AssetMetadata(asset_key)
asset_md.from_xml(asset)
all_assets.append(asset_md)
except IOError:
logging.info('No {} file is present with asset metadata.'.format(assets_filename))
return
except Exception: # pylint: disable=W0703
logging.exception('Error while parsing asset xml.')
if raise_on_failure:
raise
else:
return
# Now add all asset metadata to the modulestore.
if len(all_assets) > 0:
store.save_asset_metadata_list(all_assets, all_assets[0].edited_by, import_only=True)
def _import_course_module(
store, runtime, user_id, data_dir, course_key, dest_course_id, source_course, do_import_static,
verbose,
):
"""
Import a course module.
"""
if verbose:
log.debug("Scanning {0} for course module...".format(course_key))
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
course_data_path = path(data_dir) / source_course.data_dir
log.debug(u'======> IMPORTING course {course_key}'.format(
course_key=course_key,
))
if not do_import_static:
# for old-style xblock where this was actually linked to kvs
source_course.static_asset_path = source_course.data_dir
source_course.save()
log.debug('course static_asset_path={path}'.format(
path=source_course.static_asset_path
))
log.debug('course data_dir={0}'.format(source_course.data_dir))
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_course_id):
course = _import_module_and_update_references(
source_course, store, user_id,
course_key,
dest_course_id,
do_import_static=do_import_static,
runtime=runtime,
)
for entry in course.pdf_textbooks:
for chapter in entry.get('chapters', []):
if StaticContent.is_c4x_path(chapter.get('url', '')):
asset_key = StaticContent.get_location_from_path(chapter['url'])
chapter['url'] = StaticContent.get_static_path_from_location(asset_key)
# Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
# If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
# values then remap it so that the wiki does not point to the old wiki.
if course_key != course.id:
original_unique_wiki_slug = u'{0}.{1}.{2}'.format(
course_key.org,
course_key.course,
course_key.run
)
if course.wiki_slug == original_unique_wiki_slug or course.wiki_slug == course_key.course:
course.wiki_slug = u'{0}.{1}.{2}'.format(
course.id.org,
course.id.course,
course.id.run,
)
# cdodge: more hacks (what else). Seems like we have a
# problem when importing a course (like 6.002) which
# does not have any tabs defined in the policy file.
# The import goes fine and then displays fine in LMS,
# but if someone tries to add a new tab in the CMS, then
# the LMS barfs because it expects that -- if there are
# *any* tabs -- then there at least needs to be
# some predefined ones
if course.tabs is None or len(course.tabs) == 0:
CourseTabList.initialize_default(course)
store.update_item(course, user_id)
return course, course_data_path
def _import_static_content_wrapper(static_content_store, do_import_static, course_data_path, dest_course_id, verbose):
# then import all the static content
if static_content_store is not None and do_import_static:
# first pass to find everything in /static/
import_static_content(
course_data_path, static_content_store,
dest_course_id, subpath='static', verbose=verbose
)
elif verbose and not do_import_static:
log.debug(
"Skipping import of static content, "
"since do_import_static={0}".format(do_import_static)
)
# no matter what do_import_static is, import "static_import" directory
# This is needed because the "about" pages (eg "overview") are
# loaded via load_extra_content, and do not inherit the lms
# metadata from the course module, and thus do not get
# "static_content_store" properly defined. Static content
# referenced in those extra pages thus need to come through the
# c4x:// contentstore, unfortunately. Tell users to copy that
# content into the "static_import" subdir.
simport = 'static_import'
if os.path.exists(course_data_path / simport):
import_static_content(
course_data_path, static_content_store,
dest_course_id, subpath=simport, verbose=verbose
)
def _import_module_and_update_references(
module, store, user_id,
source_course_id, dest_course_id,
do_import_static=True, runtime=None):
logging.debug(u'processing import of module {}...'.format(module.location.to_deprecated_string()))
if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
# we want to convert all 'non-portable' links in the module_data
# (if it is a string) to portable strings (e.g. /static/)
module.data = rewrite_nonportable_content_links(
source_course_id,
dest_course_id,
module.data
)
# Move the module to a new course
def _convert_reference_fields_to_new_namespace(reference):
"""
Convert a reference to the new namespace, but only
if the original namespace matched the original course.
Otherwise, returns the input value.
"""
assert isinstance(reference, UsageKey)
if source_course_id == reference.course_key:
return reference.map_into_course(dest_course_id)
else:
return reference
fields = {}
for field_name, field in module.fields.iteritems():
if field.is_set_on(module):
if field.scope == Scope.parent:
continue
if isinstance(field, Reference):
fields[field_name] = _convert_reference_fields_to_new_namespace(field.read_from(module))
elif isinstance(field, ReferenceList):
references = field.read_from(module)
fields[field_name] = [_convert_reference_fields_to_new_namespace(reference) for reference in references]
elif isinstance(field, ReferenceValueDict):
reference_dict = field.read_from(module)
fields[field_name] = {
key: _convert_reference_fields_to_new_namespace(reference)
for key, reference
in reference_dict.iteritems()
}
elif field_name == 'xml_attributes':
value = field.read_from(module)
# remove any export/import only xml_attributes
# which are used to wire together draft imports
if 'parent_url' in value:
del value['parent_url']
if 'parent_sequential_url' in value:
del value['parent_sequential_url']
if 'index_in_children_list' in value:
del value['index_in_children_list']
fields[field_name] = value
else:
fields[field_name] = field.read_from(module)
return store.import_xblock(user_id, dest_course_id, module.location.category, module.location.block_id, fields, runtime)
def _import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
source_course_id,
target_course_id,
mongo_runtime
):
'''
This will import all the content inside of the 'drafts' folder, if it exists
NOTE: This is not a full course import, basically in our current
application only verticals (and downwards) can be in draft.
Therefore, we need to use slightly different call points into
the import process_xml as we can't simply call XMLModuleStore() constructor
(like we do for importing public content)
'''
draft_dir = course_data_path + "/drafts"
if not os.path.exists(draft_dir):
return
# create a new 'System' object which will manage the importing
errorlog = make_error_tracker()
# The course_dir as passed to ImportSystem is expected to just be relative, not
# the complete path including data_dir. ImportSystem will concatenate the two together.
data_dir = xml_module_store.data_dir
# Whether or not data_dir ends with a "/" differs in production vs. test.
if not data_dir.endswith("/"):
data_dir += "/"
draft_course_dir = draft_dir.replace(data_dir, '', 1)
system = ImportSystem(
xmlstore=xml_module_store,
course_id=source_course_id,
course_dir=draft_course_dir,
error_tracker=errorlog.tracker,
parent_tracker=ParentTracker(),
load_error_modules=False,
mixins=xml_module_store.xblock_mixins,
field_data=KvsFieldData(kvs=DictKeyValueStore()),
)
def _import_module(module):
# IMPORTANT: Be sure to update the module location in the NEW namespace
module_location = module.location.map_into_course(target_course_id)
# Update the module's location to DRAFT revision
# We need to call this method (instead of updating the location directly)
# to ensure that pure XBlock field data is updated correctly.
_update_module_location(module, module_location.replace(revision=MongoRevisionKey.draft))
parent_url = get_parent_url(module)
index = index_in_children_list(module)
# make sure our parent has us in its list of children
# this is to make sure private only modules show up
# in the list of children since they would have been
# filtered out from the non-draft store export.
if parent_url is not None and index is not None:
course_key = descriptor.location.course_key
parent_location = course_key.make_usage_key_from_deprecated_string(parent_url)
# IMPORTANT: Be sure to update the parent in the NEW namespace
parent_location = parent_location.map_into_course(target_course_id)
parent = store.get_item(parent_location, depth=0)
non_draft_location = module.location.map_into_course(target_course_id)
if not any(child.block_id == module.location.block_id for child in parent.children):
parent.children.insert(index, non_draft_location)
store.update_item(parent, user_id)
_import_module_and_update_references(
module, store, user_id,
source_course_id,
target_course_id,
runtime=mongo_runtime,
)
for child in module.get_children():
_import_module(child)
# Now walk the /vertical directory.
# Each file in the directory will be a draft copy of the vertical.
# First it is necessary to order the draft items by their desired index in the child list,
# since the order in which os.walk() returns the files is not guaranteed.
drafts = []
for dirname, _dirnames, filenames in os.walk(draft_dir):
for filename in filenames:
module_path = os.path.join(dirname, filename)
with open(module_path, 'r') as f:
try:
# note, on local dev it seems like OSX will put
# some extra files in the directory with "quarantine"
# information. These files are binary files and will
# throw exceptions when we try to parse the file
# as an XML string. Let's make sure we're
# dealing with a string before ingesting
data = f.read()
try:
xml = data.decode('utf-8')
except UnicodeDecodeError, err:
# seems like on OSX localdev, the OS is making
# quarantine files in the unzip directory
# when importing courses so if we blindly try to
# enumerate through the directory, we'll try
# to process a bunch of binary quarantine files
# (which are prefixed with a '._' character which
# will dump a bunch of exceptions to the output,
                    # although they are harmless).
#
# Reading online docs there doesn't seem to be
# a good means to detect a 'hidden' file that works
# well across all OS environments. So for now, I'm using
# OSX's utilization of a leading '.' in the filename
# to indicate a system hidden file.
#
# Better yet would be a way to figure out if this is
# a binary file, but I haven't found a good way
# to do this yet.
if filename.startswith('._'):
continue
# Not a 'hidden file', then re-raise exception
raise err
# process_xml call below recursively processes all descendants. If
# we call this on all verticals in a course with verticals nested below
# the unit level, we try to import the same content twice, causing naming conflicts.
# Therefore only process verticals at the unit level, assuming that any other
# verticals must be descendants.
if 'index_in_children_list' in xml:
descriptor = system.process_xml(xml)
# HACK: since we are doing partial imports of drafts
# the vertical doesn't have the 'url-name' set in the
# attributes (they are normally in the parent object,
# aka sequential), so we have to replace the location.name
# with the XML filename that is part of the pack
filename, __ = os.path.splitext(filename)
descriptor.location = descriptor.location.replace(name=filename)
index = index_in_children_list(descriptor)
parent_url = get_parent_url(descriptor, xml)
draft_url = descriptor.location.to_deprecated_string()
draft = draft_node_constructor(
module=descriptor, url=draft_url, parent_url=parent_url, index=index
)
drafts.append(draft)
except Exception: # pylint: disable=broad-except
logging.exception('Error while parsing course xml.')
# sort drafts by `index_in_children_list` attribute
drafts.sort(key=lambda x: x.index)
for draft in get_draft_subtree_roots(drafts):
try:
_import_module(draft.module)
except Exception: # pylint: disable=broad-except
logging.exception('while importing draft descriptor %s', draft.module)
def allowed_metadata_by_category(category):
# should this be in the descriptors?!?
return {
'vertical': [],
'chapter': ['start'],
'sequential': ['due', 'format', 'start', 'graded']
}.get(category, ['*'])
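# Editor-added examples of the mapping above (read directly from the dict):
#   allowed_metadata_by_category('chapter')    -> ['start']
#   allowed_metadata_by_category('sequential') -> ['due', 'format', 'start', 'graded']
#   allowed_metadata_by_category('problem')    -> ['*']   (any category not listed)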
def check_module_metadata_editability(module):
'''
Assert that there is no metadata within a particular module that
we can't support editing. However we always allow 'display_name'
and 'xml_attributes'
'''
allowed = allowed_metadata_by_category(module.location.category)
if '*' in allowed:
# everything is allowed
return 0
allowed = allowed + ['xml_attributes', 'display_name']
err_cnt = 0
illegal_keys = set(own_metadata(module).keys()) - set(allowed)
if len(illegal_keys) > 0:
err_cnt = err_cnt + 1
print(
": found non-editable metadata on {url}. "
"These metadata keys are not supported = {keys}".format(
url=module.location.to_deprecated_string(), keys=illegal_keys
)
)
return err_cnt
def get_parent_url(module, xml=None):
"""
Get the parent_url, if any, from module using xml as an alternative source. If it finds it in
xml but not on module, it modifies module so that the next call to this w/o the xml will get the parent url
"""
if hasattr(module, 'xml_attributes'):
return module.xml_attributes.get(
# handle deprecated old attr
'parent_url', module.xml_attributes.get('parent_sequential_url')
)
if xml is not None:
create_xml_attributes(module, xml)
return get_parent_url(module) # don't reparse xml b/c don't infinite recurse but retry above lines
return None
def index_in_children_list(module, xml=None):
"""
Get the index_in_children_list, if any, from module using xml
as an alternative source. If it finds it in xml but not on module,
it modifies module so that the next call to this w/o the xml
will get the field.
"""
if hasattr(module, 'xml_attributes'):
val = module.xml_attributes.get('index_in_children_list')
if val is not None:
return int(val)
return None
if xml is not None:
create_xml_attributes(module, xml)
return index_in_children_list(module) # don't reparse xml b/c don't infinite recurse but retry above lines
return None
def create_xml_attributes(module, xml):
"""
Make up for modules which don't define xml_attributes by creating them here and populating
"""
xml_attrs = {}
for attr, val in xml.attrib.iteritems():
if attr not in module.fields:
# translate obsolete attr
if attr == 'parent_sequential_url':
attr = 'parent_url'
xml_attrs[attr] = val
# now cache it on module where it's expected
setattr(module, 'xml_attributes', xml_attrs)
def validate_no_non_editable_metadata(module_store, course_id, category):
err_cnt = 0
for module_loc in module_store.modules[course_id]:
module = module_store.modules[course_id][module_loc]
if module.location.category == category:
err_cnt = err_cnt + check_module_metadata_editability(module)
return err_cnt
def validate_category_hierarchy(
module_store, course_id, parent_category, expected_child_category):
err_cnt = 0
parents = []
# get all modules of parent_category
for module in module_store.modules[course_id].itervalues():
if module.location.category == parent_category:
parents.append(module)
for parent in parents:
for child_loc in parent.children:
if child_loc.category != expected_child_category:
err_cnt += 1
print(
"ERROR: child {child} of parent {parent} was expected to be "
"category of {expected} but was {actual}".format(
child=child_loc, parent=parent.location,
expected=expected_child_category,
actual=child_loc.category
)
)
return err_cnt
def validate_data_source_path_existence(path, is_err=True, extra_msg=None):
_cnt = 0
if not os.path.exists(path):
print(
"{type}: Expected folder at {path}. {extra}".format(
type='ERROR' if is_err else 'WARNING',
path=path,
extra=extra_msg or "",
)
)
_cnt = 1
return _cnt
def validate_data_source_paths(data_dir, course_dir):
# check that there is a '/static/' directory
course_path = data_dir / course_dir
err_cnt = 0
warn_cnt = 0
err_cnt += validate_data_source_path_existence(course_path / 'static')
warn_cnt += validate_data_source_path_existence(
course_path / 'static/subs', is_err=False,
extra_msg='Video captions (if they are used) will not work unless they are static/subs.'
)
return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
"""
Validate that the course explicitly sets values for any fields
whose defaults may have changed between the export and the import.
Does not add to error count as these are just warnings.
"""
# is there a reliable way to get the module location just given the course_id?
warn_cnt = 0
for module in module_store.modules[course_id].itervalues():
if module.location.category == 'course':
if not module._field_data.has(module, 'rerandomize'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"rerandomize" whose default is now "never". '
'The behavior of your course may change.'
)
if not module._field_data.has(module, 'showanswer'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"showanswer" whose default is now "finished". '
'The behavior of your course may change.'
)
return warn_cnt
def perform_xlint(
data_dir, course_dirs,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True):
err_cnt = 0
warn_cnt = 0
module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules
)
# check all data source path information
for course_dir in course_dirs:
_err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
err_cnt += _err_cnt
warn_cnt += _warn_cnt
# first count all errors and warnings as part of the XMLModuleStore import
for err_log in module_store._course_errors.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
# then count outright all courses that failed to load at all
for err_log in module_store.errored_courses.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
print(msg)
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
for course_id in module_store.modules.keys():
# constrain that courses only have 'chapter' children
err_cnt += validate_category_hierarchy(
module_store, course_id, "course", "chapter"
)
# constrain that chapters only have 'sequentials'
err_cnt += validate_category_hierarchy(
module_store, course_id, "chapter", "sequential"
)
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(
module_store, course_id, "sequential", "vertical"
)
# validate the course policy overrides any defaults
# which have changed over time
warn_cnt += validate_course_policy(module_store, course_id)
# don't allow metadata on verticals, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "vertical"
)
# don't allow metadata on chapters, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "chapter"
)
# don't allow metadata on sequences that we can't edit
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "sequential"
)
# check for a presence of a course marketing video
if not module_store.has_item(course_id.make_usage_key('about', 'video')):
print(
"WARN: Missing course marketing video. It is recommended "
"that every course have a marketing video."
)
warn_cnt += 1
print("\n")
print("------------------------------------------")
print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
err=err_cnt, warn=warn_cnt)
)
if err_cnt > 0:
print(
"This course is not suitable for importing. Please fix courseware "
"according to specifications before importing."
)
elif warn_cnt > 0:
print(
"This course can be imported, but some errors may occur "
"during the run of the course. It is recommend that you fix "
"your courseware before importing"
)
else:
print("This course can be imported successfully.")
return err_cnt
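# A rough sketch of how perform_xlint might be invoked from a script; the data
# directory and course directory below are hypothetical placeholders, not
# values used anywhere in this module.
#
#     error_count = perform_xlint('/path/to/data', ['MyOrg-101'])
#     if error_count > 0:
#         sys.exit(1)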
def _update_module_location(module, new_location):
"""
Update a module's location.
If the module is a pure XBlock (not an XModule), then its field data
keys will need to be updated to include the new location.
Args:
module (XModuleMixin): The module to update.
new_location (Location): The new location of the module.
Returns:
None
"""
# Retrieve the content and settings fields that have been explicitly set
# to ensure that they are properly re-keyed in the XBlock field data.
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = (
module.get_explicitly_set_fields_by_scope(Scope.content).keys() +
module.get_explicitly_set_fields_by_scope(Scope.settings).keys()
)
module.location = new_location
# Pure XBlocks store the field data in a key-value store
# in which one component of the key is the XBlock's location (equivalent to "scope_ids").
# Since we've changed the XBlock's location, we need to re-save
# all the XBlock's fields so they will be stored using the new location in the key.
# However, since XBlocks only save "dirty" fields, we need to first
# explicitly set each field to its current value before triggering the save.
if len(rekey_fields) > 0:
for rekey_field_name in rekey_fields:
setattr(module, rekey_field_name, getattr(module, rekey_field_name))
module.save()
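# A brief usage sketch for _update_module_location; the block variable and the
# new usage key below are hypothetical, not taken from this module.
#
#     new_location = course_id.make_usage_key('vertical', 'renamed_vertical')
#     _update_module_location(block, new_location)
#
# For a pure XBlock this re-saves every explicitly set content/settings field
# so that it is keyed under the new location, as the comments above describe.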
|
UQ-UQx/edx-platform_lti
|
common/lib/xmodule/xmodule/modulestore/xml_importer.py
|
Python
|
agpl-3.0
| 40,861
|
# coding: utf-8
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
###############################################################################
# Coded by: Sergio Ernesto Tostado Sánchez (sergio@vauxoo.com)
###############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import exceptions
from openerp.tools import mute_logger
from psycopg2 import IntegrityError
from .test_common import TestCommon
class TestUnicity(TestCommon):
""" This test will prove the next cases to procure the module uniqueness:
- Test 1: Can't be created two Serial Numbers with the same name
"""
def test_1_1_1product_1serialnumber_2p_in(self):
""" Test 1.1. Creating 2 pickings with 1 product for the same serial
        number, in the receipts scope, with the following layout:
- Picking 1 IN
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
- Picking 2 IN
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
Warehouse: Your Company
"""
lot_id = self.env.ref('product_unique_serial.serial_number_demo_1')
# Creating move line for picking
product = self.env.ref('product_unique_serial.product_demo_1')
stock_move_datas = [{
'product_id': product.id,
'qty': 1.0
}]
# Creating the pickings
picking_data_1 = {
'name': 'Test Picking IN 1',
}
picking_data_2 = {
'name': 'Test Picking IN 2',
}
picking_1 = self.create_stock_picking(
stock_move_datas, picking_data_1,
self.env.ref('stock.picking_type_in'))
picking_2 = self.create_stock_picking(
stock_move_datas, picking_data_2,
self.env.ref('stock.picking_type_in'))
        # Executing the wizard to transfer the pickings
self.transfer_picking(picking_1, lot_id)
with self.assertRaises(exceptions.ValidationError) as err:
self.transfer_picking(picking_2, [lot_id])
msg = self.msg_increase % (product.name, 1.0, lot_id.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_1_2_1product_1serialnumber_2p_track_incoming(self):
""" Test 1.2. (track incoming) Creating 2 pickings with 1 product for
        the same serial number, in the receipts scope, with the following layout:
- Picking 1 IN
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
- Picking 2 IN
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
Warehouse: Your Company
        Comment: In this case the product should use track_incoming
        instead of track_all
"""
lot_id = self.env.ref('product_unique_serial.serial_number_demo_1')
# Creating move line for picking
product = self.env.ref('product_unique_serial.product_demo_1')
# track_incoming and lot_unique_ok to test unicity
self.assertTrue(product.write({'track_all': False,
'track_incoming': True,
'lot_unique_ok': True}),
"Cannot write product %s" % (product.name))
stock_move_datas = [{
'product_id': product.id,
'qty': 1.0
}]
# Creating the pickings
picking_data_1 = {
'name': 'Test Picking IN 1',
}
picking_data_2 = {
'name': 'Test Picking IN 2',
}
picking_1 = self.create_stock_picking(
stock_move_datas, picking_data_1,
self.env.ref('stock.picking_type_in'))
picking_2 = self.create_stock_picking(
stock_move_datas, picking_data_2,
self.env.ref('stock.picking_type_in'))
        # Executing the wizard to transfer the pickings
self.transfer_picking(picking_1, lot_id)
with self.assertRaises(exceptions.ValidationError) as err:
self.transfer_picking(picking_2, [lot_id])
msg = self.msg_increase % (product.name, 1.0, lot_id.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_2_1_1product_1serialnumber_2p_out(self):
""" Test 2.1. Creating 2 pickings with 1 product for the same serial
        number, in the delivery orders scope, with the following layout:
- Picking 1 OUT
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
- Picking 2 OUT
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
        NOTE: To run this case, an IN picking is needed first
Warehouse: Your Company
"""
lot_id = self.env.ref('product_unique_serial.serial_number_demo_1')
# Creating move line for picking
product = self.env.ref('product_unique_serial.product_demo_1')
stock_move_datas = [{
'product_id': product.id,
'qty': 1.0
}]
# Creating the pickings
picking_data_in = {
'name': 'Test Picking IN 1',
}
picking_data_out_1 = {
'name': 'Test Picking OUT 1',
}
picking_data_out_2 = {
'name': 'Test Picking OUT 2',
}
# IN PROCESS
picking_in = self.create_stock_picking(
stock_move_datas, picking_data_in,
self.env.ref('stock.picking_type_in'))
self.transfer_picking(picking_in, lot_id)
# OUT PROCESS
picking_out_1 = self.create_stock_picking(
stock_move_datas, picking_data_out_1,
self.env.ref('stock.picking_type_out'))
picking_out_2 = self.create_stock_picking(
stock_move_datas, picking_data_out_2,
self.env.ref('stock.picking_type_out'))
        # Executing the wizard to transfer the pickings
self.transfer_picking(picking_out_1, lot_id)
with self.assertRaises(exceptions.ValidationError) as err:
self.transfer_picking(picking_out_2, lot_id)
msg = self.msg_increase % (product.name, 1.0, lot_id.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_2_2_1product_1serialnumber_2p_track_outgoing(self):
""" Test 2.2. (track outgoing) Creating 2 pickings with 1 product for
        the same serial number, in the delivery orders scope, with the
        following layout:
- Picking 1 OUT
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
- Picking 2 OUT
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
        NOTE: To run this case, an IN picking is needed first
Warehouse: Your Company
        Comment: In this case the product should use track_outgoing
        instead of track_all
"""
lot_id = self.env.ref('product_unique_serial.serial_number_demo_1')
# Creating move line for picking
product = self.env.ref('product_unique_serial.product_demo_1')
# track_outgoing and lot_unique_ok to test unicity
self.assertTrue(product.write({'track_all': False,
'track_outgoing': True,
'lot_unique_ok': True}),
"Cannot write product %s" % (product.name))
stock_move_datas = [{
'product_id': product.id,
'qty': 1.0
}]
# Creating the pickings
picking_data_in = {
'name': 'Test Picking IN 1',
}
picking_data_out_1 = {
'name': 'Test Picking OUT 1',
}
picking_data_out_2 = {
'name': 'Test Picking OUT 2',
}
# IN PROCESS
picking_in = self.create_stock_picking(
stock_move_datas, picking_data_in,
self.env.ref('stock.picking_type_in'))
self.transfer_picking(picking_in, lot_id)
# OUT PROCESS
picking_out_1 = self.create_stock_picking(
stock_move_datas, picking_data_out_1,
self.env.ref('stock.picking_type_out'))
picking_out_2 = self.create_stock_picking(
stock_move_datas, picking_data_out_2,
self.env.ref('stock.picking_type_out'))
        # Executing the wizard to transfer the pickings
self.transfer_picking(picking_out_1, lot_id)
with self.assertRaises(exceptions.ValidationError) as err:
self.transfer_picking(picking_out_2, lot_id)
msg = self.msg_increase % (product.name, 1.0, lot_id.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_3_1product_qtyno1_1serialnumber_1p_in(self):
""" Test 3. Creating a picking with 1 product for the same serial
number, in the delivery orders scope, with the next form:
- Picking 1 IN
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || >1 || 001 ||
=============================================
Warehouse: Your Company
"""
# Creating move line for picking
product = self.env.ref('product_unique_serial.product_demo_1')
lot_id = self.env.ref('product_unique_serial.serial_number_demo_2')
stock_move_datas = [{
'product_id': product.id,
'qty': 2.0
}]
# Creating the pickings
picking_data_1 = {
'name': 'Test Picking IN 1',
}
picking_1 = self.create_stock_picking(
stock_move_datas, picking_data_1,
self.env.ref('stock.picking_type_in'))
        # Executing the wizard to transfer the picking
with self.assertRaises(exceptions.ValidationError) as err:
self.transfer_picking(picking_1, [lot_id])
msg = self.msg_greater % (product.name, 2.0, lot_id.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_4_1product_qty3_3serialnumber_1p_in(self):
""" Test 4. Creating a picking with 1 product for three serial numbers,
        in the receipts scope, with the following layout:
- Picking 1
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
|| A || 1 || 002 ||
=============================================
|| A || 1 || 003 ||
=============================================
Warehouse: Your Company
"""
# Creating move line for picking
product = self.env.ref('product_unique_serial.product_demo_1')
stock_move_datas = [
{'product_id': product.id, 'qty': 1.0},
{'product_id': product.id, 'qty': 1.0},
{'product_id': product.id, 'qty': 1.0}
]
# Creating the picking
picking_data_in = {
'name': 'Test Picking IN 1',
}
picking_in = self.create_stock_picking(
stock_move_datas, picking_data_in,
self.env.ref('stock.picking_type_in'))
        # Executing the wizard to transfer the picking: this should succeed
        # because it is the ideal case
self.transfer_picking(
picking_in,
[self.env.ref('product_unique_serial.serial_number_demo_1'),
self.env.ref('product_unique_serial.serial_number_demo_2'),
self.env.ref('product_unique_serial.serial_number_demo_3')]
)
def test_5_1product_1serialnumber_2p_internal(self):
""" Test 5. Creating 2 pickings with 1 product for the same serial
        number, in the internal scope, with the following layout:
- Picking 1 INTERNAL
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
- Picking 2 INTERNAL
=============================================
|| Product || Quantity || Serial Number ||
=============================================
|| A || 1 || 001 ||
=============================================
        NOTE: To run this case, an IN picking is needed first
Warehouse: Your Company
"""
product = self.env.ref('product_unique_serial.product_demo_2')
stock_move_in_datas = [
{'product_id': product.id,
'qty': 1.0,
'source_loc': self.env.ref('stock.stock_location_suppliers').id,
'destination_loc': self.env.ref(
'stock.stock_location_components').id}]
stock_move_internal_datas = [
{'product_id': product.id,
'qty': 1.0,
'source_loc': self.env.ref('stock.stock_location_components').id,
'destination_loc': self.env.ref('stock.stock_location_14').id}]
picking_data_in = {
'name': 'Test Picking IN 1',
}
picking_data_internal_1 = {
'name': 'Test Picking INTERNAL 1',
}
picking_data_internal_2 = {
'name': 'Test Picking INTERNAL 2',
}
# IN PROCESS
picking_in = self.create_stock_picking(
stock_move_in_datas, picking_data_in,
self.env.ref('stock.picking_type_in'))
self.transfer_picking(
picking_in,
[self.env.ref('product_unique_serial.serial_number_demo_1')])
# INTERNAL PROCESS
picking_internal_1 = self.create_stock_picking(
stock_move_internal_datas, picking_data_internal_1,
self.env.ref('stock.picking_type_internal'))
picking_internal_2 = self.create_stock_picking(
stock_move_internal_datas, picking_data_internal_2,
self.env.ref('stock.picking_type_internal'))
self.transfer_picking(
picking_internal_1,
[self.env.ref('product_unique_serial.serial_number_demo_1')])
with self.assertRaisesRegexp(
exceptions.ValidationError,
"Product 'Nokia 2630' has active 'check no negative'"):
self.transfer_picking(
picking_internal_2,
[self.env.ref('product_unique_serial.serial_number_demo_1')])
@mute_logger('openerp.sql_db')
def test_6_1serialnumber_1product_2records(self):
""" Test 6. Creating 2 identical serial numbers """
product_id = self.env.ref('product_unique_serial.product_demo_1')
lot_data = {
'name': '86137801852520',
'ref': '86137801852520',
'product_id': product_id.id
}
self.stock_production_lot_obj.create(lot_data)
with self.assertRaisesRegexp(
IntegrityError, r'"stock_production_lot_name_ref_uniq"'):
self.stock_production_lot_obj.create(lot_data)
def test_7_1_1product_1serialnumber_track_production_in(self):
""" Test 7. Creating moves as production order with 1 product as
material, 2 moves with 1 qty and 1 same serial number for both """
product = self.env.ref('product_unique_serial.product_demo_1')
# track_production and lot_unique_ok to test unicity
self.assertTrue(product.write({'track_all': False,
# mrp module should be installed
# to use track_production field
'lot_unique_ok': True}),
"Cannot write product %s" % (product.name))
uom = self.env.ref('product.product_uom_unit')
location_stock = self.env.ref('stock.stock_location_stock')
location_production = self.env.ref('stock.location_production')
# create a lot to product
lot_vals = {'name': 'AB-092134', 'product_id': product.id}
lot_move = self.stock_production_lot_obj.create(lot_vals)
# create stock move values
stock_move_vals = {
'name': '[%s] %s' % (product.default_code, product.name),
'product_id': product.id,
'product_uom_qty': 1.0,
'product_uom': uom.id,
'product_uos_qty': 1.0,
'product_uos': uom.id,
'location_id': location_stock.id,
'location_dest_id': location_production.id,
'restrict_lot_id': lot_move.id
}
# create first move
stock_move_1 = self.stock_move_obj.create(stock_move_vals)
stock_move_1.action_confirm()
stock_move_1.action_done()
# create a second move
stock_move_2 = self.stock_move_obj.create(stock_move_vals)
stock_move_2.action_confirm()
# Error raised expected with message expected.
with self.assertRaises(exceptions.ValidationError) as err:
stock_move_2.action_done()
msg = self.msg_increase % (product.name, 1.0, lot_move.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_7_2_1product_1serialnumber_track_production_out(self):
""" Test 7.2. Creating moves as finished product, 1 product, 2 moves
with 1 qty and 1 same serial number for both """
product = self.env.ref('product_unique_serial.product_demo_2')
        # track_incoming, track_production and lot_unique_ok to test unicity
self.assertTrue(product.write({'track_all': False,
'track_incoming': True,
# mrp module should be installed
# to use track_production field
'lot_unique_ok': True}),
"Cannot write product %s" % (product.name))
uom = self.env.ref('product.product_uom_unit')
location_stock = self.env.ref('stock.stock_location_stock')
location_production = self.env.ref('stock.location_production')
# create a lot to product
lot_vals = {'name': 'bC3i391m9p9200', 'product_id': product.id}
lot_move = self.stock_production_lot_obj.create(lot_vals)
# create stock move values
stock_move_vals = {
'name': '[%s] %s' % (product.default_code, product.name),
'product_id': product.id,
'product_uom_qty': 1.0,
'product_uom': uom.id,
'product_uos_qty': 1.0,
'product_uos': uom.id,
'location_id': location_production.id,
'location_dest_id': location_stock.id,
'restrict_lot_id': lot_move.id
}
# create first move
stock_move_1 = self.stock_move_obj.create(stock_move_vals)
stock_move_1.action_confirm()
stock_move_1.action_done()
# create a second move
stock_move_2 = self.stock_move_obj.create(stock_move_vals)
stock_move_2.action_confirm()
# Error raised expected with message expected.
with self.assertRaises(exceptions.ValidationError) as err:
stock_move_2.action_done()
msg = self.msg_increase % (product.name, 1.0, lot_move.name, self.note)
self.assertEquals(err.exception.value, msg)
def test_8_inventory_adjustment(self):
"""Test 8. It tries to adjust inventory for a product that has \
selected 'unique piece' with as much new 1"""
lot_id = self.env.ref('product_unique_serial.serial_number_demo_4')
stock_inv = self.stock_inventory_obj.create({
'name': 'Adjust Test',
'location_id': self.stock_loc.id,
'filter': 'product',
'product_id': self.prod_d1.id})
stock_inv.prepare_inventory()
self.stock_inventory_line_obj.create({
'product_id': self.prod_d1.id,
'location_id': self.stock_loc.id,
'prod_lot_id': lot_id.id,
'product_qty': 5,
'inventory_id': stock_inv.id
})
with self.assertRaises(exceptions.ValidationError) as err:
stock_inv.action_done()
msg = self.msg_greater % (
self.prod_d1.name, 5.0, lot_id.name, self.note)
self.assertEquals(err.exception.value, msg)
|
mohamedhagag/community-addons
|
product_unique_serial/tests/test_for_unicity.py
|
Python
|
agpl-3.0
| 22,996
|
from . import test_stock_location_empty
|
OCA/stock-logistics-warehouse
|
stock_location_empty/tests/__init__.py
|
Python
|
agpl-3.0
| 40
|
from .item_status import * # noqa
|
vyos-legacy/vyconfd
|
vyconf/utils/__init__.py
|
Python
|
lgpl-2.1
| 35
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import re
import os
import sys
import shutil
import tempfile
import argparse
from llnl.util.filesystem import working_dir, mkdirp
import spack
from spack.util.executable import which
description = "runs source code style checks on Spack. requires flake8"
section = "developer"
level = "long"
def is_package(f):
"""Whether flake8 should consider a file as a core file or a package.
We run flake8 with different exceptions for the core and for
packages, since we allow `from spack import *` and poking globals
into packages.
"""
return f.startswith('var/spack/repos/') or 'docs/tutorial/examples' in f
#: List of directories to exclude from checks.
exclude_directories = [spack.external_path]
#: This is a dict that maps:
#: filename pattern ->
#: flake8 exemption code ->
#: list of patterns, for which matching lines should have codes applied.
#:
#: For each file, if the filename pattern matches, we'll add per-line
#: exemptions if any patterns in the sub-dict match.
pattern_exemptions = {
# exemptions applied only to package.py files.
r'package.py$': {
# Allow 'from spack import *' in packages, but no other wildcards
'F403': [
r'^from spack import \*$'
],
# Exempt lines with urls and descriptions from overlong line errors.
'E501': [
r'^\s*homepage\s*=',
r'^\s*url\s*=',
r'^\s*git\s*=',
r'^\s*svn\s*=',
r'^\s*hg\s*=',
r'^\s*list_url\s*=',
r'^\s*version\(',
r'^\s*variant\(',
r'^\s*provides\(',
r'^\s*extends\(',
r'^\s*depends_on\(',
r'^\s*conflicts\(',
r'^\s*resource\(',
r'^\s*patch\(',
],
# Exempt '@when' decorated functions from redefinition errors.
'F811': [
r'^\s*@when\(.*\)',
],
},
# exemptions applied to all files.
r'.py$': {
'E501': [
r'(https?|ftp|file)\:', # URLs
r'([\'"])[0-9a-fA-F]{32,}\1', # long hex checksums
]
},
}
# compile all regular expressions.
pattern_exemptions = dict(
(re.compile(file_pattern),
dict((code, [re.compile(p) for p in patterns])
for code, patterns in error_dict.items()))
for file_pattern, error_dict in pattern_exemptions.items())
def changed_files(args):
"""Get list of changed files in the Spack repository."""
git = which('git', required=True)
range = "{0}...".format(args.base)
git_args = [
# Add changed files committed since branching off of develop
['diff', '--name-only', '--diff-filter=ACMR', range],
# Add changed files that have been staged but not yet committed
['diff', '--name-only', '--diff-filter=ACMR', '--cached'],
# Add changed files that are unstaged
['diff', '--name-only', '--diff-filter=ACMR'],
]
# Add new files that are untracked
if args.untracked:
git_args.append(['ls-files', '--exclude-standard', '--other'])
# add everything if the user asked for it
if args.all:
git_args.append(['ls-files', '--exclude-standard'])
excludes = [os.path.realpath(f) for f in exclude_directories]
changed = set()
for arg_list in git_args:
files = git(*arg_list, output=str).split('\n')
for f in files:
# Ignore non-Python files
if not f.endswith('.py'):
continue
# Ignore files in the exclude locations
if any(os.path.realpath(f).startswith(e) for e in excludes):
continue
changed.add(f)
return sorted(changed)
def add_pattern_exemptions(line, codes):
"""Add a flake8 exemption to a line."""
if line.startswith('#'):
return line
line = line.rstrip('\n')
# Line is already ignored
if line.endswith('# noqa'):
return line + '\n'
orig_len = len(line)
exemptions = ','.join(sorted(set(codes)))
# append exemption to line
if '# noqa: ' in line:
line += ',{0}'.format(exemptions)
elif line: # ignore noqa on empty lines
line += ' # noqa: {0}'.format(exemptions)
# if THIS made the line too long, add an exemption for that
if len(line) > 79 and orig_len <= 79:
line += ',E501'
return line + '\n'
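# A small illustration of the transformation performed by
# add_pattern_exemptions; the input line is hypothetical but matches the F403
# exemption declared in pattern_exemptions above.
#
#     add_pattern_exemptions('from spack import *\n', ['F403'])
#     # returns 'from spack import * # noqa: F403\n'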
def filter_file(source, dest, output=False):
"""Filter a single file through all the patterns in pattern_exemptions."""
with open(source) as infile:
parent = os.path.dirname(dest)
mkdirp(parent)
with open(dest, 'w') as outfile:
for line in infile:
line_errors = []
# pattern exemptions
for file_pattern, errors in pattern_exemptions.items():
if not file_pattern.search(source):
continue
for code, patterns in errors.items():
for pattern in patterns:
if pattern.search(line):
line_errors.append(code)
break
if line_errors:
line = add_pattern_exemptions(line, line_errors)
outfile.write(line)
if output:
sys.stdout.write(line)
def setup_parser(subparser):
subparser.add_argument(
'-b', '--base', action='store', default='develop',
help="select base branch for collecting list of modified files")
subparser.add_argument(
'-k', '--keep-temp', action='store_true',
help="do not delete temporary directory where flake8 runs. "
"use for debugging, to see filtered files")
subparser.add_argument(
'-a', '--all', action='store_true',
help="check all files, not just changed files")
subparser.add_argument(
'-o', '--output', action='store_true',
help="send filtered files to stdout as well as temp files")
subparser.add_argument(
'-r', '--root-relative', action='store_true', default=False,
help="print root-relative paths (default is cwd-relative)")
subparser.add_argument(
'-U', '--no-untracked', dest='untracked', action='store_false',
default=True, help="exclude untracked files from checks")
subparser.add_argument(
'files', nargs=argparse.REMAINDER, help="specific files to check")
def flake8(parser, args):
flake8 = which('flake8', required=True)
temp = tempfile.mkdtemp()
try:
file_list = args.files
if file_list:
def prefix_relative(path):
return os.path.relpath(
os.path.abspath(os.path.realpath(path)), spack.prefix)
file_list = [prefix_relative(p) for p in file_list]
with working_dir(spack.prefix):
if not file_list:
file_list = changed_files(args)
print('=======================================================')
print('flake8: running flake8 code checks on spack.')
print()
print('Modified files:')
for filename in file_list:
print(' {0}'.format(filename.strip()))
print('=======================================================')
# filter files into a temporary directory with exemptions added.
for filename in file_list:
src_path = os.path.join(spack.prefix, filename)
dest_path = os.path.join(temp, filename)
filter_file(src_path, dest_path, args.output)
# run flake8 on the temporary tree, once for core, once for pkgs
package_file_list = [f for f in file_list if is_package(f)]
file_list = [f for f in file_list if not is_package(f)]
returncode = 0
with working_dir(temp):
output = ''
if file_list:
output += flake8(
'--format', 'pylint',
'--config=%s' % os.path.join(spack.prefix, '.flake8'),
*file_list, fail_on_error=False, output=str)
returncode |= flake8.returncode
if package_file_list:
output += flake8(
'--format', 'pylint',
'--config=%s' % os.path.join(spack.prefix,
'.flake8_packages'),
*package_file_list, fail_on_error=False, output=str)
returncode |= flake8.returncode
if args.root_relative:
# print results relative to repo root.
print(output)
else:
# print results relative to current working directory
def cwd_relative(path):
return '{0}: ['.format(os.path.relpath(
os.path.join(spack.prefix, path.group(1)), os.getcwd()))
for line in output.split('\n'):
print(re.sub(r'^(.*): \[', cwd_relative, line))
if returncode != 0:
print('Flake8 found errors.')
sys.exit(1)
else:
print('Flake8 checks were clean.')
finally:
if args.keep_temp:
print('Temporary files are in: ', temp)
else:
shutil.rmtree(temp, ignore_errors=True)
|
skosukhin/spack
|
lib/spack/spack/cmd/flake8.py
|
Python
|
lgpl-2.1
| 10,620
|
#!/usr/bin/python
"""
Resources:
http://code.google.com/p/pybluez/
http://lightblue.sourceforge.net/
http://code.google.com/p/python-bluetooth-scanner
"""
from __future__ import with_statement
import select
import logging
import bluetooth
import gobject
import util.misc as misc_utils
_moduleLogger = logging.getLogger(__name__)
class _BluetoothConnection(gobject.GObject):
__gsignals__ = {
'data_ready' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(),
),
'closed' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(),
),
}
def __init__(self, socket, addr, protocol):
gobject.GObject.__init__(self)
self._socket = socket
self._address = addr
self._dataId = gobject.io_add_watch (self._socket, gobject.IO_IN, self._on_data)
self._protocol = protocol
def close(self):
gobject.source_remove(self._dataId)
self._dataId = None
self._socket.close()
self._socket = None
self.emit("closed")
@property
def socket(self):
return self._socket
@property
def address(self):
return self._address
@property
def protocol(self):
return self._protocol
@misc_utils.log_exception(_moduleLogger)
def _on_data(self, source, condition):
self.emit("data_ready")
return True
gobject.type_register(_BluetoothConnection)
class _BluetoothListener(gobject.GObject):
__gsignals__ = {
'incoming_connection' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, ),
),
'start_listening' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(),
),
'stop_listening' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(),
),
}
def __init__(self, protocol, timeout):
gobject.GObject.__init__(self)
self._timeout = timeout
self._protocol = protocol
self._socket = None
self._incomingId = None
def start(self):
assert self._socket is None and self._incomingId is None
self._socket = bluetooth.BluetoothSocket(self._protocol["transport"])
self._socket.settimeout(self._timeout)
self._socket.bind(("", bluetooth.PORT_ANY))
self._socket.listen(1)
self._incomingId = gobject.io_add_watch(
self._socket, gobject.IO_IN, self._on_incoming
)
bluetooth.advertise_service(self._socket, self._protocol["name"], self._protocol["uuid"])
self.emit("start_listening")
def stop(self):
if self._socket is None or self._incomingId is None:
return
gobject.source_remove(self._incomingId)
self._incomingId = None
bluetooth.stop_advertising(self._socket)
self._socket.close()
self._socket = None
self.emit("stop_listening")
@property
def isListening(self):
return self._socket is not None and self._incomingId is not None
@property
def socket(self):
assert self._socket is not None
return self._socket
@misc_utils.log_exception(_moduleLogger)
def _on_incoming(self, source, condition):
newSocket, (address, port) = self._socket.accept()
newSocket.settimeout(self._timeout)
connection = _BluetoothConnection(newSocket, address, self._protocol)
self.emit("incoming_connection", connection)
return True
gobject.type_register(_BluetoothListener)
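# A sketch of the protocol description consumed by _BluetoothListener and,
# further below, by BluetoothBackend.add_protocol; the keys ("name", "uuid",
# "transport") are inferred from their usage in this module, and the values
# shown are only illustrative assumptions.
#
#     example_protocol = {
#         "name": "Example Serial Service",
#         "uuid": "00001101-0000-1000-8000-00805F9B34FB",
#         "transport": bluetooth.RFCOMM,
#     }
#     backend = BluetoothBackend()
#     backend.add_protocol(example_protocol)
#     backend.login()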
class _DeviceDiscoverer(bluetooth.DeviceDiscoverer):
def __init__(self, timeout):
bluetooth.DeviceDiscoverer.__init__(self)
self._timeout = timeout
self._devices = []
self._devicesInProgress = []
@property
def devices(self):
return self._devices
def find_devices(self, *args, **kwds):
        # Ensure we always start clean; this is the reason we overrode this method
self._devicesInProgress = []
newArgs = [self]
newArgs.extend(args)
bluetooth.DeviceDiscoverer.find_devices(*newArgs, **kwds)
def process_inquiry(self):
        # The default implementation calls into some HCI code, but an example
        # used select, so we follow the example
while self.is_inquiring or 0 < len(self.names_to_find):
# The whole reason for overriding this
_moduleLogger.debug("Event (%r, %r)"% (self.is_inquiring, self.names_to_find))
rfds = select.select([self], [], [], self._timeout)[0]
if self in rfds:
self.process_event()
@misc_utils.log_exception(_moduleLogger)
def device_discovered(self, address, deviceclass, name):
device = address, deviceclass, name
_moduleLogger.debug("Device Discovered %r" % (device, ))
self._devicesInProgress.append(device)
@misc_utils.log_exception(_moduleLogger)
def inquiry_complete(self):
_moduleLogger.debug("Inquiry Complete")
self._devices = self._devicesInProgress
class BluetoothBackend(gobject.GObject):
__gsignals__ = {
'login' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(),
),
'logout' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(),
),
'contacts_update' : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, ),
),
}
def __init__(self):
gobject.GObject.__init__(self)
self._disco = None
self._timeout = 8
self._listeners = {}
self._protocols = []
self._isListening = True
def add_protocol(self, protocol):
assert not self.is_logged_in()
self._protocols.append(protocol)
def login(self):
self._disco = _DeviceDiscoverer(self._timeout)
isListening = self._isListening
for protocol in self._protocols:
protoId = protocol["uuid"]
self._listeners[protoId] = _BluetoothListener(protocol, self._timeout)
if isListening:
self._listeners[protoId].start()
self.emit("login")
def logout(self):
for protocol in self._protocols:
protoId = protocol["uuid"]
listener = self._listeners[protoId]
            listener.stop()
self._listeners.clear()
self._disco.cancel_inquiry() # precaution
self.emit("logout")
def is_logged_in(self):
if self._listeners:
return True
else:
return False
def is_listening(self):
return self._isListening
def enable_listening(self, enable):
if enable:
for listener in self._listeners.itervalues():
assert not listener.isListening
for listener in self._listeners.itervalues():
listener.start()
else:
for listener in self._listeners.itervalues():
assert listener.isListening
for listener in self._listeners.itervalues():
listener.stop()
def get_contacts(self):
try:
self._disco.find_devices(
duration=self._timeout,
flush_cache = True,
lookup_names = True,
)
self._disco.process_inquiry()
except bluetooth.BluetoothError, e:
# lightblue does this, so I guess I will too
_moduleLogger.error("Error while getting contacts, attempting to cancel")
try:
self._disco.cancel_inquiry()
finally:
raise e
return self._disco.devices
def get_contact_services(self, address):
services = bluetooth.find_service(address = address)
return services
def connect(self, addr, transport, port):
sock = bluetooth.BluetoothSocket(transport)
sock.settimeout(self._timeout)
try:
sock.connect((addr, port))
except bluetooth.error, e:
sock.close()
raise
return _BluetoothConnection(sock, addr, "")
gobject.type_register(BluetoothBackend)
class BluetoothClass(object):
def __init__(self, description):
self.description = description
def __str__(self):
return self.description
MAJOR_CLASS = BluetoothClass("Major Class")
MAJOR_CLASS.MISCELLANEOUS = BluetoothClass("Miscellaneous")
MAJOR_CLASS.COMPUTER = BluetoothClass("Computer")
MAJOR_CLASS.PHONE = BluetoothClass("Phone")
MAJOR_CLASS.LAN = BluetoothClass("LAN/Network Access Point")
MAJOR_CLASS.AV = BluetoothClass("Audio/Video")
MAJOR_CLASS.PERIPHERAL = BluetoothClass("Peripheral")
MAJOR_CLASS.IMAGING = BluetoothClass("Imaging")
MAJOR_CLASS.UNCATEGORIZED = BluetoothClass("Uncategorized")
MAJOR_CLASS.MISCELLANEOUS.RESERVED = BluetoothClass("Reserved")
MAJOR_CLASS.COMPUTER.UNCATEGORIZED = BluetoothClass("Uncategorized, code for device not assigned")
MAJOR_CLASS.COMPUTER.DESKTOP = BluetoothClass("Desktop workstation")
MAJOR_CLASS.COMPUTER.SERVER = BluetoothClass("Server-class computer")
MAJOR_CLASS.COMPUTER.LAPTOP = BluetoothClass("Laptop")
MAJOR_CLASS.COMPUTER.HANDHELD = BluetoothClass("Handheld PC/PDA (clam shell)")
MAJOR_CLASS.COMPUTER.PALM_SIZE = BluetoothClass("Palm sized PC/PDA")
MAJOR_CLASS.COMPUTER.WEARABLE = BluetoothClass("Wearable computer (Watch sized)")
MAJOR_CLASS.COMPUTER.RESERVED = BluetoothClass("Reserved")
MAJOR_CLASS.PHONE.UNCATEGORIZED = BluetoothClass("Uncategorized, code for device not assigned")
MAJOR_CLASS.PHONE.CELLULAR = BluetoothClass("Cellular")
MAJOR_CLASS.PHONE.CORDLESS = BluetoothClass("Cordless")
MAJOR_CLASS.PHONE.SMART_PHONE = BluetoothClass("Smart phone")
MAJOR_CLASS.PHONE.MODEM = BluetoothClass("Wired modem or voice gateway")
MAJOR_CLASS.PHONE.ISDN = BluetoothClass("Common ISDN Access")
MAJOR_CLASS.PHONE.RESERVED = BluetoothClass("Reserved")
MAJOR_CLASS.LAN.UNCATEGORIZED = BluetoothClass("Uncategorized")
MAJOR_CLASS.LAN.RESERVED = BluetoothClass("Reserved")
MAJOR_CLASS.AV.UNCATEGORIZED = BluetoothClass("Uncategorized, code for device not assigned")
MAJOR_CLASS.AV.HEADSET = BluetoothClass("Device conforms to headset profile")
MAJOR_CLASS.AV.HANDS_FREE = BluetoothClass("Hands-free")
MAJOR_CLASS.AV.MICROPHONE = BluetoothClass("Microphone")
MAJOR_CLASS.AV.LOUDSPEAKER = BluetoothClass("Loudspeaker")
MAJOR_CLASS.AV.HEADPHONES = BluetoothClass("Headphones")
MAJOR_CLASS.AV.PORTABLE_AUDIO = BluetoothClass("Portable Audio")
MAJOR_CLASS.AV.CAR_AUDIO = BluetoothClass("Car Audio")
MAJOR_CLASS.AV.SET_TOP_BOX = BluetoothClass("Set-top box")
MAJOR_CLASS.AV.HIFI_AUDIO_DEVICE = BluetoothClass("HiFi Audio Device")
MAJOR_CLASS.AV.VCR = BluetoothClass("VCR")
MAJOR_CLASS.AV.VIDEO_CAMERA = BluetoothClass("Video Camera")
MAJOR_CLASS.AV.CAMCORDER = BluetoothClass("Camcorder")
MAJOR_CLASS.AV.VIDEO_MONITOR = BluetoothClass("Video Monitor")
MAJOR_CLASS.AV.VIDEO_DISPLAY = BluetoothClass("Video Display and Loudspeaker")
MAJOR_CLASS.AV.VIDEO_CONFERENCING = BluetoothClass("Video Conferencing")
MAJOR_CLASS.AV.GAMING = BluetoothClass("Gaming/Toy")
MAJOR_CLASS.AV.RESERVED = BluetoothClass("Reserved")
MAJOR_CLASS.PERIPHERAL.UNCATEGORIZED = BluetoothClass("Uncategorized, code for device not assigned")
MAJOR_CLASS.PERIPHERAL.JOYSTICK = BluetoothClass("Joystick")
MAJOR_CLASS.PERIPHERAL.GAMEPAD = BluetoothClass("Gamepad")
MAJOR_CLASS.PERIPHERAL.REMOTE_CONTROL = BluetoothClass("Remote Control")
MAJOR_CLASS.PERIPHERAL.SENSING_DEVICE = BluetoothClass("Sensing Device")
MAJOR_CLASS.PERIPHERAL.DIGITIZER_TABLET = BluetoothClass("Digitizer Tablet")
MAJOR_CLASS.PERIPHERAL.CARD_READER = BluetoothClass("Card Reader (e.g. SIM Card Reader)")
MAJOR_CLASS.PERIPHERAL.RESERVED = BluetoothClass("Reserved")
MAJOR_CLASS.IMAGING.UNCATEGORIZED = BluetoothClass("Uncategorized, code for device not assigned")
MAJOR_CLASS.IMAGING.DISPLAY = BluetoothClass("Display")
MAJOR_CLASS.IMAGING.CAMERA = BluetoothClass("Camera")
MAJOR_CLASS.IMAGING.SCANNER = BluetoothClass("Scanner")
MAJOR_CLASS.IMAGING.PRINTER = BluetoothClass("Printer")
MAJOR_CLASS.IMAGING.RESERVED = BluetoothClass("Reserved")
SERVICE_CLASS = BluetoothClass("Service Class")
SERVICE_CLASS.LIMITED = BluetoothClass("Limited Discoverable Mode")
SERVICE_CLASS.POSITIONING = BluetoothClass("Positioning (Location identification)")
SERVICE_CLASS.NETWORKING = BluetoothClass("Networking (LAN, Ad hoc, ...)")
SERVICE_CLASS.RENDERING = BluetoothClass("Rendering (Printing, speaking, ...)")
SERVICE_CLASS.CAPTURING = BluetoothClass("Capturing (Scanner, microphone, ...)")
SERVICE_CLASS.OBJECT_TRANSFER = BluetoothClass("Object Transfer (v-Inbox, v-Folder, ...)")
SERVICE_CLASS.AUDIO = BluetoothClass("Audio (Speaker, Microphone, Headset service, ...")
SERVICE_CLASS.TELEPHONY = BluetoothClass("Telephony (Cordless telephony, Modem, Headset service, ...)")
SERVICE_CLASS.INFORMATION = BluetoothClass("Information (WEB-server, WAP-server, ...)")
_ORDERED_MAJOR_CLASSES = (
MAJOR_CLASS.MISCELLANEOUS,
MAJOR_CLASS.COMPUTER,
MAJOR_CLASS.PHONE,
MAJOR_CLASS.LAN,
MAJOR_CLASS.AV,
MAJOR_CLASS.PERIPHERAL,
MAJOR_CLASS.IMAGING,
)
_SERVICE_CLASSES = (
(13 - 13, SERVICE_CLASS.LIMITED),
(16 - 13, SERVICE_CLASS.POSITIONING),
(17 - 13, SERVICE_CLASS.NETWORKING),
(18 - 13, SERVICE_CLASS.RENDERING),
(19 - 13, SERVICE_CLASS.CAPTURING),
(20 - 13, SERVICE_CLASS.OBJECT_TRANSFER),
(21 - 13, SERVICE_CLASS.AUDIO),
(22 - 13, SERVICE_CLASS.TELEPHONY),
(23 - 13, SERVICE_CLASS.INFORMATION),
)
def _parse_device_class(deviceclass):
# get some information out of the device class and display it.
# voodoo magic specified at:
#
# https://www.bluetooth.org/foundry/assignnumb/document/baseband
majorClass = (deviceclass >> 8) & 0xf
minorClass = (deviceclass >> 2) & 0x3f
serviceClasses = (deviceclass >> 13) & 0x7ff
return majorClass, minorClass, serviceClasses
def parse_device_class(deviceclass):
majorClassCode, minorClassCode, serviceClassCodes = _parse_device_class(deviceclass)
try:
majorClass = _ORDERED_MAJOR_CLASSES[majorClassCode]
except IndexError:
majorClass = MAJOR_CLASS.UNCATEGORIZED
serviceClasses = []
for bitpos, cls in _SERVICE_CLASSES:
if serviceClassCodes & (1 << bitpos):
serviceClasses.append(cls)
return majorClass, minorClassCode, serviceClasses
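# Worked example for the bit arithmetic above, using a hypothetical (but
# typical smart-phone style) class-of-device value:
#
#     parse_device_class(0x5A020C)
#     # major class   -> MAJOR_CLASS.PHONE      ((0x5A020C >> 8) & 0xf == 2)
#     # minor code    -> 3                      ((0x5A020C >> 2) & 0x3f == 3)
#     # service bits  -> NETWORKING, CAPTURING, OBJECT_TRANSFER, TELEPHONY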
|
epage/telepathy-bluewire
|
src/protocol/backend.py
|
Python
|
lgpl-2.1
| 13,051
|
#!/bin/sh
"""": # -*-python-*-
bup_python="$(dirname "$0")/bup-python" || exit $?
exec "$bup_python" "$0" ${1+"$@"}
"""
# end of bup preamble
import glob, os, sys, tempfile
from bup import options, git, bloom
from bup.helpers import (add_error, debug1, handle_ctrl_c, log, progress, qprogress,
saved_errors)
optspec = """
bup bloom [options...]
--
ruin ruin the specified bloom file (clearing the bitfield)
f,force ignore existing bloom file and regenerate it from scratch
o,output= output bloom filename (default: auto)
d,dir= input directory to look for idx files (default: auto)
k,hashes= number of hash functions to use (4 or 5) (default: auto)
c,check= check the given .idx file against the bloom filter
"""
def ruin_bloom(bloomfilename):
rbloomfilename = git.repo_rel(bloomfilename)
if not os.path.exists(bloomfilename):
log("%s\n" % bloomfilename)
add_error("bloom: %s not found to ruin\n" % rbloomfilename)
return
b = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1)
b.map[16:16+2**b.bits] = '\0' * 2**b.bits
def check_bloom(path, bloomfilename, idx):
rbloomfilename = git.repo_rel(bloomfilename)
ridx = git.repo_rel(idx)
if not os.path.exists(bloomfilename):
log("bloom: %s: does not exist.\n" % rbloomfilename)
return
b = bloom.ShaBloom(bloomfilename)
if not b.valid():
add_error("bloom: %r is invalid.\n" % rbloomfilename)
return
base = os.path.basename(idx)
if base not in b.idxnames:
log("bloom: %s does not contain the idx.\n" % rbloomfilename)
return
if base == idx:
idx = os.path.join(path, idx)
log("bloom: bloom file: %s\n" % rbloomfilename)
log("bloom: checking %s\n" % ridx)
for objsha in git.open_idx(idx):
if not b.exists(objsha):
add_error("bloom: ERROR: object %s missing"
% str(objsha).encode('hex'))
_first = None
def do_bloom(path, outfilename):
global _first
b = None
if os.path.exists(outfilename) and not opt.force:
b = bloom.ShaBloom(outfilename)
if not b.valid():
debug1("bloom: Existing invalid bloom found, regenerating.\n")
b = None
add = []
rest = []
add_count = 0
rest_count = 0
for i,name in enumerate(glob.glob('%s/*.idx' % path)):
progress('bloom: counting: %d\r' % i)
ix = git.open_idx(name)
ixbase = os.path.basename(name)
if b and (ixbase in b.idxnames):
rest.append(name)
rest_count += len(ix)
else:
add.append(name)
add_count += len(ix)
total = add_count + rest_count
if not add:
debug1("bloom: nothing to do.\n")
return
if b:
if len(b) != rest_count:
debug1("bloom: size %d != idx total %d, regenerating\n"
% (len(b), rest_count))
b = None
elif (b.bits < bloom.MAX_BLOOM_BITS and
b.pfalse_positive(add_count) > bloom.MAX_PFALSE_POSITIVE):
debug1("bloom: regenerating: adding %d entries gives "
"%.2f%% false positives.\n"
% (add_count, b.pfalse_positive(add_count)))
b = None
else:
b = bloom.ShaBloom(outfilename, readwrite=True, expected=add_count)
if not b: # Need all idxs to build from scratch
add += rest
add_count += rest_count
del rest
del rest_count
msg = b is None and 'creating from' or 'adding'
if not _first: _first = path
dirprefix = (_first != path) and git.repo_rel(path)+': ' or ''
progress('bloom: %s%s %d file%s (%d object%s).\n'
% (dirprefix, msg,
len(add), len(add)!=1 and 's' or '',
add_count, add_count!=1 and 's' or ''))
tfname = None
if b is None:
tfname = os.path.join(path, 'bup.tmp.bloom')
b = bloom.create(tfname, expected=add_count, k=opt.k)
count = 0
icount = 0
for name in add:
ix = git.open_idx(name)
qprogress('bloom: writing %.2f%% (%d/%d objects)\r'
% (icount*100.0/add_count, icount, add_count))
b.add_idx(ix)
count += 1
icount += len(ix)
# Currently, there's an open file object for tfname inside b.
# Make sure it's closed before rename.
b.close()
if tfname:
os.rename(tfname, outfilename)
handle_ctrl_c()
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if extra:
o.fatal('no positional parameters expected')
git.check_repo_or_die()
if not opt.check and opt.k and opt.k not in (4,5):
o.fatal('only k values of 4 and 5 are supported')
paths = opt.dir and [opt.dir] or git.all_packdirs()
for path in paths:
debug1('bloom: scanning %s\n' % path)
outfilename = opt.output or os.path.join(path, 'bup.bloom')
if opt.check:
check_bloom(path, outfilename, opt.check)
elif opt.ruin:
ruin_bloom(outfilename)
else:
do_bloom(path, outfilename)
if saved_errors:
log('WARNING: %d errors encountered during bloom.\n' % len(saved_errors))
sys.exit(1)
elif opt.check:
log('All tests passed.\n')
|
gevaerts/bup
|
cmd/bloom-cmd.py
|
Python
|
lgpl-2.1
| 5,247
|
class Foo(object):
def set(self, value):
self.field = value
def get(self):
return self.field
a = Foo()
a.set("hello world")
z = a.get()
print z
z
a
|
retoo/pystructure
|
s101g/examples/simple/simple.py
|
Python
|
lgpl-2.1
| 195
|
# Higgins - A multi-media server
# Copyright (c) 2007-2009 Michael Frank <msfrank@syntaxjockey.com>
#
# This program is free software; for license information see
# the COPYING file.
from higgins.logger import Loggable
class UPnPLogger(Loggable):
log_domain = "upnp"
logger = UPnPLogger()
|
msfrank/Higgins
|
higgins/upnp/logger.py
|
Python
|
lgpl-2.1
| 296
|
# Copyright © 2017 Collabora Ltd.
#
# This file is part of pfg.
#
# pfg is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# pfg is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pfg. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alexandros Frantzis <alexandros.frantzis@collabora.com>
|
afrantzis/pixel-format-guide
|
tests/__init__.py
|
Python
|
lgpl-2.1
| 768
|
#! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
distribution = LogUniform(1.0, 2.5)
size = 10000
sample = distribution.getSample(size)
    factory = LogUniformFactory()
covariance = CovarianceMatrix
estimatedDistribution = factory.build(sample)
print "Distribution =", distribution
print "Estimated distribution=", estimatedDistribution
estimatedDistribution = factory.build()
print "Default distribution=", estimatedDistribution
estimatedDistribution = factory.build(distribution.getParametersCollection())
print "Distribution from parameters=", estimatedDistribution
estimatedLogUniform = factory.buildAsLogUniform(sample)
print "LogUniform =", distribution
print "Estimated logUniform=", estimatedLogUniform
estimatedLogUniform = factory.buildAsLogUniform()
print "Default logUniform=", estimatedLogUniform
estimatedLogUniform = factory.buildAsLogUniform(distribution.getParametersCollection())
print "LogUniform from parameters=", estimatedLogUniform
except :
import sys
print "t_LogUniformFactory_std.py", sys.exc_type, sys.exc_value
|
dbarbier/privot
|
python/test/t_LogUniformFactory_std.py
|
Python
|
lgpl-3.0
| 1,188
|
'''
/*******************************************************************************
*
* Copyright (c) 2015 Fraunhofer FOKUS, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
* AUTHORS: Louay Bassbouss (louay.bassbouss@fokus.fraunhofer.de)
*
******************************************************************************/
'''
from django.template import TemplateSyntaxError, Node, Variable, Library
from django.conf import settings
register = Library()
# I found some tricks in URLNode and url from defaulttags.py:
# https://code.djangoproject.com/browser/django/trunk/django/template/defaulttags.py
@register.tag
def value_from_settings(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one " \
"argument (settings constant to retrieve)" % bits[0])
settingsvar = bits[1]
settingsvar = settingsvar[1:-1] if settingsvar[0] == '"' else settingsvar
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
raise TemplateSyntaxError("'value_from_settings' didn't recognise " \
"the arguments '%s'" % ", ".join(bits))
return ValueFromSettings(settingsvar, asvar)
class ValueFromSettings(Node):
def __init__(self, settingsvar, asvar):
self.arg = Variable(settingsvar)
self.asvar = asvar
def render(self, context):
ret_val = getattr(settings,str(self.arg))
if self.asvar:
context[self.asvar] = ret_val
return ''
else:
return ret_val
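# Hypothetical template usage (not part of this module) matching the argument
# handling implemented above; MEDIA_URL is just an example setting name.
#
#     {% load value_from_settings %}
#     {% value_from_settings "MEDIA_URL" %}
#     {% value_from_settings "MEDIA_URL" as media_url %}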
|
fraunhoferfokus/fixmycity
|
dummy/templatetags/value_from_settings.py
|
Python
|
lgpl-3.0
| 2,196
|
#!/usr/bin/env python3
# cbus/protocol/application/sal.py - SAL interface
# Copyright 2012-2019 Michael Farrell <micolous+git@gmail.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import annotations
import abc
from typing import FrozenSet, Sequence, Union
from cbus.common import Application
__all__ = ['SAL', 'BaseApplication']
class SAL(abc.ABC):
"""
    Describes a decoder/encoder
"""
@abc.abstractmethod
def encode(self) -> bytes:
return bytes()
@property
@abc.abstractmethod
def application(self) -> Union[int, Application]:
raise NotImplementedError('application')
class BaseApplication(abc.ABC):
"""
    Describes a decoder for all commands sent to an application.
"""
@staticmethod
@abc.abstractmethod
def supported_applications() -> FrozenSet[Union[int, Application]]:
"""
Gets a list of supported Application IDs for the application.
All application IDs must be in the range 0x00 - 0xff.
"""
raise NotImplementedError('supported_applications')
@staticmethod
@abc.abstractmethod
def decode_sals(data: bytes) -> Sequence[SAL]:
"""
Decodes a SAL message
"""
raise NotImplementedError('decode_sals')
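# A minimal sketch (an illustration only, not part of this library) of how a
# concrete application decoder could satisfy the BaseApplication interface
# defined above:
#
#     class NullApplication(BaseApplication):
#         @staticmethod
#         def supported_applications() -> FrozenSet[Union[int, Application]]:
#             return frozenset({0xff})
#
#         @staticmethod
#         def decode_sals(data: bytes) -> Sequence[SAL]:
#             # Ignore the payload and report no decoded SALs.
#             return []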
|
micolous/cbus
|
cbus/protocol/application/sal.py
|
Python
|
lgpl-3.0
| 1,939
|
"""This demo program uses the interface to SNES solver for variational
inequalities to solve a contact mechanics problem in FEniCS. The
example considers a heavy hyperelastic circle in a box of the same
size"""
# Copyright (C) 2012 Corrado Maurini
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Corrado Maurini 2013
#
# First added: 2012-09-03
# Last changed: 2014-02-21
#
from dolfin import *
# Check that DOLFIN is configured with PETSc and CGAL
if not has_petsc_snes():
print "DOLFIN must be compiled with PETSc version > 3.2 to run this demo."
exit(0)
# Create mesh
mesh = Mesh("../circle_yplane.xml.gz")
V = VectorFunctionSpace(mesh, "Lagrange", 1)
# Define functions
du = TrialFunction(V) # Incremental displacement
v = TestFunction(V) # Test function
u = Function(V) # Displacement from previous iteration
B = Constant((0.0, -0.05)) # Body force per unit volume
# Kinematics
I = Identity(u.geometric_dimension()) # Identity tensor
F = I + grad(u) # Deformation gradient
C = F.T*F # Right Cauchy-Green tensor
# Invariants of deformation tensors
Ic = tr(C)
J = det(F)
# Elasticity parameters
E, nu = 10.0, 0.3
mu, lmbda = Constant(E/(2*(1 + nu))), Constant(E*nu/((1 + nu)*(1 - 2*nu)))
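# Quick numeric sanity check (not part of the original demo): with E = 10.0 and
# nu = 0.3 the Lamé parameters above evaluate to mu = 10/2.6 ~ 3.846 and
# lmbda = 3.0/0.52 ~ 5.769.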
# Stored strain energy density (compressible neo-Hookean model)
psi = (mu/2)*(Ic - 2) - mu*ln(J) + (lmbda/2)*(ln(J))**2
# Total potential energy
Pi = psi*dx - dot(B, u)*dx
# Compute first variation of Pi (directional derivative about u in the
# direction of v)
F = derivative(Pi, u, v)
# Compute Jacobian of F
J = derivative(F, u, du)
# Symmetry condition (to block rigid body rotations)
tol = mesh.hmin()
def symmetry_line(x):
return abs(x[0]) < DOLFIN_EPS
bc = DirichletBC(V.sub(0), 0., symmetry_line, method="pointwise")
# The displacement u must be such that the current configuration x+u
# remains in the box [xmin,xmax] x [ymin,ymax]
constraint_u = Expression(("xmax - x[0]","ymax - x[1]"), \
xmax=1.0+DOLFIN_EPS, ymax=1.0)
constraint_l = Expression(("xmin - x[0]","ymin - x[1]"), \
xmin=-1.0-DOLFIN_EPS, ymin=-1.0)
umin = interpolate(constraint_l, V)
umax = interpolate(constraint_u, V)
# Define the solver parameters
snes_solver_parameters = {"nonlinear_solver": "snes",
"snes_solver" : { "linear_solver" : "lu",
"maximum_iterations": 20,
"report": True,
"error_on_nonconvergence": False,
}}
# Set up the non-linear problem
problem = NonlinearVariationalProblem(F, u, bc, J=J)
# Set up the non-linear solver
solver = NonlinearVariationalSolver(problem)
solver.parameters.update(snes_solver_parameters)
info(solver.parameters, True)
# Solve the problem
(iter, converged) = solver.solve(umin, umax)
# Check for convergence
if not converged:
warning("This demo is a complex nonlinear problem. Convergence is not guaranteed when modifying some parameters or using PETSC 3.2.")
# Save solution in VTK format
file = File("displacement.pvd")
file << u
# plot the current configuration
plot(u, mode="displacement", wireframe=True, title="Displacement field")
interactive()
|
akshmakov/Dolfin-Fijee-Fork
|
demo/undocumented/contact-vi-snes/python/demo_contact-vi-snes.py
|
Python
|
lgpl-3.0
| 4,001
|
# -*- coding: utf-8 -*-
# Copyright(C) 2009-2016 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
# yapf-compatible
from __future__ import unicode_literals
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from requests.exceptions import ConnectionError, SSLError
from weboob.browser.browsers import LoginBrowser, URL, need_login, StatesMixin
from weboob.capabilities.base import find_object
from weboob.capabilities.bank import (
AccountNotFound, Account, AddRecipientStep, AddRecipientTimeout,
TransferInvalidRecipient, Loan,
)
from weboob.capabilities.bill import Subscription, Document, DocumentTypes
from weboob.capabilities.profile import ProfileMissing
from weboob.tools.decorators import retry
from weboob.tools.capabilities.bank.transactions import sorted_transactions
from weboob.browser.exceptions import ServerError
from weboob.browser.elements import DataError
from weboob.exceptions import BrowserIncorrectPassword, BrowserUnavailable
from weboob.tools.value import Value, ValueBool
from weboob.tools.capabilities.bank.investments import create_french_liquidity
from .pages import (
LoginPage, AccountsPage, AccountsIBANPage, HistoryPage, TransferInitPage,
ConnectionThresholdPage, LifeInsurancesPage, LifeInsurancesHistoryPage,
LifeInsurancesDetailPage, NatioVieProPage, CapitalisationPage,
MarketListPage, MarketPage, MarketHistoryPage, MarketSynPage, BNPKeyboard,
RecipientsPage, ValidateTransferPage, RegisterTransferPage, AdvisorPage,
AddRecipPage, ActivateRecipPage, ProfilePage, ListDetailCardPage, ListErrorPage,
UselessPage, TransferAssertionError, LoanDetailsPage,
)
from .document_pages import DocumentsPage, DocumentsResearchPage, TitulairePage, RIBPage
__all__ = ['BNPPartPro', 'HelloBank']
class BNPParibasBrowser(LoginBrowser, StatesMixin):
TIMEOUT = 30.0
login = URL(
        r'identification-wspl-pres/identification\?acceptRedirection=true&timestamp=(?P<timestamp>\d+)',
r'SEEA-pa01/devServer/seeaserver',
r'https://mabanqueprivee.bnpparibas.net/fr/espace-prive/comptes-et-contrats\?u=%2FSEEA-pa01%2FdevServer%2Fseeaserver',
LoginPage
)
list_error_page = URL(
r'https://mabanque.bnpparibas/rsc/contrib/document/properties/identification-fr-part-V1.json', ListErrorPage
)
useless_page = URL(r'/fr/connexion/comptes-et-contrats', UselessPage)
con_threshold = URL(
r'/fr/connexion/100-connexions',
r'/fr/connexion/mot-de-passe-expire',
r'/fr/espace-prive/100-connexions.*',
r'/fr/espace-pro/100-connexions-pro.*',
r'/fr/espace-pro/changer-son-mot-de-passe',
r'/fr/espace-client/100-connexions',
r'/fr/espace-prive/mot-de-passe-expire',
r'/fr/client/mdp-expire',
r'/fr/client/100-connexion',
r'/fr/systeme/page-indisponible',
ConnectionThresholdPage
)
accounts = URL(r'udc-wspl/rest/getlstcpt', AccountsPage)
loan_details = URL(r'caraccomptes-wspl/rpc/(?P<loan_type>.*)', LoanDetailsPage)
ibans = URL(r'rib-wspl/rpc/comptes', AccountsIBANPage)
history = URL(r'rop2-wspl/rest/releveOp', HistoryPage)
history_old = URL(r'rop-wspl/rest/releveOp', HistoryPage)
transfer_init = URL(r'virement-wspl/rest/initialisationVirement', TransferInitPage)
lifeinsurances = URL(r'mefav-wspl/rest/infosContrat', LifeInsurancesPage)
lifeinsurances_history = URL(r'mefav-wspl/rest/listMouvements', LifeInsurancesHistoryPage)
lifeinsurances_detail = URL(r'mefav-wspl/rest/detailMouvement', LifeInsurancesDetailPage)
natio_vie_pro = URL(r'/mefav-wspl/rest/natioViePro', NatioVieProPage)
capitalisation_page = URL(
r'https://www.clients.assurance-vie.fr/servlets/helios.cinrj.htmlnav.runtime.FrontServlet', CapitalisationPage
)
market_list = URL(r'pe-war/rpc/SAVaccountDetails/get', MarketListPage)
market_syn = URL(r'pe-war/rpc/synthesis/get', MarketSynPage)
market = URL(r'pe-war/rpc/portfolioDetails/get', MarketPage)
market_history = URL(r'/pe-war/rpc/turnOverHistory/get', MarketHistoryPage)
recipients = URL(r'/virement-wspl/rest/listerBeneficiaire', RecipientsPage)
add_recip = URL(r'/virement-wspl/rest/ajouterBeneficiaire', AddRecipPage)
activate_recip_sms = URL(r'/virement-wspl/rest/activerBeneficiaire', ActivateRecipPage)
activate_recip_digital_key = URL(r'/virement-wspl/rest/verifierAuthentForte', ActivateRecipPage)
validate_transfer = URL(r'/virement-wspl/rest/validationVirement', ValidateTransferPage)
register_transfer = URL(r'/virement-wspl/rest/enregistrerVirement', RegisterTransferPage)
advisor = URL(r'/conseiller-wspl/rest/monConseiller', AdvisorPage)
titulaire = URL(r'/demat-wspl/rest/listerTitulairesDemat', TitulairePage)
document = URL(r'/demat-wspl/rest/listerDocuments', DocumentsPage)
document_research = URL(r'/demat-wspl/rest/rechercheCriteresDemat', DocumentsResearchPage)
rib_page = URL(r'/rib-wspl/rpc/restituerRIB', RIBPage)
profile = URL(r'/kyc-wspl/rest/informationsClient', ProfilePage)
list_detail_card = URL(r'/udcarte-wspl/rest/listeDetailCartes', ListDetailCardPage)
STATE_DURATION = 10
need_reload_state = False
__states__ = ('need_reload_state', 'rcpt_transfer_id')
def __init__(self, config, *args, **kwargs):
super(BNPParibasBrowser, self).__init__(config['login'].get(), config['password'].get(), *args, **kwargs)
self.accounts_list = None
self.card_to_transaction_type = {}
self.rotating_password = config['rotating_password'].get()
self.digital_key = config['digital_key'].get()
self.rcpt_transfer_id = None
@retry(ConnectionError, tries=3)
def open(self, *args, **kwargs):
return super(BNPParibasBrowser, self).open(*args, **kwargs)
def do_login(self):
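        # BNP credentials are strictly numeric; the login URL is versioned with a
        # millisecond epoch timestamp (matched by the ?P<timestamp> group above).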
if not (self.username.isdigit() and self.password.isdigit()):
raise BrowserIncorrectPassword()
timestamp = lambda: int(time.time() * 1e3)
self.login.go(timestamp=timestamp())
if self.login.is_here():
self.page.login(self.username, self.password)
def load_state(self, state):
# reload state only for new recipient feature
if state.get('need_reload_state'):
state.pop('url', None)
self.need_reload_state = False
super(BNPParibasBrowser, self).load_state(state)
def change_pass(self, oldpass, newpass):
res = self.open('/identification-wspl-pres/grille?accessible=false')
url = '/identification-wspl-pres/grille/%s' % res.json()['data']['idGrille']
keyboard = self.open(url)
vk = BNPKeyboard(self, keyboard)
data = {}
data['codeAppli'] = 'PORTAIL'
data['idGrille'] = res.json()['data']['idGrille']
data['typeGrille'] = res.json()['data']['typeGrille']
data['confirmNouveauPassword'] = vk.get_string_code(newpass)
data['nouveauPassword'] = vk.get_string_code(newpass)
data['passwordActuel'] = vk.get_string_code(oldpass)
response = self.location('/mcs-wspl/rpc/modifiercodesecret', data=data)
if response.json().get('messageIden').lower() == 'nouveau mot de passe invalide':
return False
return True
@need_login
def get_profile(self):
self.profile.go(json={}, method='POST')
profile = self.page.get_profile()
if profile:
return profile
raise ProfileMissing(self.page.get_error_message())
def is_loan(self, account):
return account.type in (
Account.TYPE_LOAN, Account.TYPE_MORTGAGE, Account.TYPE_CONSUMER_CREDIT, Account.TYPE_REVOLVING_CREDIT
)
@need_login
def iter_accounts(self):
if self.accounts_list is None:
self.accounts_list = []
            # In case of password renewal, the first request may be redirected, so we may need to request the ibans page twice.
self.ibans.go()
ibans = self.page.get_ibans_dict() if self.ibans.is_here() else self.ibans.go().get_ibans_dict()
# This page might be unavailable.
try:
ibans.update(self.transfer_init.go(json={'modeBeneficiaire': '0'}).get_ibans_dict('Crediteur'))
except (TransferAssertionError, AttributeError):
pass
accounts = list(self.accounts.go().iter_accounts(ibans=ibans))
self.market_syn.go(json={}, method='POST') # do a post on the given URL
market_accounts = self.page.get_list() # get the list of 'Comptes Titres'
checked_accounts = set()
for account in accounts:
if self.is_loan(account):
account = Loan.from_dict(account.to_dict())
if account.type in (Account.TYPE_MORTGAGE, Account.TYPE_CONSUMER_CREDIT):
self.loan_details.go(data={'iban': account.id}, loan_type='creditPret')
self.page.fill_loan_details(obj=account)
elif account.type == Account.TYPE_REVOLVING_CREDIT:
self.loan_details.go(data={'iban': account.id}, loan_type='creditConsoProvisio')
self.page.fill_revolving_details(obj=account)
elif account.type == Account.TYPE_LOAN:
self.loan_details.go(data={'iban': account.id}, loan_type='creditPretPersoPro')
self.page.fill_loan_details(obj=account)
for market_acc in market_accounts:
if all((
market_acc['securityAccountNumber'].endswith(account.number[-4:]),
account.type in (Account.TYPE_MARKET, Account.TYPE_PEA),
account.label == market_acc['securityAccountName'],
not account.iban,
)):
if account.id in checked_accounts:
# in this case, we have identified two accounts for the same CompteTitre
raise DataError('we have two market accounts mapped to a same "CompteTitre" dictionary')
checked_accounts.add(account.id)
account.balance = market_acc.get('valorisation', account.balance)
account.valuation_diff = market_acc['profitLoss']
break
self.accounts_list.append(account)
# Fetching capitalisation contracts from the "Assurances Vie" space (some are not in the BNP API):
params = self.natio_vie_pro.go().get_params()
try:
self.capitalisation_page.go(params=params)
except ServerError:
self.logger.warning("An Internal Server Error occurred")
except SSLError as e:
self.logger.warning("SSL Error occurred : %s", e)
certificate_errors = (
'SEC_ERROR_EXPIRED_CERTIFICATE', # nss
'certificate verify failed', # openssl
)
if all(error not in str(e) for error in certificate_errors):
raise e
finally:
if self.capitalisation_page.is_here() and self.page.has_contracts():
for account in self.page.iter_capitalisation():
                        # Life Insurance accounts may appear BOTH in the API and the "Assurances Vie" domain;
                        # it is better to keep the API version since it contains the unitvalue:
if account.number not in [a.number for a in self.accounts_list]:
self.logger.warning("We found an account that only appears on the old BNP website.")
self.accounts_list.append(account)
else:
self.logger.warning("This account was skipped because it already appears in the API.")
return iter(self.accounts_list)
@need_login
def get_account(self, _id):
return find_object(self.iter_accounts(), id=_id, error=AccountNotFound)
@need_login
def iter_history(self, account, coming=False):
# The accounts from the "Assurances Vie" space have no available history:
if hasattr(account, '_details'):
return []
if account.type == Account.TYPE_PEA and account.label.endswith('Espèces'):
return []
if account.type == Account.TYPE_LIFE_INSURANCE:
return self.iter_lifeinsurance_history(account, coming)
elif account.type in (Account.TYPE_MARKET, Account.TYPE_PEA):
if coming:
return []
try:
self.market_list.go(json={}, method='POST')
except ServerError:
self.logger.warning("An Internal Server Error occurred")
return []
for market_acc in self.page.get_list():
if account.number[-4:] == market_acc['securityAccountNumber'][-4:]:
self.page = self.market_history.go(
json={
"securityAccountNumber": market_acc['securityAccountNumber'],
}
)
return self.page.iter_history()
return []
else:
if not self.card_to_transaction_type:
self.list_detail_card.go()
self.card_to_transaction_type = self.page.get_card_to_transaction_type()
data = {
"ibanCrypte": account.id,
"pastOrPending": 1,
"triAV": 0,
"startDate": (datetime.now() - relativedelta(years=1)).strftime('%d%m%Y'),
"endDate": datetime.now().strftime('%d%m%Y')
}
try:
self.history.go(json=data)
except BrowserUnavailable:
                # the old url is still used for certain connections but we don't know which ones,
                # so the same HistoryPage is reached through the old url in another URL object
data['startDate'] = (datetime.now() - relativedelta(years=3)).strftime('%d%m%Y')
# old url authorizes up to 3 years of history
self.history_old.go(data=data)
if coming:
return sorted_transactions(self.page.iter_coming())
else:
return sorted_transactions(self.page.iter_history())
@need_login
def iter_lifeinsurance_history(self, account, coming=False):
self.lifeinsurances_history.go(json={
"ibanCrypte": account.id,
})
for tr in self.page.iter_history(coming):
page = self.lifeinsurances_detail.go(
json={
"ibanCrypte": account.id,
"idMouvement": tr._op.get('idMouvement'),
"ordreMouvement": tr._op.get('ordreMouvement'),
"codeTypeMouvement": tr._op.get('codeTypeMouvement'),
}
)
tr.investments = list(page.iter_investments())
yield tr
@need_login
def iter_coming_operations(self, account):
return self.iter_history(account, coming=True)
@need_login
def iter_investment(self, account):
if account.type == Account.TYPE_PEA and 'espèces' in account.label.lower():
return [create_french_liquidity(account.balance)]
# Life insurances and PERP may be scraped from the API or from the "Assurance Vie" space,
# so we need to discriminate between both using account._details:
if account.type in (account.TYPE_LIFE_INSURANCE, account.TYPE_PERP, account.TYPE_CAPITALISATION):
if hasattr(account, '_details'):
# Going to the "Assurances Vie" page
natiovie_params = self.natio_vie_pro.go().get_params()
self.capitalisation_page.go(params=natiovie_params)
# Fetching the form to get the contract investments:
capitalisation_params = self.page.get_params(account)
self.capitalisation_page.go(params=capitalisation_params)
return self.page.iter_investments()
else:
# No capitalisation contract has yet been found in the API:
assert account.type != account.TYPE_CAPITALISATION
self.lifeinsurances.go(json={
"ibanCrypte": account.id,
})
return self.page.iter_investments()
elif account.type in (account.TYPE_MARKET, account.TYPE_PEA):
try:
self.market_list.go(json={}, method='POST')
except ServerError:
self.logger.warning("An Internal Server Error occurred")
return iter([])
for market_acc in self.page.get_list():
if account.number[-4:] == market_acc['securityAccountNumber'][-4:] and not account.iban:
                    # this call sometimes generates an Internal Server Error ...
try:
self.market.go(json={
"securityAccountNumber": market_acc['securityAccountNumber'],
})
except ServerError:
self.logger.warning("An Internal Server Error occurred")
break
return self.page.iter_investments()
return iter([])
@need_login
def iter_recipients(self, origin_account_id):
try:
if (
not origin_account_id in self.transfer_init.go(json={
'modeBeneficiaire': '0'
}).get_ibans_dict('Debiteur')
):
raise NotImplementedError()
except TransferAssertionError:
return
# avoid recipient with same iban
seen = set()
for recipient in self.page.transferable_on(origin_account_ibancrypte=origin_account_id):
if recipient.iban not in seen:
seen.add(recipient.iban)
yield recipient
if self.page.can_transfer_to_recipients(origin_account_id):
for recipient in self.recipients.go(json={'type': 'TOUS'}).iter_recipients():
if recipient.iban not in seen:
seen.add(recipient.iban)
yield recipient
@need_login
def new_recipient(self, recipient, **params):
if 'code' in params:
# for sms authentication
return self.send_code(recipient, **params)
        # prepare common data for all authentication methods
data = {}
data['adresseBeneficiaire'] = ''
data['iban'] = recipient.iban
data['libelleBeneficiaire'] = recipient.label
data['notification'] = True
data['typeBeneficiaire'] = ''
# provisional
if self.digital_key:
if 'digital_key' in params:
return self.new_recipient_digital_key(recipient, data)
        # we need to be on the recipient page to send the sms or mobile notification;
        # this is required to get the phone number, enabling sms sending.
        # all users with a validated phone number can receive an sms code
self.recipients.go(json={'type': 'TOUS'})
# check type of recipient activation
type_activation = 'sms'
# provisional
if self.digital_key:
if self.page.has_digital_key():
# force users with digital key activated to use digital key authentication
type_activation = 'digital_key'
if type_activation == 'sms':
# post recipient data sending sms with same request
data['typeEnvoi'] = 'SMS'
recipient = self.add_recip.go(json=data).get_recipient(recipient)
self.rcpt_transfer_id = recipient._transfer_id
self.need_reload_state = True
raise AddRecipientStep(recipient, Value('code', label='Saisissez le code reçu par SMS.'))
elif type_activation == 'digital_key':
            # recipients validated with the digital key are immediately available
recipient.enabled_date = datetime.today()
raise AddRecipientStep(
recipient,
ValueBool(
'digital_key',
label=
'Validez pour recevoir une demande sur votre application bancaire. La validation de votre bénéficiaire peut prendre plusieurs minutes.'
)
)
@need_login
def send_code(self, recipient, **params):
"""
add recipient with sms otp authentication
"""
data = {}
data['idBeneficiaire'] = self.rcpt_transfer_id
data['typeActivation'] = 1
data['codeActivation'] = params['code']
self.rcpt_transfer_id = None
return self.activate_recip_sms.go(json=data).get_recipient(recipient)
@need_login
def new_recipient_digital_key(self, recipient, data):
"""
add recipient with 'clé digitale' authentication
"""
# post recipient data, sending app notification with same request
data['typeEnvoi'] = 'AF'
self.add_recip.go(json=data)
recipient = self.page.get_recipient(recipient)
# prepare data for polling
assert recipient._id_transaction
polling_data = {}
polling_data['idBeneficiaire'] = recipient._transfer_id
polling_data['idTransaction'] = recipient._id_transaction
polling_data['typeActivation'] = 2
timeout = time.time() + 300.00 # float(second), like bnp website
# polling
while time.time() < timeout:
time.sleep(5) # like website
self.activate_recip_digital_key.go(json=polling_data)
if self.page.is_recipient_validated():
break
else:
raise AddRecipientTimeout()
return recipient
@need_login
def prepare_transfer(self, account, recipient, amount, reason, exec_date):
data = {}
data['devise'] = account.currency
data['motif'] = reason
data['dateExecution'] = exec_date.strftime('%d-%m-%Y')
data['compteDebiteur'] = account.id
data['montant'] = str(amount)
data['typeVirement'] = 'SEPA'
if recipient.category == u'Externe':
data['idBeneficiaire'] = recipient._transfer_id
else:
data['compteCrediteur'] = recipient.id
return data
@need_login
def init_transfer(self, account, recipient, amount, reason, exec_date):
if recipient._web_state == 'En attente':
raise TransferInvalidRecipient(message="Le bénéficiaire sélectionné n'est pas activé")
data = self.prepare_transfer(account, recipient, amount, reason, exec_date)
return self.validate_transfer.go(json=data).handle_response(account, recipient, amount, reason)
@need_login
def execute_transfer(self, transfer):
self.register_transfer.go(json={'referenceVirement': transfer.id})
return self.page.handle_response(transfer)
@need_login
def get_advisor(self):
self.advisor.stay_or_go()
if self.page.has_error():
return None
return self.page.get_advisor()
@need_login
def iter_threads(self):
raise NotImplementedError()
@need_login
def get_thread(self, thread):
raise NotImplementedError()
def _fetch_rib_document(self, subscription):
self.rib_page.go(
params={
'contractId': subscription.id,
'i18nSiteType': 'part', # site type value doesn't seem to matter as long as it's present
'i18nLang': 'fr',
'i18nVersion': 'V1',
},
)
if self.rib_page.is_here() and self.page.is_rib_available():
d = Document()
d.id = subscription.id + '_RIB'
d.url = self.page.url
d.type = DocumentTypes.RIB
d.format = 'pdf'
d.label = 'RIB'
return d
@need_login
def iter_documents(self, subscription):
rib = self._fetch_rib_document(subscription)
if rib:
yield rib
titulaires = self.titulaire.go().get_titulaires()
# Calling '/demat-wspl/rest/listerDocuments' before the request on 'document'
# is necessary when you specify an ikpi, otherwise no documents are returned
self.document.go()
docs = []
id_docs = []
iter_documents_functions = [self.page.iter_documents, self.page.iter_documents_pro]
for iter_documents in iter_documents_functions:
for doc in iter_documents(sub_id=subscription.id, sub_number=subscription._number, baseurl=self.BASEURL):
docs.append(doc)
id_docs.append(doc.id)
# documents are sorted by type then date, sort them directly by date
docs = sorted(docs, key=lambda doc: doc.date, reverse=True)
for doc in docs:
yield doc
        # When we only have one titulaire, there is no need to use the ikpi parameter in the request:
        # all documents are provided by this simple request
data = {
'dateDebut': (datetime.now() - relativedelta(years=3)).strftime('%d/%m/%Y'),
'dateFin': datetime.now().strftime('%d/%m/%Y'),
}
len_titulaires = len(titulaires)
self.logger.info('The total number of titulaires on this connection is %s.', len_titulaires)
        # The ikpi is necessary on multi-titulaires accounts to get the documents of each titulaire
if len_titulaires > 1:
data['ikpiPersonne'] = subscription._iduser
self.document_research.go(json=data)
for doc in self.page.iter_documents(
sub_id=subscription.id, sub_number=subscription._number, baseurl=self.BASEURL
):
if doc.id not in id_docs:
yield doc
@need_login
def iter_subscription(self):
acc_list = self.iter_accounts()
for acc in acc_list:
sub = Subscription()
sub.label = acc.label
sub.subscriber = acc._subscriber
sub.id = acc.id
# number is the hidden number of an account like "****1234"
# and it's used in the parsing of the docs in iter_documents
sub._number = acc.number
            # iduser is the ikpi affiliated with the account,
            # useful for multi-titulaires connections
sub._iduser = acc._iduser
yield sub
class BNPPartPro(BNPParibasBrowser):
BASEURL_TEMPLATE = r'https://%s.bnpparibas/'
BASEURL = BASEURL_TEMPLATE % 'mabanque'
def __init__(self, config=None, *args, **kwargs):
self.config = config
super(BNPPartPro, self).__init__(self.config, *args, **kwargs)
def switch(self, subdomain):
self.BASEURL = self.BASEURL_TEMPLATE % subdomain
class HelloBank(BNPParibasBrowser):
BASEURL = 'https://www.hellobank.fr/'
|
laurentb/weboob
|
modules/bnporc/pp/browser.py
|
Python
|
lgpl-3.0
| 27,890
|
"""Funções de conversão usada pelo scraper do Itaú."""
import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from decimal import Decimal
def date(s):
"""Converte strings 'DD/MM' em datetime.date.
Leva em consideração ano anterior para meses maiores que o mês corrente.
"""
dt = parse(s, dayfirst=True)
    # If the entry's month > the current month, the entry is from last year.
if dt.month > datetime.date.today().month:
dt += relativedelta(years=-1)
dt = dt.date()
return dt
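# Example (hypothetical "today" in July 2017):
#   date('21/07') -> datetime.date(2017, 7, 21)
#   date('25/12') -> datetime.date(2016, 12, 25)  # December > July, so previous year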
def decimal(s):
"""Converte strings para Decimal('-9876.54').
>>> assert decimal('9.876,54-') == Decimal('-9876.54')
>>> assert decimal('9.876,54 D') == Decimal('-9876.54')
>>> assert decimal('9.876,54 C') == Decimal('9876.54')
>>> assert decimal('R$ 9.876,54') == Decimal('9876.54')
>>> assert decimal('R$ -9.876,54') == Decimal('-9876.54')
"""
s = s.replace('.', '')
s = s.replace(',', '.')
if s.startswith('R$ '):
s = s[3:]
if s.endswith('-'):
s = s[-1] + s[:-1]
elif s.endswith(' D'):
s = '-' + s[:-2]
elif s.endswith(' C'):
s = s[:-2]
return Decimal(s)
def is_balance(s):
"""Retorna True quando s é uma entrada de saldo em vez de lançamento."""
return s in ('S A L D O',
'(-) SALDO A LIBERAR',
'SALDO FINAL DISPONIVEL',
'SALDO ANTERIOR')
def statements(iterable):
"""Converte dados do extrato de texto para tipos Python.
Linhas de saldo são ignoradas.
Entrada: (('21/07', 'Lançamento', '9.876,54-'), ...)
Saída..: ((datetime.date(2017, 7, 21), 'Lançamento', Decimal('-9876.54')), ...)
"""
return ((date(a), b, decimal(c)) for a, b, c in iterable if not is_balance(b))
def card_statements(iterable):
"""Converte dados do extrato do cartão de texto para tipos Python.
Entrada: (('21/07', 'Lançamento', '9.876,54 D'), ...)
Saída..: ((datetime.date(2017, 7, 21), 'Lançamento', Decimal('-9876.54')), ...)
"""
return ((date(a), b, decimal(c)) for a, b, c in iterable)
def card_summary(iterable):
"""Converte dados do resumo do cartão de texto para tipos Python.
Entrada: (('Item do Resumo', 'R$ -9.876,54'), ...)
Saída..: (('Item do Resumo', Decimal('-9876.54')), ...)
"""
return ((a, decimal(b)) for a, b in iterable)
|
henriquebastos/itauscraper
|
itauscraper/converter.py
|
Python
|
lgpl-3.0
| 2,425
|
from pycp2k.inputsection import InputSection
from ._each421 import _each421
class _cartesian_eigs1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.Backup_copies = None
self.EACH = _each421()
self._name = "CARTESIAN_EIGS"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Backup_copies': 'BACKUP_COPIES'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
SINGROUP/pycp2k
|
pycp2k/classes/_cartesian_eigs1.py
|
Python
|
lgpl-3.0
| 746
|
from distutils.core import setup
setup(
name='curie',
version='0.1.1',
author='Eric Viara',
author_email='eric.viara@curie.fr',
packages=['curie'],
url='http://pypi.python.org/pypi/TowelStuff/',
license='LICENSE.txt',
description='NaviCell Python Binding',
long_description=open('README.txt').read()
)
|
sysbio-curie/NaviCell
|
bindings/python/setup.py
|
Python
|
lgpl-3.0
| 340
|
import random
from gevent.pool import Group
from base import TestCase, declares_queues
from nucleon.amqp import Connection
from nucleon.amqp.spec import FrameQueueDeclareOk
qname = 'test%s' % (random.random(),)
queues = [qname + '.%s' % (i,) for i in xrange(100)]
class TestLimits(TestCase):
@declares_queues(*queues)
def test_parallel_queue_declare(self):
conn = Connection(self.amqp_url)
conn.connect()
channel = conn.allocate_channel()
def declare(name):
return channel.queue_declare(queue=name)
g = Group()
res = g.map(declare, queues)
assert len(res) == len(queues)
assert all(isinstance(r, FrameQueueDeclareOk) for r in res)
|
seatme/nucleon.amqp
|
tests/test_limits.py
|
Python
|
lgpl-3.0
| 726
|
from functools import wraps
import logging
import time
try:
from collections import OrderedDict
except ImportError:
try:
# ordereddict available on pypi for Python < 2.7
from ordereddict import OrderedDict
except ImportError:
# Otherwise fall back on normal dict
OrderedDict = dict
def cached_property(func):
""" Wraps a method on a class to make it a property and caches the result the first time it is evaluated
"""
attr_name = '_cached_prop_' + func.__name__
@property
@wraps(func)
def get(self):
try:
return getattr(self, attr_name)
except AttributeError:
value = func(self)
setattr(self, attr_name, value)
return value
return get
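# Minimal usage sketch (hypothetical class and method names):
#
#     class Reader(object):
#         @cached_property
#         def channels(self):
#             return expensive_parse()  # computed once, then cached on the instance
#
# The value is stored as self._cached_prop_channels, so each instance caches independently.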
class Timer(object):
""" Context manager for logging the time taken for an operation
"""
def __init__(self, log, description):
self._enabled = log.isEnabledFor(logging.INFO)
self._log = log
self._description = description
self._start_time = None
def __enter__(self):
if not self._enabled:
return self
try:
self._start_time = time.perf_counter()
except AttributeError:
# Python < 3.3
self._start_time = time.clock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._enabled:
return
try:
end_time = time.perf_counter()
except AttributeError:
# Python < 3.3
end_time = time.clock()
elapsed_time = (end_time - self._start_time) * 1.0e3
self._log.info("{0}: Took {1} ms".format(self._description, elapsed_time))
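# Minimal usage sketch (hypothetical logger and operation):
#
#     log = logging.getLogger(__name__)
#     with Timer(log, "Reading segment"):
#         read_segment()
#
# logs e.g. "Reading segment: Took 12.3 ms" at INFO level when INFO logging is enabled.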
|
adamreeve/npTDMS
|
nptdms/utils.py
|
Python
|
lgpl-3.0
| 1,733
|
import RPi.GPIO as GPIO
import datetime
import time
import pandas as pd
import logging
import logging.handlers
import sys
logger = logging.getLogger('fridge')
handler = logging.StreamHandler()
fHandler = logging.FileHandler('fridge.log')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
fHandler.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(fHandler)
logger.setLevel(logging.DEBUG)
logging.captureWarnings(True)
dataLog = logging.getLogger('fridge.data')
dataFormatter = logging.Formatter("%(asctime)s, %(message)s", "%Y-%m-%d %H:%M:%S")
dataFileName = 'fridge-' + str(datetime.datetime.now()) + '.data'
dataHandler = logging.handlers.RotatingFileHandler(dataFileName, mode='w', maxBytes=10000, backupCount=2)
dataHandler.setFormatter(dataFormatter)
dataLog.addHandler(dataHandler)
dataLog.setLevel(logging.INFO)
class Fridge:
def __init__(self, heaterGpio, coolerGpio, ambientTempSensorRomCode):
self.initGpio(heaterGpio, coolerGpio)
self.heater = TemperatureElement(heaterGpio, name='heater')
self.cooler = TemperatureElement(coolerGpio, name='cooler')
self.ambientTempSensor = DS18B20(ambientTempSensorRomCode, name='TempSens')
self.resultPeriod = datetime.timedelta(minutes=10)
self.maxResults = 1000
self.lastResultTime = None
self.resultTime = datetime.datetime.now()
self.resultsFile = 'results.txt'
fo = open(self.resultsFile, 'w')
fo.close()
def initGpio(self, heaterGpioPin, coolerGpioPin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(heaterGpioPin, GPIO.OUT)
GPIO.setup(coolerGpioPin, GPIO.OUT)
def updateResultsLog(self, dataFile):
if datetime.datetime.now() >= self.resultTime:
now = datetime.datetime.now()
names = ['date', 'set', 'meas', 'heater', 'cooler']
d = pd.read_csv(dataFile, names=names)
d['date'] = pd.to_datetime(d['date'])
d['error'] = d.meas - d.set
d['absError'] = d['error'].abs()
if self.lastResultTime == None:
dt = d
else:
start = self.lastResultTime
end = self.resultTime
mask = (d['date'] > start) & (d['date'] <= end)
dt = d.loc[mask]
mean = dt.meas.mean()
maxErr = dt.error.max()
minErr = dt.error.min()
meanErr = dt.error.mean()
meanAbsErr = dt.absError.mean()
set = d['set'].iloc[-1]
names = ['date', 'set', 'mean', 'maxErr', 'minErr', 'meanErr', 'meanAbsErr']
d_r = pd.read_csv(self.resultsFile, names=names)
try:
fi = open(self.resultsFile, 'r')
resBefore = fi.read()
resBefore = resBefore.split('\n')
fi.close()
            except Exception:
                # if the previous results file cannot be read, fall back to an empty history
                resBefore = []
fo = open(self.resultsFile, 'w')
fo.write('{:11s}'.format('Date'))
fo.write('{:9s}'.format('Time'))
fo.write('{:5s}'.format('set'))
fo.write('{:5s}'.format('mean'))
fo.write('{:5s}'.format('maxE'))
fo.write('{:5s}'.format('minE'))
fo.write('{:6s}'.format('meanE'))
fo.write('{:9s}'.format('meanAbsE') + '\n')
fo.write( self.resultTime.strftime('%Y-%m-%d %H:%M:%S') + ' ' + '{:4.1f}'.format(set) + ' ' + '{:4.1f}'.format(mean) + ' ' + '{:4.1f}'.format(maxErr) + ' ' + '{:4.1f}'.format(minErr) + ' ' + '{:5.1f}'.format(meanErr) + ' ' + '{:8.1f}'.format(meanAbsErr) + '\n' )
if len(resBefore) >= 2:
for i in xrange(1, len(resBefore)-1, 1):
fo.write(resBefore[i] + '\n')
if i > self.maxResults:
break
fo.close()
self.lastResultTime = self.resultTime
self.resultTime = now + self.resultPeriod
class TemperatureElement:
def __init__(self, bcmGpioNum, name='Name'):
self.name = name
self.gpioPin = bcmGpioNum
self.on = None
self.lastOnTime = None
self.minOnTime = datetime.timedelta(minutes=1)
self.minOffTime = datetime.timedelta(minutes=3)
try:
GPIO.output(self.gpioPin, False)
self.lastOffTime = datetime.datetime.now()
except:
logger.error('Failed to switch off in temp el init')
raise
def isOn(self):
if(GPIO.input(self.gpioPin)):
return True
else:
return False
def status(self):
if(GPIO.input(self.gpioPin)):
try:
onFor = str(datetime.datetime.now()-self.lastOnTime).split('.')[0]
except:
onFor = 'No Last On Time'
logger.debug(self.name + " been ON for " + onFor)
return self.name + " ON for " + onFor
else:
try:
offFor = str(datetime.datetime.now()-self.lastOffTime).split('.')[0]
except:
offFor = 'No Last Off Time'
logger.debug(self.name +" been OFF for " + offFor)
return self.name +" OFF for " + offFor
def turnOff(self):
now = datetime.datetime.now()
switchOff = False
#if not been on/off yet then can switch off
if self.on == None:
switchOff = True
#if not been on yet, and not currently off then can switch off
elif self.lastOnTime == None and self.on != False:
switchOff = True
#if on, and have been on for at least minOnTime then can switch off
elif self.on == True:
if (now - self.lastOnTime) > self.minOnTime:
switchOff = True
else:
logger.debug(self.name + ' Unable to switch off. Min On Time not met' )
elif self.on == False:
switchOff = False # Already off
else:
logger.debug(self.name + ' Unable to switch off. Valid condition not found.' )
        #Switch off if we have decided to
if switchOff == True:
try:
GPIO.output(self.gpioPin, False)
self.lastOffTime = now
self.on = False
logger.debug(self.name + ' Switched Off Return 1' )
return 1
except:
logger.debug(self.name + ' Exception Return -1' )
raise
return -1
else:
logger.debug(self.name + ' No Change Return 0.' )
return 0
def turnOn(self):
now = datetime.datetime.now()
switchOn = False
#if not been on/off yet then can switch on
if self.on == None:
switchOn = True
#if not been off yet, and not currently on then can switch on
elif self.lastOffTime == None and self.on != True:
switchOn = True
#if off, and have been off for at least minOffTime then can switch on
elif self.on == False:
if (now - self.lastOffTime) > self.minOffTime:
switchOn = True
else:
logger.debug(self.name + ' Unable to switch on. Min Off Time not met' )
elif self.on == True:
            switchOn = False # Already on
else:
logger.debug(self.name + ' Unable to switch on. Valid condition not found.' )
        #Switch on if we have decided to
if switchOn == True:
try:
GPIO.output(self.gpioPin, True)
self.lastOnTime = now
self.on = True
logger.debug(self.name + ' Switched On Return 1' )
return 1
except:
logger.debug(self.name + ' Exception Return -1' )
raise
return -1
else:
logger.debug(self.name + ' No Change Return 0' )
return 0
class DS18B20:
def __init__(self, romCode, name='Name'):
self.name = name
self.romCode = romCode
def getTemp(self):
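        # The sysfs w1_slave file has two lines; the second typically ends with a
        # token like "t=23500" (the 10th space-separated field), i.e. millidegrees C.
        # Stripping the "t=" prefix and dividing by 1000 gives degrees Celsius.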
tempFile = open('/sys/bus/w1/devices/' + self.romCode + '/w1_slave')
tempText = tempFile.read()
tempFile.close()
tempData = tempText.split("\n")[1].split(" ")[9]
temp = float(tempData[2:]) / 1000
logger.debug(self.name + ' ' + str(temp))
return temp
heaterGpio = 6
coolerGpio = 5
tempSensRomCode='28-0316027c72ff'
fridge = Fridge(heaterGpio, coolerGpio, tempSensRomCode)
fridge.heater.minOffTime=datetime.timedelta(seconds=1)
fridge.heater.minOnTime=datetime.timedelta(seconds=1)
fridge.cooler.minOffTime=datetime.timedelta(minutes=3)
fridge.cooler.minOnTime=datetime.timedelta(minutes=1)
fridge.ambientTempSensor.getTemp()
samplePeriod = datetime.timedelta(seconds=10)
setTemp = 21
heaterOnHyst = 0.2 #Amount below set temp that heater is asked to switch on at
heaterOffHyst = 0.1 #Amount below set temp that heater is asked to switch off at
coolerOnHyst = 1.5 #Amount above set temp that cooler is asked to switch on at
coolerOffHyst = 1 #Amount above set temp that cooler is asked to switch off at
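# Worked example with setTemp = 21:
#   heater switches on below 20.8 (21 - heaterOnHyst) and off above 20.9 (21 - heaterOffHyst)
#   cooler switches on above 22.5 (21 + coolerOnHyst) and off below 22.0 (21 + coolerOffHyst)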
i=0
while True:
try:
i=i+1
loopStartTime = datetime.datetime.now()
temp = fridge.ambientTempSensor.getTemp()
logger.debug('i=' + str(i) + ' Error=' + str(temp-setTemp) + ' Temp=' + str(temp) + ' Set temp=' + str(setTemp))
temp = fridge.ambientTempSensor.getTemp()
fridge.heater.status()
fridge.cooler.status()
#Heater decision
#If heater not on and temp is below set - heaterOnHyst then try to switch on
if not fridge.heater.isOn():
if temp < (setTemp - heaterOnHyst):
fridge.heater.turnOn()
        #If heater is on and temp above setTemp - heaterOffHyst then try to switch off
if fridge.heater.isOn():
if temp > (setTemp - heaterOffHyst):
fridge.heater.turnOff()
#Cooler decision
#If cooler not on and temp is above set + coolerOnHyst then try to switch cooler on
if not fridge.cooler.isOn():
if temp > (setTemp + coolerOnHyst):
fridge.cooler.turnOn()
#If cooler is on and temp below setTemp + coolerOffHyst then try to switch off
if fridge.cooler.isOn():
if temp < (setTemp + coolerOffHyst):
fridge.cooler.turnOff()
dataLog.info('{}'.format(setTemp) + ', ' + '{}'.format(temp) + ', ' + str(fridge.heater.isOn()) + ', ' + '{}'.format(fridge.cooler.isOn()) )
fridge.updateResultsLog(dataFileName)
        while datetime.datetime.now() < (loopStartTime + samplePeriod):
            time.sleep(0.5)  # sleep briefly instead of busy-waiting until the next sample period
except KeyboardInterrupt:
logger.info('Ctrl-c Exit.')
fridge.heater.turnOff()
fridge.cooler.turnOff()
sys.exit()
|
iainr/fridgid
|
Fridge.py
|
Python
|
lgpl-3.0
| 9,372
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012-2014 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.capabilities.bank import CapBankWealth, AccountNotFound, Account
from weboob.capabilities.base import find_object
from weboob.capabilities.profile import CapProfile
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import ValueBackendPassword, Value
from .bred import BredBrowser
from .dispobank import DispoBankBrowser
__all__ = ['BredModule']
class BredModule(Module, CapBankWealth, CapProfile):
NAME = 'bred'
MAINTAINER = u'Romain Bignon'
EMAIL = 'romain@weboob.org'
VERSION = '2.1'
DESCRIPTION = u'Bred'
LICENSE = 'LGPLv3+'
CONFIG = BackendConfig(
ValueBackendPassword('login', label='Identifiant', masked=False),
ValueBackendPassword('password', label='Mot de passe'),
Value('website', label="Site d'accès", default='bred',
choices={'bred': 'BRED', 'dispobank': 'DispoBank'}),
Value('accnum', label='Numéro du compte bancaire (optionnel)', default='', masked=False),
)
BROWSERS = {
'bred': BredBrowser,
'dispobank': DispoBankBrowser,
}
def create_default_browser(self):
self.BROWSER = self.BROWSERS[self.config['website'].get()]
return self.create_browser(
self.config['accnum'].get().replace(' ', '').zfill(11),
self.config['login'].get(),
self.config['password'].get(),
weboob=self.weboob,
)
def iter_accounts(self):
return self.browser.get_accounts_list()
def get_account(self, _id):
return find_object(self.browser.get_accounts_list(), id=_id, error=AccountNotFound)
def iter_history(self, account):
return self.browser.get_history(account)
def iter_coming(self, account):
return self.browser.get_history(account, coming=True)
def iter_investment(self, account):
return self.browser.get_investment(account)
def get_profile(self):
return self.browser.get_profile()
def fill_account(self, account, fields):
if self.config['website'].get() != 'bred':
return
self.browser.fill_account(account, fields)
OBJECTS = {
Account: fill_account,
}
|
laurentb/weboob
|
modules/bred/module.py
|
Python
|
lgpl-3.0
| 3,023
|
from pycp2k.inputsection import InputSection
from ._dielectric_cube1 import _dielectric_cube1
from ._dirichlet_bc_cube1 import _dirichlet_bc_cube1
from ._dirichlet_cstr_charge_cube1 import _dirichlet_cstr_charge_cube1
class _implicit_psolver1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.DIELECTRIC_CUBE = _dielectric_cube1()
self.DIRICHLET_BC_CUBE = _dirichlet_bc_cube1()
self.DIRICHLET_CSTR_CHARGE_CUBE = _dirichlet_cstr_charge_cube1()
self._name = "IMPLICIT_PSOLVER"
self._subsections = {'DIRICHLET_BC_CUBE': 'DIRICHLET_BC_CUBE', 'DIRICHLET_CSTR_CHARGE_CUBE': 'DIRICHLET_CSTR_CHARGE_CUBE', 'DIELECTRIC_CUBE': 'DIELECTRIC_CUBE'}
|
SINGROUP/pycp2k
|
pycp2k/classes/_implicit_psolver1.py
|
Python
|
lgpl-3.0
| 709
|
from distutils.core import setup
setup(
name='jerboa',
packages=['jerboa'], # this must be the same as the name above
version='0.2.1-alpha',
description='',
author='Matt Badger',
author_email='foss@lighthouseuk.net',
url='https://github.com/LighthouseUK/jerboa', # use the URL to the github repo
download_url='https://github.com/LighthouseUK/jerboa/tarball/0.2.1-alpha', # I'll explain this in a second
keywords=['gae', 'lighthouse', 'jerboa', 'webapp2'], # arbitrary keywords
classifiers=[],
requires=['webapp2', 'blinker', 'wtforms', 'jinja2', 'pytz', 'babel', 'pycrypto'],
# tests_require=['WebTest']
)
|
LighthouseUK/jerboa
|
setup.py
|
Python
|
lgpl-3.0
| 659
|
# This file is part of saddle-bags.
#
# saddle-bags is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# saddle-bags is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with saddle-bags. If not, see <http://www.gnu.org/licenses/>.
from saddlebags.HlaSequence import HlaSequence
import logging
class SubmissionBatch():
def __init__(self, includeInitialSubmission):
if(includeInitialSubmission):
# Starting with a single empty submission in the batch.
self.submissionBatch = [AlleleSubmission()]
else:
# Starting with an empty batch
self.submissionBatch = []
self.enaUserName = None
self.enaPassword = None
self.ipdSubmitterId = None
self.ipdSubmitterName = None
self.ipdAltContact = None
self.ipdSubmitterEmail = None
self.labOfOrigin = None
self.labContact = None
self.studyAccession = None
self.chooseStudy = "2" # 2 = new study. 1 = existing study, use the studyaccession number. Study=Project
self.studyId = None
self.studyShortTitle = None
self.studyAbstract = None
class AlleleSubmission():
def __init__(self):
self.submittedAllele=HlaSequence()
self.localAlleleName = None
self.closestAlleleWrittenDescription = None
self.ipdSubmissionIdentifier = None
self.ipdSubmissionVersion = None
self.enaAccessionIdentifier = None
# TODO: i think this column is intended for use identifying cell line names, if we are submitting the HLA types of cell lines. I'm just using it as a sample ID or cellnum. Lets see where that breaks.
self.cellId = None
self.ethnicOrigin = None
self.sex = None
self.consanguineous = None
self.homozygous = None
# Necessary = A,B, DRB1. The rest are extra, and they help James trust the submitted sequence.
# I store the typed alleles as a dictionary. Key is the Locus (HLA-A) and the value is a String with the alleles, separated by a comma (02:01,03:01:14)
self.typedAlleles = {}
self.materialAvailability = None
self.cellBank = None
self.primarySequencingMethodology = None
self.secondarySequencingMethodology = None
self.primerType = None
self.primers = None
self.sequencedInIsolation = None
self.sequencingDirection = None
self.numOfReactions = None
self.methodComments = None
self.citations = None
self.enaSubmissionText = None
self.ipdSubmissionText = None
self.isPseudoGene = False # A null allele uses pseudogene if length of the coding sequence is not a multiple of 3.
|
transplantation-immunology/EMBL-HLA-Submission
|
saddlebags/AlleleSubmission.py
|
Python
|
lgpl-3.0
| 3,176
|
# -*- coding: utf-8 -*-
import webapp2
from boilerplate import models
from boilerplate import forms
from boilerplate.handlers import BaseHandler
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
from google.appengine.api import users as googleusers
from collections import OrderedDict, Counter
from wtforms import fields
class Logout(BaseHandler):
def get(self):
self.redirect(googleusers.create_logout_url(dest_url=self.uri_for('home')))
class Geochart(BaseHandler):
def get(self):
users = models.User.query().fetch(projection=['country'])
users_by_country = Counter()
for user in users:
if user.country:
users_by_country[user.country] += 1
params = {
"data": users_by_country.items()
}
return self.render_template('admin/geochart.html', **params)
class EditProfileForm(forms.EditProfileForm):
activated = fields.BooleanField('Activated')
class List(BaseHandler):
def get(self):
p = self.request.get('p')
q = self.request.get('q')
c = self.request.get('c')
forward = True if p not in ['prev'] else False
cursor = Cursor(urlsafe=c)
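        # Paging scheme: 'c' carries an NDB query cursor and 'p' the direction.
        # Forward pages query in ascending key order from the cursor; a 'prev' page
        # queries in descending order from the reversed cursor, reverses the fetched
        # batch back into display order and swaps the prev/next cursors accordingly.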
if q:
qry = models.User.query(ndb.OR(models.User.last_name == q,
models.User.email == q,
models.User.username == q))
else:
qry = models.User.query()
PAGE_SIZE = 5
if forward:
users, next_cursor, more = qry.order(models.User.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
if next_cursor and more:
self.view.next_cursor = next_cursor
if c:
self.view.prev_cursor = cursor.reversed()
else:
users, next_cursor, more = qry.order(-models.User.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
users = list(reversed(users))
if next_cursor and more:
self.view.prev_cursor = next_cursor
self.view.next_cursor = cursor.reversed()
def pager_url(p, cursor):
params = OrderedDict()
if q:
params['q'] = q
if p in ['prev']:
params['p'] = p
if cursor:
params['c'] = cursor.urlsafe()
return self.uri_for('user-list', **params)
self.view.pager_url = pager_url
self.view.q = q
params = {
"list_columns": [('username', 'Username'),
('last_name', 'Last Name'),
('email', 'E-Mail'),
('country', 'Country')],
"users" : users,
"count" : qry.count()
}
# FIXME: admin_user should probably go into BaseHandler
params['admin_user'] = googleusers.is_current_user_admin()
return self.render_template('admin/users.html', **params)
class Edit(BaseHandler):
def get_or_404(self, user_id):
try:
user = models.User.get_by_id(long(user_id))
if user:
return user
except ValueError:
pass
self.abort(404)
def edit(self, user_id):
if self.request.POST:
user = self.get_or_404(user_id)
if self.form.validate():
self.form.populate_obj(user)
user.put()
self.add_message("Changes saved!", 'success')
return self.redirect_to("user-edit", user_id=user_id)
else:
self.add_message("Could not save changes!", 'error')
else:
user = self.get_or_404(user_id)
self.form.process(obj=user)
params = {
'user' : user
}
return self.render_template('admin/edituser.html', **params)
@webapp2.cached_property
def form(self):
return EditProfileForm(self)
|
nortd/bomfu
|
admin/users.py
|
Python
|
lgpl-3.0
| 4,033
|
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Cameron White <cawhite@pdx.edu> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2016 humbug <bah> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import json
class GithubException(Exception):
"""
Error handling in PyGithub is done with exceptions. This class is the base of all exceptions raised by PyGithub (but :class:`github.GithubException.BadAttributeException`).
Some other types of exceptions might be raised by underlying libraries, for example for network-related issues.
"""
def __init__(self, status, data):
super().__init__()
self.__status = status
self.__data = data
self.args = [status, data]
@property
def status(self):
"""
The status returned by the Github API
"""
return self.__status
@property
def data(self):
"""
The (decoded) data returned by the Github API
"""
return self.__data
def __str__(self):
return "{status} {data}".format(status=self.status, data=json.dumps(self.data))
class BadCredentialsException(GithubException):
"""
    Exception raised in case of bad credentials (when Github API replies with a 401 or 403 HTTP status)
"""
class UnknownObjectException(GithubException):
"""
    Exception raised when a non-existing object is requested (when Github API replies with a 404 HTTP status)
"""
class BadUserAgentException(GithubException):
"""
    Exception raised when request is sent with a bad user agent header (when Github API replies with a 403 bad user agent HTTP status)
"""
class RateLimitExceededException(GithubException):
"""
    Exception raised when the rate limit is exceeded (when Github API replies with a 403 rate limit exceeded HTTP status)
"""
class BadAttributeException(Exception):
"""
Exception raised when Github returns an attribute with the wrong type.
"""
def __init__(self, actualValue, expectedType, transformationException):
self.__actualValue = actualValue
self.__expectedType = expectedType
self.__transformationException = transformationException
@property
def actual_value(self):
"""
The value returned by Github
"""
return self.__actualValue
@property
def expected_type(self):
"""
The type PyGithub expected
"""
return self.__expectedType
@property
def transformation_exception(self):
"""
The exception raised when PyGithub tried to parse the value
"""
return self.__transformationException
class TwoFactorException(GithubException):
"""
    Exception raised when Github requires a one-time password for two-factor authentication
"""
class IncompletableObject(GithubException):
"""
Exception raised when we can not request an object from Github because the data returned did not include a URL
"""
class MockException(Exception):
"""
Exception raised when there is a missing or invalid data in the mock values
"""
|
ahmad88me/PyGithub
|
github/GithubException.py
|
Python
|
lgpl-3.0
| 5,277
|
# -*- coding: utf-8 -*-
"""
To be used in conjunction with:
NR099910-004-10006 - Repeated lowering, OrcaFlex Gumbel Script
NR099910-004-10001 - Lifting Analysis Methodology - Probabilistic Approach
===============================================================================================
Version 13
Corrected bug in WriteResults():
Exception: Excel worksheet name 'pennant line Max Effective Tension 3.50m' must be <= 31 chars.
14.03.2016
===============================================================================================
Version 12
Small change: confidence_Level passed as an argument to plotProbability().
rarossi, 29.02.2016
===============================================================================================
Version 11
Performance optimisation in gumbelFit() and summaryDataFrame().
Minor change required in createResultsPanel().
Basically using pandas.stuff by individual indexing is slow.
It is better to move things around in bunches and assigning by slicing.
Test case: > 4x faster
%timeit runfile('GumbelFit.py')
1 loops, best of 3: 20.2 s per loop
%timeit runfile('GumbelFit_opt.py')
1 loops, best of 3: 4.47 s per loop
by rarossi, 05.01.2016
===============================================================================================
Version 10
Changes:
- Both moment estimators and MLE distribution parameters are used, and results for both presented
in Excel files and plot.
- Revised the Gumbel fit plot. Plots can be found in a separate subfolder. The different confidence
levels analysed are shown in the plot, together with estimates
- For minimum samples containing zero values a warning is now given, and a Gumbel fitting is not
performed. Instead, the sample empirical value for the considered confidence level is reported.
- Results files updated.
by rlohne, 12.11.2015
===============================================================================================
Version 9
Changes:
- Major change in format of output spreadsheets:
Statistical results: name identifier replaced by 3 columns: Hs, Tp and WaveDir
Summary of predicted min max: idem as above. Also added one column at the start with the
confidence level and merged all confidence levels tabs into the same sheet. This is to have all
results in the same page. This file also saved as a text file for convenience
- Roll back to allowing white spaces in names, since this is unavoidable due to Orcaflex loads
names, e.g, 'Bend Moment'. The error was caused due to empty column in the end of results file
resultant from a small bug in postCalcActions.py. postCalcActions.py corrected.
- Removal ambiguous flag UseMLE. Only UseMomentEstimators is kept. If set to False then MLE is used
- Add support for Abs variable, fitting then like Max variables.
- Fix indentation, keeping 4-space indents throughout the code and a max line width of 100 characters.
by rarossi, 25.08.2015
===============================================================================================
Version 8
Changes from previous version:
Some cleanup in code
Restructured plotting
Changed the way result file is read. Previous version required that the Result txt-file be opened
and saved using Excel, as an error occurred if not. Now this is fixed, but requires that object
names do not have any spaces in them. Use underscores etc.
Some small changes to make it Python 3 compatible. It has also been tested and found
working on Python 2.7.
===============================================================================================
@author: rlohne
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats as ss
import os
# from time import time
def readResultFile(InputFile):
"""Read input file, and return data frame with results, together with number of columns"""
df = pd.read_table(InputFile)
sample = pd.DataFrame(df)
numRowsSample = len(sample)
numColsSample = len(sample.columns)
return sample, numRowsSample, numColsSample
def createResultsPanel(numRows, numCols, seedNo, confidence_Level, sample):
""""Create empty data panel for results (matrix)"""
ind = ['none']*(10+3*len(confidence_Level))
ind[0:9] = 'Hs', 'Tp', 'WaveDir', 'StDev', 'Mean', 'Max', 'Min', 'beta ME', 'mu ME', 'beta MLE'
ind[10] = 'mu MLE'
count = 0
for i in range(len(confidence_Level)):
count = i + 11
a = str(confidence_Level[i])
ind[count] = 'g ME (' + a + ')'
for i in range(len(confidence_Level)):
count = i + 11 + len(confidence_Level)
a = str(confidence_Level[i])
ind[count] = 'g MLE(' + a + ')'
for i in range(len(confidence_Level)):
count = i + 11 + 2*len(confidence_Level)
a = str(confidence_Level[i])
ind[count] = 'sample (' + a + ')'
seaStates = numRows/seedNo
colnames = [_ for _ in range(int(seaStates))]
# Create a panel that holds all data frames
name = ['none']*(len(sample.columns)-3)
for i in range(len(sample.columns)-3):
name[i] = sample.columns[i+3]
results = pd.Panel(items=name, major_axis=colnames, minor_axis=ind, dtype='O')
#
# Sketch to start thinking about converting this Panel into a MultiIndex'd DataFrame
# First try to make the result as similar as possible to the Panel.
# Alternativelly, the index could be replaced by the sea state tuple (Hs, Tp, WaveDir), but
# doing so would mean a lot more work here...
#
mindex = pd.MultiIndex.from_product([name, ind], names=['loads', 'params'])
res_df = pd.DataFrame(np.zeros(shape=(len(colnames), len(mindex))),
index=colnames, columns=mindex)
#
# Convertions:
#
# Using Panel Using DataFrame
# results.major_axis == res_df.index
# mindex = res_df.columns
# results.minor_axis == mindex.levels[1] # !!!sorting order not kept!!! not used
# results.items == mindex.levels[0]
# results.iloc[row, column, :] == res_df.iloc[column][mindex.levels[0][row]]
# results.iloc[row, column]['Hs'] == res_df.iloc[column][mindex.levels[0][row]]['Hs']
return res_df # , colnames
def gumbelFit(confidence_Level, sample, results, seedNo, colnames):
"""Fill in results, Calculate statistics"""
evalv = ['none']*(seedNo)
# Define Euler constant used for Gumbel statistics
gamma = 0.5772 # Euler constant
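    # Method-of-moments background: for a Gumbel distribution the standard deviation
    # is beta*pi/sqrt(6) and the mean is mu + gamma*beta (right-skewed, maxima) or
    # mu - gamma*beta (left-skewed, minima), so below beta = std*sqrt(6)/pi and
    # mu = mean - gamma*beta for maxima / mean + gamma*beta for minima.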
noCL = len(confidence_Level)
for row in range(len(sample.columns)-3):
c = 0
        tmp_data = [0]*(11+3*noCL)  # one slot per results column: 11 fixed fields + 3 per confidence level
for column in range(len(results.index)):
evalv = sample.iloc[c:c+seedNo, row+3].tolist()
sortEvalv = sorted(evalv)
c = (column+1)*seedNo
tmp_data[0] = sample.iloc[column*seedNo, 0] # Hs
tmp_data[1] = sample.iloc[column*seedNo, 1] # Tp
tmp_data[2] = sample.iloc[column*seedNo, 2] # WaveDir
tmp_data[3] = np.std(evalv)
tmp_data[4] = np.average(evalv)
tmp_data[5] = np.max(evalv)
tmp_data[6] = np.min(evalv)
# Check if column name contains 'Min'.
# If true, sample is assumed to be minima, and left skewed distribution is used
if 'Min' in sample.columns[row+3]:
muMLE, betaMLE = ss.gumbel_l.fit(evalv)
betaMoment = tmp_data[3]*(np.sqrt(6))/np.pi
muMoment = tmp_data[4]+gamma*betaMoment
tmp_data[7] = betaMoment # beta ME
tmp_data[8] = muMoment # mu ME
tmp_data[9] = betaMLE # beta MLE
tmp_data[10] = muMLE # mu MLE
count = 0
for i in range(len(confidence_Level)):
count = i + 11
if 0 not in evalv:
tmp_data[count] = ss.gumbel_l.ppf((1-confidence_Level[i]),
muMoment, betaMoment)
tmp_data[count+noCL] = ss.gumbel_l.ppf((1-confidence_Level[i]),
muMLE, betaMLE)
else:
tmp_data[count] = 'use sample value'
tmp_data[count+noCL] = 'use sample value'
sampleIndex = seedNo-(confidence_Level[i])*seedNo
enoughSeeds = seedNo >= round(1/(1-confidence_Level[i]), 4)
if enoughSeeds:
tmp_data[count+2*noCL] = sortEvalv[int(sampleIndex)-1]
else:
tmp_data[count+2*noCL] = 'need to run more seeds for this confidence level'
elif 'Max' in sample.columns[row+3] or 'Abs' in sample.columns[row+3]:
# Else, sample is maxima or max absolute, right skewed distribution is to be used.
muMLE, betaMLE = ss.gumbel_r.fit(evalv)
betaMoment = tmp_data[3]*(np.sqrt(6))/np.pi
muMoment = tmp_data[4]-gamma*betaMoment
tmp_data[7] = betaMoment # beta ME
tmp_data[8] = muMoment # mu ME
tmp_data[9] = betaMLE # beta MLE
tmp_data[10] = muMLE # mu MLE
count = 0
for i in range(len(confidence_Level)):
count = i + 11
if 0 not in evalv:
tmp_data[count] = ss.gumbel_r.ppf((confidence_Level[i]),
muMoment, betaMoment)
tmp_data[count+noCL] = ss.gumbel_r.ppf((confidence_Level[i]),
muMLE, betaMLE)
else:
tmp_data[count] = 'use sample value'
tmp_data[count+noCL] = 'use sample value'
sampleIndex = confidence_Level[i]*seedNo
enoughSeeds = seedNo >= round(1/(1-confidence_Level[i]), 4)
if enoughSeeds:
tmp_data[count+2*noCL] = sortEvalv[int(sampleIndex)-1]
else:
tmp_data[count+2*noCL] = 'need to run more seeds for this confidence level'
else:
tmp_data[7] = 'Error! Name must contain Max, Min or Abs.'
# finally feed tmp_data into the results dataframe
# this is done for performance, since item assignment by index in pandas
# panels is VERY slow...
results.iloc[column][results.columns.levels[0][row]] = tmp_data
return results
def plotProbability(results, sample, colnames, seedNo, confidence_Level,
Objectplot, PlotWd, PlotHs, PlotT):
""""Make diagnosis plots"""
if not os.path.isdir('Plots'):
os.mkdir('Plots')
evalv = ['none']*(seedNo)
loads_names = results.columns.levels[0]
for row in range(len(loads_names)):
c = 0
        for column in range(len(results.index)):  # iterate sea states, as in gumbelFit
evalv = sample.iloc[c:c+seedNo, row+3].tolist()
sortEvalv = sorted(evalv)
c = (column+1)*seedNo
if (loads_names[row] in Objectplot and sample.iloc[c-seedNo, 2] in PlotWd and
sample.iloc[c-seedNo, 0] in PlotHs and sample.iloc[c-seedNo, 1] in PlotT):
fig = plt.figure(num=None, figsize=(12, 12), dpi=240, facecolor='w', edgecolor='k')
savepng = True
if savepng: figpng = plt.figure(num=None, figsize=(165/25.4, 90/25.4), dpi=96,
facecolor='w', edgecolor='k')
betaME = results.iloc[column][loads_names[row]]['beta ME']
muME = results.iloc[column][loads_names[row]]['mu ME']
betaMLE = results.iloc[column][loads_names[row]]['beta MLE']
muMLE = results.iloc[column][loads_names[row]]['mu MLE']
                # First subplot is a histogram of observations and the pdf of the fitted distribution
ax = fig.add_subplot(211)
                n, bins, patches = ax.hist(evalv, 10, histtype='bar',
                                           density=True, cumulative=False)
plt.setp(patches, 'facecolor', 'g', 'alpha', 0.5)
name = sample.columns[row+3]
ax.set_xlabel(name)
a = min(evalv)-0.05*min(evalv)
b = max(evalv)+0.05*min(evalv)
pdfsample = np.linspace(a, b, 1000)
if 'Min' in sample.columns[row+3]:
yME = ss.gumbel_l.pdf(pdfsample, muME, betaME) # Create Gumbel PDF
yMLE = ss.gumbel_l.pdf(pdfsample, muMLE, betaMLE)
elif 'Max' in sample.columns[row+3] or 'Abs' in sample.columns[row+3]:
yME = ss.gumbel_r.pdf( pdfsample, muME, betaME)
yMLE = ss.gumbel_r.pdf( pdfsample, muMLE, betaMLE)
ax.plot(pdfsample, yME, 'r', pdfsample, yMLE, 'b')
ax.legend(('Gumbel - ME', 'Gumbel - MLE'), bbox_to_anchor=(0.01, 0.99),
loc=2, borderaxespad=0.)
# Second subplot is the Gumbel plot (log log) showing fitted distribution
# as a straight line, and observations as scatter points
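                # On these axes the reduced variate y = -ln(-ln(.)) is linear in the load,
                # which is why the fitted Gumbel CDF plots as a straight line:
                #   gumbel_r (maxima): -ln(-ln(CDF(x)))     = (x - mu)/beta
                #   gumbel_l (minima): -ln(-ln(1 - CDF(x))) = -(x - mu)/beta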
ae = fig.add_subplot(212)
if savepng: aepng = figpng.add_subplot(111)
sampleRange = np.array(range(1, seedNo+1))
factor = float(1)/float((seedNo+1))
sampleCDF = np.multiply(sampleRange, factor)
if 'Min' in sample.columns[row+3]:
loglogValueME = [ss.gumbel_l.ppf(1-conf, muME, betaME)
for conf in confidence_Level]
loglogValueMLE = [ss.gumbel_l.ppf(1-conf, muMLE, betaMLE)
for conf in confidence_Level]
a = sorted(evalv)
a.append(loglogValueME[-1])
b = sorted(evalv)
b.append(loglogValueMLE[-1])
loglog_cdfME = -np.log(-ss.gumbel_l.logsf(a, muME, betaME))
loglog_cdfMLE = -np.log(-ss.gumbel_l.logsf(b, muMLE, betaMLE))
ae.scatter(sorted(evalv), -np.log(-np.log(1-sampleCDF)),
marker='*', color='k')
ae.plot(a, loglog_cdfME, 'r')
ae.plot(b, loglog_cdfMLE, 'b')
ae.set_ylabel('Cumulative probability')
ylim = [-np.log(-np.log(1-confidence_Level[0]))-1,
max(-np.log(-np.log(confidence_Level[-1]))+1,
-np.log(-np.log(1-sampleCDF[-1]))+1)]
ae.set_ylim(ylim[0], ylim[1])
loglogConf = [-np.log(-np.log(conf)) for conf in confidence_Level]
xlim = [min(sorted(evalv)[0], min(loglogValueME), min(loglogValueMLE)),
sorted(evalv)[-1]]
ae.set_xlim(xlim[0], xlim[1])
if savepng:
aepng.scatter(sorted(evalv), -np.log(-np.log(1-sampleCDF)),
marker='*', color='k')
aepng.plot(a, loglog_cdfME, 'r')
aepng.plot(b, loglog_cdfMLE, 'b')
aepng.set_ylabel('Cumulative probability')
aepng.set_ylim(ylim[0], ylim[1])
aepng.set_xlim(xlim[0], xlim[1])
for i in range(len(confidence_Level)):
ae.plot([xlim[0], xlim[1]], [loglogConf[i], loglogConf[i]],
'k--', alpha=0.2)
ae.annotate(str(round(confidence_Level[i], 4)), xy=(xlim[1],
loglogConf[i]), xytext=(xlim[1], loglogConf[i]))
ae.plot([loglogValueME[i], loglogValueME[i]], [ylim[0], loglogConf[i]],
'r--')
ae.annotate(str(round(loglogValueME[i], 2)),
xy=(loglogValueME[i], ylim[0]),
xytext=(loglogValueME[i], ylim[0]-2),
arrowprops=dict(arrowstyle="->", color='red'))
ae.plot([loglogValueMLE[i], loglogValueMLE[i]], [ylim[0], loglogConf[i]],
'b--')
ae.annotate(str(round(loglogValueMLE[i], 2)),
xy=(loglogValueMLE[i], ylim[0]),
xytext=(loglogValueMLE[i], ylim[0]-1),
arrowprops=dict(arrowstyle="->", color='blue'))
if savepng:
aepng.plot([xlim[0], xlim[1]], [loglogConf[i], loglogConf[i]], 'k--',
alpha=0.2)
aepng.annotate(str(round(confidence_Level[i], 4)),
xy=(xlim[1], loglogConf[i]),
xytext=(xlim[1], loglogConf[i]))
aepng.plot([loglogValueME[i], loglogValueME[i]],
[ylim[0], loglogConf[i]], 'r--')
aepng.annotate(str(round(loglogValueME[i], 2)),
xy=(loglogValueME[i], ylim[0]),
xytext=(loglogValueME[i], ylim[0]-2),
arrowprops=dict(arrowstyle="->", color='red'))
aepng.plot([loglogValueMLE[i], loglogValueMLE[i]],
[ylim[0], loglogConf[i]], 'b--')
aepng.annotate(str(round(loglogValueMLE[i], 2)),
xy=(loglogValueMLE[i], ylim[0]),
xytext=(loglogValueMLE[i], ylim[0]-1),
arrowprops=dict(arrowstyle="->", color='blue'))
rank = seedNo-(confidence_Level[i])*seedNo
enoughSeeds = seedNo >= round(1/(1-confidence_Level[i]), 4)
if enoughSeeds:
x = sortEvalv[int(rank)-1]
y = -np.log(-np.log(1-sampleCDF[int(rank)-1]))
ae.annotate('p'+str(confidence_Level[i])+' = '+str(round(x, 2)),
xy=(x, y), xytext=(x, y+1.0),
arrowprops=dict(arrowstyle="->", color='black'))
if savepng:
aepng.annotate('p'+str(confidence_Level[i])+' = '+str(round(x, 2)),
xy=(x, y), xytext=(x, y+1.0),
arrowprops=dict(arrowstyle="->", color='black'))
elif 'Max' in sample.columns[row+3] or 'Abs' in sample.columns[row+3]:
loglogValueME = [ss.gumbel_r.ppf(conf, muME, betaME)
for conf in confidence_Level]
loglogValueMLE = [ss.gumbel_r.ppf(conf, muMLE, betaMLE)
for conf in confidence_Level]
a = sorted(evalv)
a.append(loglogValueME[-1])
b = sorted(evalv)
b.append(loglogValueMLE[-1])
loglog_cdfME = -np.log(-ss.gumbel_r.logcdf(a, muME, betaME))
loglog_cdfMLE = -np.log(-ss.gumbel_r.logcdf(b, muMLE, betaMLE))
ae.scatter(sorted(evalv), -np.log(-np.log(sampleCDF)), marker='*', color='k')
ae.plot(a, loglog_cdfME, 'r')
ae.plot(b, loglog_cdfMLE, 'b')
ae.set_ylabel('Cumulative probability')
ylim = [-np.log(-np.log(1-confidence_Level[0]))-1,
max(-np.log(-np.log(confidence_Level[-1]))+1,
-np.log(-np.log(1-sampleCDF[-1]))+1)]
ae.set_ylim(ylim[0], ylim[1])
loglogConf = [-np.log(-np.log(conf)) for conf in confidence_Level]
xlim = [sorted(evalv)[0], max(sorted(evalv)[-1], max(loglogValueME),
max(loglogValueMLE))]
ae.set_xlim(xlim[0], xlim[1])
if savepng:
aepng.scatter(sorted(evalv), -np.log(-np.log(sampleCDF)),
marker='*', color='k')
aepng.plot(a, loglog_cdfME, 'r')
aepng.plot(b, loglog_cdfMLE, 'b')
aepng.set_ylabel('Cumulative probability')
aepng.set_ylim(ylim[0], ylim[1])
aepng.set_xlim(xlim[0], xlim[1])
for i in range(len(confidence_Level)):
ae.plot([xlim[0], xlim[1]], [loglogConf[i], loglogConf[i]],
'k--', alpha=0.2)
ae.annotate(str(round(confidence_Level[i], 4)),
xy=(xlim[1], loglogConf[i]),
xytext=(xlim[1], loglogConf[i]))
ae.plot([loglogValueME[i], loglogValueME[i]],
[ylim[0], loglogConf[i]], 'r--')
ae.annotate(str(round(loglogValueME[i], 2)),
xy=(loglogValueME[i], ylim[0]),
xytext=(loglogValueME[i], ylim[0]-2),
arrowprops=dict(arrowstyle="->", color='red'))
ae.plot([loglogValueMLE[i], loglogValueMLE[i]], [-2, loglogConf[i]], 'b--')
ae.annotate(str(round(loglogValueMLE[i], 2)),
xy=(loglogValueMLE[i], ylim[0]),
xytext=(loglogValueMLE[i], ylim[0]-1),
arrowprops=dict(arrowstyle="->", color='blue'))
if savepng:
aepng.plot([xlim[0], xlim[1]], [loglogConf[i], loglogConf[i]],
'k--', alpha=0.2)
aepng.annotate(str(round(confidence_Level[i], 4)),
xy=(xlim[1], loglogConf[i]),
xytext=(xlim[1], loglogConf[i]))
aepng.plot([loglogValueME[i], loglogValueME[i]],
[ylim[0], loglogConf[i]], 'r--')
aepng.annotate(str(round(loglogValueME[i], 2)),
xy=(loglogValueME[i], ylim[0]),
xytext=(loglogValueME[i], ylim[0]-2),
arrowprops=dict(arrowstyle="->", color='red'))
aepng.plot([loglogValueMLE[i], loglogValueMLE[i]],
[-2, loglogConf[i]], 'b--')
aepng.annotate(str(round(loglogValueMLE[i], 2)),
xy=(loglogValueMLE[i], ylim[0]),
xytext=(loglogValueMLE[i], ylim[0]-1),
arrowprops=dict(arrowstyle="->", color='blue'))
rank = confidence_Level[i]*seedNo
enoughSeeds = seedNo >= round(1/(1-confidence_Level[i]), 4)
if enoughSeeds:
x = sortEvalv[int(rank)-1]
y = -np.log(-np.log(sampleCDF[int(rank)-1]))
ae.annotate('p'+str(confidence_Level[i])+' = '+str(round(x, 2)),
xy=(x, y), xytext=(x, y-1.0),
arrowprops=dict(arrowstyle="->", color='black'))
if savepng:
aepng.annotate('p'+str(confidence_Level[i])+' = '+str(round(x, 2)),
xy=(x, y), xytext=(x, y-1.0),
arrowprops=dict(arrowstyle="->", color='black'))
name = '%s Hs %.2f Tp %d wdir %d' % (sample.columns[row+3],
sample.iloc[c-seedNo, 0],
sample.iloc[c-seedNo, 1],
sample.iloc[c-seedNo, 2])
fig.tight_layout(pad=0, w_pad=0, h_pad=0)
if savepng: figpng.tight_layout(pad=0, w_pad=0, h_pad=0)
os.chdir('Plots')
fig.savefig('Gumbel-plot '+name+'.pdf', bbox_inches='tight')
plt.close(fig)
if savepng:
figpng.savefig('Gumbel-plot '+name+'.png', bbox_inches='tight')
plt.close(figpng)
os.chdir('..')
def summaryDataFrame(results, confidence_Level):
""""Create summary data frame containing Gumbel estimates"""
# Swap params and loads at the columns' hierarchy
res_swap = results.swaplevel(i=0, j=1, axis=1)
    # create ME, MLE and sample DataFrames and add them to a summary Panel
index = ['Confidence level', 'Hs', 'Tp', 'WaveDir']
# python >= 3.5 - cool!:
# for k, nm in enumerate([['ME', *['g ME (%s)' % str(c) for c in confidence_Level]],
# ['MLE', *['g MLE(%s)' % str(c) for c in confidence_Level]],
# ['sample', *['sample (%s)' % str(c) for c in confidence_Level]]]):
# python <= 3.4 - bleh!:
for k, nm in enumerate([
[item for sublist in
[['ME'], ['g ME (%s)' % str(c) for c in confidence_Level]]
for item in sublist],
[item for sublist in
[['MLE'], ['g MLE(%s)' % str(c) for c in confidence_Level]]
for item in sublist],
[item for sublist in
[['sample'], ['sample (%s)' % str(c) for c in confidence_Level]]
for item in sublist]
]):
for i, c in enumerate(confidence_Level):
df = res_swap[nm[i+1]] # ############## STOPPED HERE #####
df['Confidence level'] = [c]*len(df)
df['Hs'] = results.iloc[0, :, 0]
df['Tp'] = results.iloc[0, :, 1]
df['WaveDir'] = results.iloc[0, :, 2]
            if i == 0: # In the 1st i-iteration
df0 = df.set_index(index).reset_index() # create a DataFrame with the
else: # 1st conf. level. Then update
df0 = df0.append(df.set_index(index).reset_index()) # this df in the next iters.
        if k == 0: # In the first k-iteration, create a Panel with
            summary = pd.Panel({nm[0]: df0}) # the 'ME' DataFrame. Then in the next iterations,
else: # update this panel with the 'MLE' and finally
summary[nm[0]] = df0 # with the 'sample' DataFrames.
summary['ME and sample'] = summary['ME'] # Add to panel the 'XX and sample' DataFrames,
summary['MLE and sample'] = summary['MLE'] # starting with a bare copy of the existing DFs,
for method in ['ME', 'MLE']: # and then replacing occurrences of
for varname in results.items: # 'use sample value' by the sample value.
idx = summary[method][varname] == 'use sample value'
summary[method+' and sample'][varname][idx] = summary['sample'][varname][idx]
return summary
def writeResults(results, summaryResults, StatResultsFile, SummaryFile, seaStates):
"""Write results to Excel and text file"""
# results.to_excel(StatResultsFile, index=False)
# excel tab name max length is 31 characters
    results_short = results.rename(columns=lambda s: s[:31], level=0)
results_short.to_excel(StatResultsFile, index=False)
summaryResults.to_excel(SummaryFile, index=False)
return None
def main(InputFile, confidence_Level, seedNo, StatResultsFile, SummaryFile, Plot, Objectplot,
PlotWd, PlotHs, PlotT):
""""========================MAIN=============================="""
# #t00 = time()
# Read result file
# #print('Reading input')
sample, numRowsSample, numColsSample = readResultFile(InputFile)
# Create panel for all results
# #t0 = time()
results, colnames = createResultsPanel(numRowsSample, numColsSample, seedNo,
confidence_Level, sample)
# #t_cr = time() - t0
# Do Gumbel fit
# #print('Gumbel fit')
# #t0 = time()
results = gumbelFit(confidence_Level, sample, results, seedNo, colnames)
# #t_gf = time()-t0
# Creates a summary file giving predicted max/min for each load case and object analysed
# #print('Summarising and writing results to file')
# #t0 = time()
SummaryResults = summaryDataFrame(results, confidence_Level)
# #t_sm = time()-t0
# Creates a result file giving all statistical results for Gumbel fit
seaStates = int(numRowsSample/seedNo)
# #t0 = time()
writeResults(results, SummaryResults, StatResultsFile, SummaryFile, seaStates)
# # t_wr = time()-t0
# Plot if required
if Plot:
print('Plotting')
plotProbability(results, sample, colnames, seedNo, confidence_Level,
Objectplot, PlotWd, PlotHs, PlotT)
# #print('Done')
# #ttot = time()-t00
# #print('tot\tgumbel\tsummary\twrite\tcreate')
# #print('%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t' % (ttot, t_gf, t_sm, t_wr, t_cr))
return results, SummaryResults
if __name__ == '__main__':
# --------------------USER INPUT----------------------------#
# Define number of seeds run for each load case
seedNo = 10
# Define confidence level
confidence_Level = [0.9, 0.99]
# Define if you want histogram and corresponding Gumbel fit plot for each load case
# True = yes, False = no
Plot = True
    # Also, specify which objects you want plots for (object name = column header from the
    # input file), and for which sea states plots should be made
Objectplot = ['CraneWire Max Tension', 'CraneWire Min Tension',
'sling1 Max Tension', 'sling1 Min Tension',
'sling2 Max Tension', 'sling2 Min Tension',
'sling3 Max Tension', 'sling3 Min Tension',
'sling4 Max Tension', 'sling4 Min Tension']
PlotWd = [165, 180, 195]
PlotHs = [2.3, 3.5]
PlotT = [7, 8, 14]
# Specify input file that contains data
InputFile = 'Results.txt'
# Specify file name for summary results
SummaryFile = 'Summary of predicted max_min_opt.xlsx'
# Specify file name for statistical results
StatResultsFile = 'Statistical results_opt.xlsx'
# #-----------------END USER INPUT------------------------#
Results, SummaryResults = main(InputFile, confidence_Level, seedNo, StatResultsFile,
SummaryFile, Plot, Objectplot, PlotWd, PlotHs, PlotT)
|
haphaeu/yoshimi
|
PandasDataFrame/GumbelFit_opt3.py
|
Python
|
lgpl-3.0
| 31,937
|
#!/usr/bin/env python3
import re
import os
import os.path
import sys
def main():
already_found = []
url_matcher = re.compile(r'(https?://(www.)?)?((youtu.be|youtube.(com|de|ch|at))/watch\?v=[-_0-9A-Za-z]{11}|youtu.be/[-_0-9A-Za-z]{11})')
backup_matcher = re.compile(r'youtu')
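    # Illustrative matches for url_matcher (example URLs only, not taken from any input):
    #   https://www.youtube.com/watch?v=dQw4w9WgXcQ
    #   youtu.be/dQw4w9WgXcQ
    # backup_matcher is only used further down to flag 'youtu' occurrences that the
    # strict pattern missed, so they can be inspected manually.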
argc = len(sys.argv)
if argc == 1:
whole_input = sys.stdin.read()
elif argc == 2:
with open(sys.argv[1], mode='rt', encoding='utf8') as inf:
whole_input = inf.read()
else:
raise Exception()
os.makedirs('./urls', exist_ok=True)
num_found = 0
filename_ctr = 0
for match in url_matcher.finditer(whole_input):
num_found += 1
already_found.append((match.start(), match.end()))
written = False
while (not written) and (filename_ctr < 31337):
try:
with open(os.path.join('./urls/', '{0}.txt'.format(filename_ctr)), mode='xt', encoding='utf8') as outf:
print(match.group(0), file=outf)
written = True
except OSError:
pass
filename_ctr += 1
if filename_ctr >= 31337:
print("Error: hit infinite loop while attempting to create files. Exiting.", file=sys.stderr)
sys.exit(1)
num_backup_candidates = 0
whole_len = len(whole_input)
for match in backup_matcher.finditer(whole_input):
ms = match.start()
me = match.end()
for (s, e) in already_found:
if ms >= s and me <= e:
break
else:
s = max(ms - 33, 0)
e = min(me + 33, whole_len)
num_backup_candidates += 1
print('found unmatched candidate: ' + whole_input[s:e])
print('found {0} unmatched candidates and created {1} URL files'.format(num_backup_candidates, num_found))
print('done')
if __name__ == "__main__":
main()
|
sseering/ytdlWrapper
|
urlfind.py
|
Python
|
unlicense
| 1,923
|
from django.apps import AppConfig
class CompetencesConfig(AppConfig):
name = 'competences'
verbose_name = "Компетенции"
|
ITOO-UrFU/open-programs
|
open_programs/apps/competences/apps.py
|
Python
|
unlicense
| 141
|
#import some things we need
import httplib2
from oauth2client.client import SignedJwtAssertionCredentials #included with the Google Apps Directory API
from apiclient.discovery import build
import csv
def downloadUsers(domain, account, customerId):
superAdmin = 'is@' + domain
serviceAccount = account + '@developer.gserviceaccount.com'
p12File = domain + '.p12'
scope = 'https://www.googleapis.com/auth/admin.directory.user https://www.googleapis.com/auth/admin.directory.orgunit https://www.googleapis.com/auth/admin.directory.group https://www.googleapis.com/auth/admin.directory.device.chromeos'
#read then close the key file
keyFile = file(p12File, 'rb')
key = keyFile.read()
keyFile.close()
#build credentials
credentials = SignedJwtAssertionCredentials(serviceAccount, key, scope, prn=superAdmin)
#authenticate
http = httplib2.Http()
httplib2.debuglevel = False #change this to True if you want to see the output
http = credentials.authorize(http=http)
directoryService = build(serviceName='admin', version='directory_v1', http=http)
#create and/or open a file that we'll append to
outputFileName = domain + '_userList.csv'
outputFile = open(outputFileName, 'a')
outputFile.write('primaryEmail, lastLoginTime, name, isAdmin, orgUnitPath\n') #write the headers
pageToken = None #this is the variable where we'll store the next page token
while True:
try:
page = directoryService.users().list(domain=domain, customer=customerId, maxResults='500', pageToken=pageToken).execute()
users = page['users']
for user in users: #parse the users from the page variable
primaryEmail = user['primaryEmail']
lastLoginTime = user['lastLoginTime']
name = user['name']['fullName']
isAdmin = user['isAdmin']
orgUnitPath = user['orgUnitPath']
#print primaryEmail, lastLoginTime, name, isAdmin, orgUnitPath
#log to a file
outputFile.write(primaryEmail + ',' + str(lastLoginTime) + ',' + name + ',' + str(isAdmin) + ',' + str(orgUnitPath))
outputFile.write( '\n')
pageToken = page['nextPageToken'] #this will error if there's no nextPageToken
except:
print 'We probably reached the end of ' + domain
break
outputFile.close()
#open and read the csv file that contains the list of domains, account numbers, and customer IDs
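#each row of domainList.csv is expected to look like (values are illustrative):
#   example.com,123456789012,C01abcdef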
domainListFile = open('domainList.csv', 'rb')
domainList = csv.reader(domainListFile)
for row in domainList:
domain = row[0] #the first entry in this row is the domain
account = row[1]
customerId = row[2]
downloadUsers(domain, account, customerId)
'''
for user in page:
primaryEmail = page.get(user['primaryEmail'])
lastLoginTime = page.get('lastLoginTime')
name = page.get('name')
isAdmin = page.get('isAdmin')
orgUnitPath = page.get('orgUnitPath')
newPage = page.get('nextPageToken')
print primaryEmail, lastLoginTime, name, isAdmin, orgUnitPath
'''
'''
#create a user
userinfo = {'primaryEmail': 'newTest@example.com',
'name': { 'givenName': 'New', 'familyName': 'Test' },
'password': 'passwordfornewuser1',
'orgUnitPath':'/Archive'}
directoryService.users().insert(body=userinfo).execute()
'''
'''
#move a user to an org
userOrg = {'orgUnitPath':'/Archive'}
directoryService.users().patch(userKey='newTest@example.com', body=userOrg).execute()
'''
'''
user = directoryService.users().get(userKey = 'newTest@example.com')
pprint.pprint(user.execute())
'''
|
misterhay/GoogleAppsProvisioning
|
oauth2_getAllUsers.py
|
Python
|
unlicense
| 3,688
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from jinja2 import Template as Jinja2Template
from atlas.providers import providers
class Template(Jinja2Template):
def __init__(self, *args, **kwargs):
super(Template, self).__init__(*args, **kwargs)
for provider in providers:
self.globals[provider] = providers[provider]
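# Illustrative effect (hypothetical registry): if providers == {'github': github_provider},
# templates rendered from this class can call it directly as {{ github(...) }}.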
|
citruspi/Atlas
|
atlas/templating/template.py
|
Python
|
unlicense
| 356
|
__author__ = 'canderson'
import os
import webapp2
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class MainPage(Handler):
def get(self):
#self.write("asciichan!")
self.render('form.html')
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)
|
W0mpRat/WebDev03
|
UdacityFrameWork.py
|
Python
|
unlicense
| 781
|
#
# common.py
#
# Copyright (C) 2009 Justin Noah <justinnoah@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2009 Damien Churchill <damoxc@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
def get_resource(filename):
import pkg_resources, os
return pkg_resources.resource_filename("autobot", os.path.join("data", filename))
|
justinnoah/autobot
|
autobot/common.py
|
Python
|
apache-2.0
| 1,761
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test oplog manager methods
"""
import itertools
import re
import sys
import time
import bson
import gridfs
import pymongo
sys.path[0:0] = [""]
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.test_utils import (assert_soon,
close_client,
ReplicaSetSingle)
from mongo_connector.util import bson_ts_to_long
from tests import unittest
class TestOplogManager(unittest.TestCase):
"""Defines all the testing methods, as well as a method that sets up the
cluster
"""
def setUp(self):
self.repl_set = ReplicaSetSingle().start()
self.primary_conn = self.repl_set.client()
self.oplog_coll = self.primary_conn.local['oplog.rs']
self.opman = OplogThread(
primary_client=self.primary_conn,
doc_managers=(DocManager(),),
oplog_progress_dict=LockingDict(),
namespace_config=NamespaceConfig(
namespace_options={
'test.*': True,
'gridfs.*': {'gridfs': True}
}
),
)
def tearDown(self):
try:
self.opman.join()
except RuntimeError:
pass # OplogThread may not have been started
self.primary_conn.drop_database("test")
close_client(self.primary_conn)
self.repl_set.stop()
def test_get_oplog_cursor(self):
"""Test the get_oplog_cursor method"""
# timestamp is None - all oplog entries excluding no-ops are returned.
cursor = self.opman.get_oplog_cursor(None)
self.assertEqual(cursor.count(),
self.primary_conn["local"]["oplog.rs"].find(
{'op': {'$ne': 'n'}}).count())
# earliest entry is the only one at/after timestamp
doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
self.primary_conn["test"]["test"].insert_one(doc)
latest_timestamp = self.opman.get_last_oplog_timestamp()
cursor = self.opman.get_oplog_cursor(latest_timestamp)
self.assertNotEqual(cursor, None)
self.assertEqual(cursor.count(), 1)
next_entry_id = next(cursor)['o']['_id']
retrieved = self.primary_conn.test.test.find_one(next_entry_id)
self.assertEqual(retrieved, doc)
# many entries before and after timestamp
self.primary_conn["test"]["test"].insert_many(
[{"i": i} for i in range(2, 1002)])
oplog_cursor = self.oplog_coll.find(
{'op': {'$ne': 'n'},
'ns': {'$not': re.compile(r'\.(system|\$cmd)')}},
sort=[("ts", pymongo.ASCENDING)]
)
# initial insert + 1000 more inserts
self.assertEqual(oplog_cursor.count(), 1 + 1000)
pivot = oplog_cursor.skip(400).limit(-1)[0]
goc_cursor = self.opman.get_oplog_cursor(pivot["ts"])
self.assertEqual(goc_cursor.count(), 1 + 1000 - 400)
def test_get_last_oplog_timestamp(self):
"""Test the get_last_oplog_timestamp method"""
# "empty" the oplog
self.opman.oplog = self.primary_conn["test"]["emptycollection"]
self.assertEqual(self.opman.get_last_oplog_timestamp(), None)
# Test non-empty oplog
self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
for i in range(1000):
self.primary_conn["test"]["test"].insert_one({
"i": i + 500
})
oplog = self.primary_conn["local"]["oplog.rs"]
oplog = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
self.assertEqual(self.opman.get_last_oplog_timestamp(),
oplog["ts"])
def test_dump_collection(self):
"""Test the dump_collection method
Cases:
1. empty oplog
2. non-empty oplog, with gridfs collections
3. non-empty oplog, specified a namespace-set, none of the oplog
entries are for collections in the namespace-set
"""
# Test with empty oplog
self.opman.oplog = self.primary_conn["test"]["emptycollection"]
last_ts = self.opman.dump_collection()
self.assertEqual(last_ts, None)
# Test with non-empty oplog with gridfs collections
self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
# Insert 10 gridfs files
for i in range(10):
fs = gridfs.GridFS(self.primary_conn["gridfs"],
collection="test" + str(i))
fs.put(b"hello world")
# Insert 1000 documents
for i in range(1000):
self.primary_conn["test"]["test"].insert_one({
"i": i + 500
})
last_ts = self.opman.get_last_oplog_timestamp()
self.assertEqual(last_ts, self.opman.dump_collection())
self.assertEqual(len(self.opman.doc_managers[0]._search()), 1010)
# Case 3
# 1MB oplog so that we can rollover quickly
repl_set = ReplicaSetSingle(oplogSize=1).start()
conn = repl_set.client()
opman = OplogThread(
primary_client=conn,
doc_managers=(DocManager(),),
oplog_progress_dict=LockingDict(),
namespace_config=NamespaceConfig(namespace_set=["test.test"]),
)
# Insert a document into an included collection
conn["test"]["test"].insert_one({"test": 1})
# Cause the oplog to rollover on a non-included collection
while conn["local"]["oplog.rs"].find_one({"ns": "test.test"}):
conn["test"]["ignored"].insert_many(
[{"test": "1" * 1024} for _ in range(1024)])
last_ts = opman.get_last_oplog_timestamp()
self.assertEqual(last_ts, opman.dump_collection())
self.assertEqual(len(opman.doc_managers[0]._search()), 1)
conn.close()
repl_set.stop()
def test_skipped_oplog_entry_updates_checkpoint(self):
repl_set = ReplicaSetSingle().start()
conn = repl_set.client()
opman = OplogThread(
primary_client=conn,
doc_managers=(DocManager(),),
oplog_progress_dict=LockingDict(),
namespace_config=NamespaceConfig(namespace_set=["test.test"]),
)
opman.start()
# Insert a document into an included collection
conn["test"]["test"].insert_one({"test": 1})
last_ts = opman.get_last_oplog_timestamp()
assert_soon(lambda: last_ts == opman.checkpoint,
"OplogThread never updated checkpoint to non-skipped "
"entry.")
self.assertEqual(len(opman.doc_managers[0]._search()), 1)
# Make sure that the oplog thread updates its checkpoint on every
# oplog entry.
conn["test"]["ignored"].insert_one({"test": 1})
last_ts = opman.get_last_oplog_timestamp()
assert_soon(lambda: last_ts == opman.checkpoint,
"OplogThread never updated checkpoint to skipped entry.")
opman.join()
conn.close()
repl_set.stop()
def test_dump_collection_with_error(self):
"""Test the dump_collection method with invalid documents.
Cases:
1. non-empty oplog, continue_on_error=True, invalid documents
"""
# non-empty oplog, continue_on_error=True, invalid documents
self.opman.continue_on_error = True
self.opman.oplog = self.primary_conn["local"]["oplog.rs"]
docs = [{'a': i} for i in range(100)]
for i in range(50, 60):
docs[i]['_upsert_exception'] = True
self.primary_conn['test']['test'].insert_many(docs)
last_ts = self.opman.get_last_oplog_timestamp()
self.assertEqual(last_ts, self.opman.dump_collection())
docs = self.opman.doc_managers[0]._search()
docs.sort(key=lambda doc: doc['a'])
self.assertEqual(len(docs), 90)
expected_a = itertools.chain(range(0, 50), range(60, 100))
for doc, correct_a in zip(docs, expected_a):
self.assertEqual(doc['a'], correct_a)
def test_dump_collection_cancel(self):
"""Test that dump_collection returns None when cancelled."""
self.primary_conn["test"]["test"].insert_one({"test": "1"})
# Pretend that the OplogThead was cancelled
self.opman.running = False
self.assertIsNone(self.opman.dump_collection())
def test_init_cursor(self):
"""Test the init_cursor method
Cases:
1. no last checkpoint, no collection dump
2. no last checkpoint, collection dump ok and stuff to dump
3. no last checkpoint, nothing to dump, stuff in oplog
4. no last checkpoint, nothing to dump, nothing in oplog
5. no last checkpoint, no collection dump, stuff in oplog
6. last checkpoint exists
7. last checkpoint is behind
"""
# N.B. these sub-cases build off of each other and cannot be re-ordered
# without side-effects
# No last checkpoint, no collection dump, nothing in oplog
# "change oplog collection" to put nothing in oplog
self.opman.oplog = self.primary_conn["test"]["emptycollection"]
self.opman.collection_dump = False
self.assertTrue(all(doc['op'] == 'n'
for doc in self.opman.init_cursor()[0]))
self.assertEqual(self.opman.checkpoint, None)
# No last checkpoint, empty collections, nothing in oplog
self.opman.collection_dump = True
cursor, cursor_empty = self.opman.init_cursor()
self.assertEqual(cursor, None)
self.assertTrue(cursor_empty)
self.assertEqual(self.opman.checkpoint, None)
# No last checkpoint, empty collections, something in oplog
self.opman.oplog = self.primary_conn['local']['oplog.rs']
collection = self.primary_conn["test"]["test"]
collection.insert_one({"i": 1})
collection.delete_one({"i": 1})
time.sleep(3)
last_ts = self.opman.get_last_oplog_timestamp()
cursor, cursor_empty = self.opman.init_cursor()
self.assertFalse(cursor_empty)
self.assertEqual(self.opman.checkpoint, last_ts)
self.assertEqual(self.opman.read_last_checkpoint(), last_ts)
# No last checkpoint, no collection dump, something in oplog
# If collection dump is false the checkpoint should not be set
self.opman.checkpoint = None
self.opman.oplog_progress = LockingDict()
self.opman.collection_dump = False
collection.insert_one({"i": 2})
cursor, cursor_empty = self.opman.init_cursor()
for doc in cursor:
last_doc = doc
self.assertEqual(last_doc['o']['i'], 2)
self.assertIsNone(self.opman.checkpoint)
# Last checkpoint exists, no collection dump, something in oplog
collection.insert_many([{"i": i + 500} for i in range(1000)])
entry = list(
self.primary_conn["local"]["oplog.rs"].find(skip=200, limit=-2))
self.opman.update_checkpoint(entry[0]["ts"])
cursor, cursor_empty = self.opman.init_cursor()
self.assertEqual(next(cursor)["ts"], entry[1]["ts"])
self.assertEqual(self.opman.checkpoint, entry[0]["ts"])
self.assertEqual(self.opman.read_last_checkpoint(), entry[0]["ts"])
# Last checkpoint is behind
self.opman.update_checkpoint(bson.Timestamp(1, 0))
cursor, cursor_empty = self.opman.init_cursor()
self.assertTrue(cursor_empty)
self.assertEqual(cursor, None)
self.assertEqual(self.opman.checkpoint, bson.Timestamp(1, 0))
def test_namespace_mapping(self):
"""Test mapping of namespaces
Cases:
upsert/delete/update of documents:
1. in namespace set, mapping provided
2. outside of namespace set, mapping provided
"""
source_ns = ["test.test1", "test.test2"]
phony_ns = ["test.phony1", "test.phony2"]
dest_mapping = {"test.test1": "test.test1_dest",
"test.test2": "test.test2_dest"}
self.opman.namespace_config = NamespaceConfig(
namespace_set=source_ns, namespace_options=dest_mapping)
docman = self.opman.doc_managers[0]
# start replicating
self.opman.start()
base_doc = {"_id": 1, "name": "superman"}
# doc in namespace set
for ns in source_ns:
db, coll = ns.split(".", 1)
# test insert
self.primary_conn[db][coll].insert_one(base_doc)
assert_soon(lambda: len(docman._search()) == 1)
self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
bad = [d for d in docman._search() if d["ns"] == ns]
self.assertEqual(len(bad), 0)
# test update
self.primary_conn[db][coll].update_one(
{"_id": 1},
{"$set": {"weakness": "kryptonite"}}
)
def update_complete():
docs = docman._search()
for d in docs:
if d.get("weakness") == "kryptonite":
return True
return False
assert_soon(update_complete)
self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
bad = [d for d in docman._search() if d["ns"] == ns]
self.assertEqual(len(bad), 0)
# test delete
self.primary_conn[db][coll].delete_one({"_id": 1})
assert_soon(lambda: len(docman._search()) == 0)
bad = [d for d in docman._search()
if d["ns"] == dest_mapping[ns]]
self.assertEqual(len(bad), 0)
# cleanup
self.primary_conn[db][coll].delete_many({})
self.opman.doc_managers[0]._delete()
# doc not in namespace set
for ns in phony_ns:
db, coll = ns.split(".", 1)
# test insert
self.primary_conn[db][coll].insert_one(base_doc)
time.sleep(1)
self.assertEqual(len(docman._search()), 0)
# test update
self.primary_conn[db][coll].update_one(
{"_id": 1},
{"$set": {"weakness": "kryptonite"}}
)
time.sleep(1)
self.assertEqual(len(docman._search()), 0)
def test_many_targets(self):
"""Test that one OplogThread is capable of replicating to more than
one target.
"""
doc_managers = [DocManager(), DocManager(), DocManager()]
self.opman.doc_managers = doc_managers
# start replicating
self.opman.start()
self.primary_conn["test"]["test"].insert_one({
"name": "kermit",
"color": "green"
})
self.primary_conn["test"]["test"].insert_one({
"name": "elmo",
"color": "firetruck red"
})
assert_soon(
lambda: sum(len(d._search()) for d in doc_managers) == 6,
"OplogThread should be able to replicate to multiple targets"
)
self.primary_conn["test"]["test"].delete_one({"name": "elmo"})
assert_soon(
lambda: sum(len(d._search()) for d in doc_managers) == 3,
"OplogThread should be able to replicate to multiple targets"
)
for d in doc_managers:
self.assertEqual(d._search()[0]["name"], "kermit")
def test_upgrade_oplog_progress(self):
first_oplog_ts = self.opman.oplog.find_one()['ts']
# Old format oplog progress file:
progress = {str(self.opman.oplog): bson_ts_to_long(first_oplog_ts)}
# Set up oplog managers to use the old format.
oplog_progress = LockingDict()
oplog_progress.dict = progress
self.opman.oplog_progress = oplog_progress
# Cause the oplog managers to update their checkpoints.
self.opman.update_checkpoint(first_oplog_ts)
# New format should be in place now.
new_format = {self.opman.replset_name: first_oplog_ts}
self.assertEqual(
new_format,
self.opman.oplog_progress.get_dict()
)
if __name__ == '__main__':
unittest.main()
|
sailthru/mongo-connector
|
tests/test_oplog_manager.py
|
Python
|
apache-2.0
| 17,080
|
import uuid
from motey.models.image import Image
from motey.models.service_state import ServiceState
class Service(object):
"""
    Model object. Represents a service.
A service can have multiple states, action types and service types.
"""
def __init__(self, service_name, images, id=uuid.uuid4().hex, state=ServiceState.INITIAL, state_message=''):
"""
Constructor of the service model.
:param service_name: the name of the service
:type service_name: str
        :param images: list of images which are associated with the service
:type images: list
:param id: autogenerated id of the service
:type id: uuid
:param state: current state of the service. Default `INITIAL`.
:type state: motey.models.service_state.ServiceState
:param state_message: message for the current service state
:type state_message: str
"""
self.id = id
self.service_name = service_name
self.images = images
self.state = state
self.state_message = state_message
def __iter__(self):
yield 'id', self.id
yield 'service_name', self.service_name
yield 'images', [dict(image) for image in self.images]
yield 'state', self.state
yield 'state_message', self.state_message
@staticmethod
def transform(data):
"""
Static method to translate the service dict data into a service model.
:param data: service dict to be transformed
:type data: dict
:return: the translated service model, None if something went wrong
"""
if 'service_name' not in data or 'images' not in data:
return None
return Service(
id=data['id'] if 'id' in data else uuid.uuid4().hex,
service_name=data['service_name'],
images=[Image.transform(image) for image in data['images']],
state=data['state'] if 'state' in data else ServiceState.INITIAL,
state_message=data['state_message'] if 'state_message' in data else ''
)
|
Neoklosch/Motey
|
motey/models/service.py
|
Python
|
apache-2.0
| 2,107
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench
========================
"""
# Imports
import unittest
from modules.testOpenclDecorator import testOpenclDecorator
def main():
suite = unittest.TestLoader().loadTestsFromTestCase(testOpenclDecorator)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
|
mF2C/COMPSs
|
tests/sources/python/1_decorator_opencl/src/decorator_opencl.py
|
Python
|
apache-2.0
| 367
|
# Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CheckJSON:
def __init__(self, key, obj, ip_info=False):
self.key_error = 'Field is not applicable to this license.'
if ip_info is True:
self.key_error = 'No IP info returned.'
self.key = key
self.obj = obj
def key_valid(self):
if self.key not in self.obj:
raise KeyError(self.key_error)
else:
return self.obj[self.key]
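# Minimal usage sketch (hypothetical payload):
#   CheckJSON('ip_address', {'ip_address': '203.0.113.7'}).key_valid()  # -> '203.0.113.7'
#   CheckJSON('asn', {}).key_valid()  # -> raises KeyError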
|
sbarbett/ip_intelligence
|
src/check_json.py
|
Python
|
apache-2.0
| 945
|
__author__ = 'admin'
class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
self.fill_contact_fields(contact)
def modify(self, contact):
# modify contact
self.click_edit_button()
self.fill_contact_fields(contact)
def set_field_value(self, field_name, value):
if value is not None:
wd = self.app.wd
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(value)
def select_list_item(self, list_id, value):
if value is not None:
wd = self.app.wd
xpath = "//div[@id='content']/form/select[" + list_id + "]//option[" + value + "]"
if not wd.find_element_by_xpath(xpath).is_selected():
wd.find_element_by_xpath(xpath).click()
def fill_contact_fields(self, contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.set_field_value("firstname", contact.firstname)
self.set_field_value("middlename", contact.middlename)
self.set_field_value("lastname", contact.lastname)
self.set_field_value("nickname", contact.nickname)
self.set_field_value("title", contact.title)
self.set_field_value("company", contact.company)
self.set_field_value("address", contact.address)
self.set_field_value("home", contact.phone_home)
self.set_field_value("mobile", contact.phone_mobile)
self.set_field_value("work", contact.phone_work)
self.set_field_value("fax", contact.fax)
self.set_field_value("email", contact.email_first)
self.set_field_value("email2", contact.email_second)
self.set_field_value("email3", contact.email_third)
self.set_field_value("homepage", contact.homepage)
self.set_field_value("homepage", contact.homepage)
self.select_list_item("1", contact.birth_day_list_item)
self.select_list_item("2", contact.birth_month_list_item)
self.set_field_value("byear", contact.birth_year)
self.select_list_item("3", contact.anniversary_day_list_item)
self.select_list_item("4", contact.anniversary_month_list_item)
self.set_field_value("ayear", contact.anniversary_year)
self.set_field_value("address2", contact.second_address)
self.set_field_value("phone2", contact.second_phone)
self.set_field_value("notes", contact.notes)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def delete_first_contact(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
def delete_all_contacts(self):
wd = self.app.wd
mass_checkbox = wd.find_element_by_id("MassCB")
if not mass_checkbox.is_selected():
mass_checkbox.click()
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
def click_edit_button(self):
wd = self.app.wd
wd.find_element_by_css_selector("img[alt=\"Edit\"]").click()
def count(self):
wd = self.app.wd
return len(wd.find_elements_by_name("selected[]"))
|
dimchenkoAlexey/python_training
|
fixture/contact.py
|
Python
|
apache-2.0
| 3,347
|
import logging
from abc import abstractmethod
from string import Template
from parsl.providers.error import SchedulerMissingArgs, ScriptPathError
from parsl.launchers.error import BadLauncher
from parsl.providers.provider_base import ExecutionProvider
logger = logging.getLogger(__name__)
class ClusterProvider(ExecutionProvider):
""" This class defines behavior common to all cluster/supercompute-style scheduler systems.
Parameters
----------
label : str
Label for this provider.
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~parsl.channels.LocalChannel` (the default),
:class:`~parsl.channels.SSHChannel`, or
:class:`~parsl.channels.SSHInteractiveLoginChannel`.
walltime : str
Walltime requested per block in HH:MM:SS.
launcher : Launcher
Launcher for this provider.
cmd_timeout : int
Timeout for commands made to the scheduler in seconds
.. code:: python
+------------------
|
script_string ------->| submit
id <--------|---+
|
[ ids ] ------->| status
[statuses] <--------|----+
|
[ ids ] ------->| cancel
[cancel] <--------|----+
|
+-------------------
"""
def __init__(self,
label,
channel,
nodes_per_block,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
launcher,
cmd_timeout=10):
self._label = label
self.channel = channel
self.nodes_per_block = nodes_per_block
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.launcher = launcher
self.walltime = walltime
self.cmd_timeout = cmd_timeout
if not callable(self.launcher):
raise(BadLauncher(self.launcher,
"Launcher for executor: {} is of type: {}. Expects a parsl.launcher.launcher.Launcher or callable".format(
label, type(self.launcher))))
self.script_dir = None
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def execute_wait(self, cmd, timeout=None):
t = self.cmd_timeout
if timeout is not None:
t = timeout
return self.channel.execute_wait(cmd, t)
def _write_submit_script(self, template, script_filename, job_name, configs):
"""Generate submit script and write it to a file.
Args:
        - template (string) : The template string used to write the submit script
- script_filename (string) : Name of the submit script
- job_name (string) : job name
- configs (dict) : configs that get pushed into the template
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
"""
try:
submit_script = Template(template).substitute(jobname=job_name, **configs)
# submit_script = Template(template).safe_substitute(jobname=job_name, **configs)
with open(script_filename, 'w') as f:
f.write(submit_script)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ScriptPathError(script_filename, e))
except Exception as e:
print("Template : ", template)
print("Args : ", job_name)
print("Kwargs : ", configs)
logger.error("Uncategorized error: %s", e)
raise (e)
return True
@abstractmethod
def _status(self):
pass
def status(self, job_ids):
""" Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
if job_ids:
self._status()
return [self.resources[jid]['status'] for jid in job_ids]
@property
def label(self):
return self._label
|
Parsl/parsl
|
parsl/providers/cluster_provider.py
|
Python
|
apache-2.0
| 4,955
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Compute API documentation build configuration file
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['openstackdocstheme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Compute API Guide'
bug_tag = u'api-guide'
repository_name = 'openstack/nova'
bug_project = 'nova'
# Must set this variable to include year, month, day, hours, and minutes.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
copyright = u'2015, OpenStack contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'compute-api-guide'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ComputeAPI.tex', u'Compute API Documentation',
u'OpenStack contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'computeapi', u'Compute API Documentation',
[u'OpenStack contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ComputeAPIGuide', u'Compute API Guide',
u'OpenStack contributors', 'APIGuide',
'This guide teaches OpenStack Compute service users concepts about '
'managing resources in an OpenStack cloud with the Compute API.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
('index', u'ComputeAPIGuide', u'Compute API Guide', u'OpenStack '
'contributors')
]
# -- Options for openstackdocstheme -------------------------------------------
openstack_projects = [
'nova',
]
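# A quick way to build this guide locally with plain Sphinx (the output
# directory below is only a suggested location, not something this file
# defines):
#
#   sphinx-build -W -b html api-guide/source api-guide/build/html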
|
gooddata/openstack-nova
|
api-guide/source/conf.py
|
Python
|
apache-2.0
| 9,312
|
# Copyright 2012 Colin Scott
# Copyright 2012 Andreas Wundsam
# Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
IOWorkers provide a convenient IO abstraction.  Sends are fire-and-forget;
received data is buffered, and you can register a handler to be notified
when data is available.
"""
import sys
import errno
from collections import deque
import socket
from pox.lib.util import assert_type, makePinger
from pox.lib.recoco import Select, Task
from pox.core import core
log = core.getLogger()
_dummy_handler = lambda worker : None
def _call_safe (f, socket=None):
try:
f()
except Exception as e:
if socket:
log.error("Exception on socket %s..." % (socket))
log.exception(e)
class IOWorker (object):
"""
Generic IOWorker class.
Fire and forget semantics for send.
Received data is queued until read.
"""
def __init__(self):
self.send_buf = b""
self.receive_buf = b""
self.closed = False
self._custom_rx_handler = None
self._custom_close_handler = None
self._custom_connect_handler = None
self._connecting = False
self._shutdown_send = False
self.rx_handler = None
self.close_handler = None
self.connect_handler = None
def _handle_rx (self):
""" Can be overridden OR you can just use rx_handler """
self._custom_rx_handler(self)
def _handle_close (self):
""" Can be overridden OR you can just use close_handler """
self._custom_close_handler(self)
def _handle_connect (self):
""" Can be overridden OR you can just use connect_handler """
self._custom_connect_handler(self)
def _do_exception (self, loop):
self.close()
loop._workers.discard(self)
def _try_connect (self, loop):
if not self._connecting: return False
self._connecting = False
try:
self.socket.recv(0)
except socket.error as (s_errno, strerror):
if s_errno == 10035: # WSAEWOULDBLOCK
# Maybe we're still connecting after all...
self._connecting = True
return True
self.close()
loop._workers.discard(self)
return True
_call_safe(self._handle_connect)
return False
def _do_recv (self, loop):
if self._connecting and self._try_connect(loop): return
try:
data = self.socket.recv(loop._BUF_SIZE)
if len(data) == 0:
self.close()
loop._workers.discard(self)
else:
self._push_receive_data(data)
except socket.error as (s_errno, strerror):
if s_errno == errno.ENOENT:
# SSL library does this sometimes
log.error("Socket %s: ENOENT", str(self))
return
log.error("Socket %s error %i during recv: %s", str(self),
s_errno, strerror)
self.close()
loop._workers.discard(self)
def _do_send (self, loop):
if self._connecting and self._try_connect(loop): return
try:
if len(self.send_buf):
l = self.socket.send(self.send_buf)
if l > 0:
self._consume_send_buf(l)
if self._shutdown_send and len(self.send_buf) == 0:
self.socket.shutdown(socket.SHUT_WR)
except socket.error as (s_errno, strerror):
if s_errno != errno.EAGAIN:
log.error("Socket %s error %i during send: %s", str(self),
s_errno, strerror)
self.close()
loop._workers.discard(self)
@property
def available (self):
"""
Number of available bytes to read()
"""
return len(self.receive_buf)
@property
def connect_handler (self):
if self._custom_connect_handler is _dummy_handler:
return None
return self._custom_connect_handler
@connect_handler.setter
def connect_handler (self, callback):
"""
Handler to call when connected
"""
# Not sure if this is a good idea, but it might be...
if self.connect_handler is not None or callback is not None:
log.debug("Resetting connect_handler on %s?", self)
if callback is None: callback = _dummy_handler
self._custom_connect_handler = callback
@property
def close_handler (self):
if self._custom_close_handler is _dummy_handler:
return None
return self._custom_close_handler
@close_handler.setter
def close_handler (self, callback):
"""
Handler to call when closing
"""
# Not sure if this is a good idea, but it might be...
if self.close_handler is not None or callback is not None:
log.debug("Resetting close_handler on %s?", self)
if callback is None: callback = _dummy_handler
self._custom_close_handler = callback
@property
def rx_handler (self):
if self._custom_rx_handler is _dummy_handler:
return None
return self._custom_rx_handler
@rx_handler.setter
def rx_handler (self, callback):
"""
Handler to call when data is available to read
"""
# Not sure if this is a good idea, but it might be...
if self.rx_handler is not None or callback is not None:
log.debug("Resetting rx_handler on %s?", self)
if callback is None: callback = _dummy_handler
self._custom_rx_handler = callback
def send_fast (self, data):
return self.send(data)
def send (self, data):
""" Send data. Fire and forget. """
assert assert_type("data", data, [bytes], none_ok=False)
self.send_buf += data
def _push_receive_data (self, new_data):
# notify client of new received data. called by a Select loop
self.receive_buf += new_data
self._handle_rx()
def peek (self, length = None):
""" Peek up to length bytes from receive buffer. """
if length is None:
return self.receive_buf
else:
return self.receive_buf[:length]
def consume_receive_buf (self, l):
""" Consume receive buffer """
# called from the client
if len(self.receive_buf) < l:
raise RuntimeError("Receive buffer underrun")
self.receive_buf = self.receive_buf[l:]
def read (self, length = None):
"""
Read up to length bytes from receive buffer
(defaults to all)
"""
if length is None:
length = len(self.receive_buf)
r = self.receive_buf[:length]
self.receive_buf = self.receive_buf[length:]
return r
@property
def _ready_to_send (self):
# called by Select loop
return len(self.send_buf) > 0 or self._connecting
def _consume_send_buf (self, l):
# Throw out the first l bytes of the send buffer
# Called by Select loop
assert(len(self.send_buf)>=l)
self.send_buf = self.send_buf[l:]
def close (self):
""" Close this socket """
if self.closed: return
self.closed = True
_call_safe(self._handle_close)
def shutdown (self, send = True, recv = True):
"""
Shut down socket
"""
self._shutdown_send |= send
#TODO: recv
def __repr__ (self):
return "<" + self.__class__.__name__ + ">"
class RecocoIOWorker (IOWorker):
"""
An IOWorker that works with our RecocoIOLoop.
"""
# Set by register
on_close = None
pinger = None
def __init__ (self, socket):
"""
    socket is the (possibly still-connecting) socket to wrap.
    pinger and on_close are filled in later by RecocoIOLoop.register_worker():
    pinger wakes the RecocoIOLoop, and on_close is a callback that hides the
    details of deregistering from the Select loop.
"""
IOWorker.__init__(self)
self.socket = socket
def fileno (self):
""" Return the wrapped sockets' fileno """
return self.socket.fileno()
def send_fast (self, data):
"""
send data from the client side. fire and forget.
Must only be called from the same cooperative context as the
IOWorker.
"""
if len(self.send_buf)==0 and not self._connecting and not self.closed:
try:
l = self.socket.send(data, socket.MSG_DONTWAIT)
        if l == len(data):
          # The whole chunk went out immediately; nothing to queue.
          return
        # Partial send: slice off what was sent and queue the rest below.
        data = data[l:]
except socket.error as (s_errno, strerror):
if s_errno != errno.EAGAIN:
log.error("Socket error: " + strerror)
self.close()
return
IOWorker.send(self, data)
self.pinger.ping()
def send (self, data):
IOWorker.send(self, data)
self.pinger.ping()
def close (self):
""" Register this socket to be closed. fire and forget """
# (don't close until Select loop is ready)
if self.closed: return
IOWorker.close(self)
# on_close is a function not a method
try:
self.socket.shutdown(socket.SHUT_RD)
except Exception:
pass
self.on_close(self)
if not hasattr(socket, "MSG_DONTWAIT"):
  # MSG_DONTWAIT isn't available on this platform, so fall back to the
  # plain queueing send() for send_fast().
  RecocoIOWorker.send_fast = RecocoIOWorker.send
  log.debug("RecocoIOWorker.send_fast() not available")
def _format_lists (rlist, wlist, elist):
everything = set()
everything.update(rlist)
everything.update(wlist)
everything.update(elist)
if len(everything) == 0: return "None"
everything = list(everything)
everything.sort()
msg = ""
for fd in everything:
msg += str(fd).strip("<>").replace(" ", "-") + "|"
if fd in rlist: msg += "R"
if fd in wlist: msg += "W"
if fd in elist: msg += "X"
msg += " "
msg = msg.strip()
return msg
class RecocoIOLoop (Task):
"""
recoco task that handles the actual IO for our IO workers
"""
_select_timeout = 5
_BUF_SIZE = 8192
more_debugging = False
def __init__ (self, worker_type = RecocoIOWorker):
Task.__init__(self)
self._worker_type = worker_type
self._workers = set()
self.pinger = makePinger()
# socket.open() and socket.close() are performed by this Select task
# other threads register open() and close() requests by adding lambdas
# to this thread-safe queue.
self._pending_commands = deque()
def new_worker (self, *args, **kw):
'''
    Create, register, and return an IOWorker wrapping the given socket.
    A specific worker class can be selected by passing the _worker_type
    keyword argument (defaults to the type this loop was constructed with).
'''
# Called from external threads.
# Does not register the IOWorker immediately with the select loop --
# rather, adds a command to the pending queue
_worker_type = kw.pop("_worker_type", None)
if _worker_type is None:
_worker_type = self._worker_type
assert issubclass(_worker_type, RecocoIOWorker)
worker = _worker_type(*args, **kw)
self.register_worker(worker)
return worker
def register_worker (self, worker):
"""
Register a worker with this ioloop
"""
# Our callback for io_worker.close():
def on_close (worker):
def close_worker (worker):
# Actually close the worker (called by Select loop)
worker.socket.close()
self._workers.discard(worker)
# schedule close_worker to be called by Select loop
self._pending_commands.append(lambda: close_worker(worker))
self.pinger.ping()
worker.on_close = on_close
worker.pinger = self.pinger
# Don't add immediately, since we may be in the wrong thread
self._pending_commands.append(lambda: self._workers.add(worker))
self.pinger.ping()
def stop (self):
self.running = False
self.pinger.ping()
def run (self):
self.running = True
while self.running and core.running:
try:
# First, execute pending commands
while len(self._pending_commands) > 0:
self._pending_commands.popleft()()
# Now grab workers
read_sockets = list(self._workers) + [ self.pinger ]
write_sockets = [ worker for worker in self._workers
if worker._ready_to_send ]
exception_sockets = list(self._workers)
if self.more_debugging:
log.debug("Select In : " + _format_lists(read_sockets,
write_sockets, exception_sockets))
rlist, wlist, elist = yield Select(read_sockets, write_sockets,
exception_sockets, self._select_timeout)
if self.more_debugging:
log.debug("Select Out: " + _format_lists(rlist, wlist, elist))
if self.pinger in rlist:
self.pinger.pongAll()
rlist.remove(self.pinger)
for worker in elist:
worker._do_exception(self)
if worker in rlist:
rlist.remove(worker)
if worker in wlist:
wlist.remove(worker)
for worker in rlist:
worker._do_recv(self)
for worker in wlist:
worker._do_send(self)
except GeneratorExit:
# Must be shutting down
break
except BaseException as e:
log.exception(e)
break
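# new_worker()/register_worker() only append lambdas to the thread-safe
# _pending_commands deque and ping the pinger, so they may be called from
# other threads; all actual socket work happens inside run() above.  A
# minimal off-thread sketch (host/port and the thread wiring are
# illustrative):
#
#   import threading
#
#   loop = RecocoIOLoop()
#   loop.start()
#
#   def connect_and_send (host, port):
#     s = socket.socket()
#     s.connect((host, port))
#     w = loop.new_worker(s)              # queued, then the loop is pinged
#     w.send(b"ping")
#
#   threading.Thread(target=connect_and_send,
#                    args=("127.0.0.1", 6633)).start()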
|
damomeen/pox-datapath
|
pox/lib/ioworker/__init__.py
|
Python
|
apache-2.0
| 12,928
|