blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f390a25d57a5ddd7b928a96666f049cdc978a3b | 005c16321899c0524f040bfea364636dfb9436dd | /myblog/settings.py | 7eef1adab2702ac4640f1b1c1da8315d70406d19 | [] | no_license | martin-martin/django-blog-api | 885f4581476fb7fcde44658c11a21a56e9be1028 | ce7f23fbfa304b296ebccbf02ffbac2f2e8b71a1 | refs/heads/master | 2023-08-01T06:41:25.342548 | 2021-03-23T09:55:36 | 2021-03-23T09:55:36 | 256,978,046 | 0 | 0 | null | 2021-09-22T18:57:07 | 2020-04-19T11:05:43 | Python | UTF-8 | Python | false | false | 3,164 | py | """
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any public deployment.
SECRET_KEY = '5fnpzfttu3b6m5gawr(k%wsp7f79f&c2+bl4qliheiss_2a7xf'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Django REST Framework
    'rest_framework',
    # my apps
    'blog',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'myblog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'myblog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file stored alongside the project; fine for development only.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
| [
"breuss.martin@gmail.com"
] | breuss.martin@gmail.com |
62a80ae08072529984b1a0256c6be46b21908b1e | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/unsignedInt/Schema+Instance/NISTXML-SV-IV-atomic-unsignedInt-maxExclusive-5-1.py | c11af821e8d2a5c9ae1f1b7dfaaca7a056973162 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 295 | py | from output.models.nist_data.atomic.unsigned_int.schema_instance.nistschema_sv_iv_atomic_unsigned_int_max_exclusive_5_xsd.nistschema_sv_iv_atomic_unsigned_int_max_exclusive_5 import NistschemaSvIvAtomicUnsignedIntMaxExclusive5
# Instance for the generated maxExclusive(5) facet test: value 0 is the
# smallest unsignedInt and lies below the exclusive upper bound.
obj = NistschemaSvIvAtomicUnsignedIntMaxExclusive5(
    value=0
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
fe4511bceb8e23425fa960fa5fdac05119f7b611 | 8a141ce2232eb3ed1ec505ceecc7c9b431bf52c6 | /virtual/lib/python3.6/site-packages/bootstrap_py/pypi.py | 7e7833a39073390bc95c9e108bb97ea5bf6213f2 | [
"MIT"
] | permissive | monicaoyugi/Pitch | 9f11531e43070e397cbe9afb7aa36949a9e9a0c5 | 9ac865760884dd4514b60a415ce40ef22aa673ac | refs/heads/master | 2022-12-11T00:51:49.205736 | 2020-02-15T13:01:06 | 2020-02-15T13:01:06 | 238,953,626 | 0 | 1 | MIT | 2022-12-08T03:37:09 | 2020-02-07T15:10:29 | Python | UTF-8 | Python | false | false | 976 | py | # -*- coding: utf-8 -*-
"""bootstrap_py.pypi."""
import requests
import socket
from requests.exceptions import Timeout, HTTPError
from bootstrap_py.exceptions import BackendFailure, Conflict
#: PyPI JSON API URL
PYPI_URL = 'https://pypi.org/pypi/{0}/json'
def package_existent(name):
    """Search PyPI for ``name`` and fail if it is already registered.

    * :class:`bootstrap_py.exceptions.Conflict` exception occurs
      when user specified name has already existed.
    * :class:`bootstrap_py.exceptions.BackendFailure` exception occurs
      when PyPI service is down.

    :param str name: package name
    """
    # Keep only the network call inside try: a Conflict raised below must
    # never be swallowed by the backend-failure handler.
    try:
        response = requests.get(PYPI_URL.format(name))
    except (socket.gaierror,
            Timeout,
            # Bug fix: requests raises requests.exceptions.ConnectionError,
            # which is NOT the builtin ConnectionError (and the bare name
            # does not even exist on Python 2), so it must be referenced
            # through the requests package to actually be caught here.
            requests.exceptions.ConnectionError,
            HTTPError) as exc:
        raise BackendFailure(exc)
    # A 2xx answer means the name is already taken on PyPI; any other
    # status (e.g. 404) means the name is free and we return None.
    if response.ok:
        msg = ('[error] "{0}" is registered already in PyPI.\n'
               '\tSpecify another package name.').format(name)
        raise Conflict(msg)
| [
"monicaoyugi@gmail.com"
] | monicaoyugi@gmail.com |
0f167bfe47aff24f922605fa722f2da76518a893 | d13edf81cd374edd927d0cdc5109b0b35fde886a | /python/lnx2/__init__.py | 3353576f19b035057403b03ac984dc077d7e3c84 | [
"MIT"
] | permissive | piotrmaslanka/lnx2 | 6abe49167dcd46ff28db7d46185daf2ffb552c2f | c293201d947b6cff09ae7ba15e1c2a3acd2e4a7f | refs/heads/master | 2016-08-03T09:26:46.826574 | 2014-03-27T22:18:55 | 2014-03-27T22:18:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from lnx2.packet import Packet
from lnx2.exceptions import LNX2Error, PacketMalformedError, \
NothingToRead, NothingToSend
from lnx2.channel import Channel, RTM_NONE, RTM_MANUAL, RTM_AUTO, \
RTM_AUTO_ORDERED
from lnx2.connection import Connection
from lnx2.lnxsocket import ClientSocket, ServerSocket | [
"piotr.maslanka@henrietta.com.pl"
] | piotr.maslanka@henrietta.com.pl |
7a1335a92e08e2bb539af11845648d329f4ce995 | fd3f0fdc6af4d0b0205a70b7706caccab2c46dc0 | /0x0B-python-input_output/10-student.py | ed4a0271932ff2cea23672d9f0aa295adef4d322 | [] | no_license | Maynot2/holbertonschool-higher_level_programming | b41c0454a1d27fe34596fe4aacadf6fc8612cd23 | 230c3df96413cd22771d1c1b4c344961b4886a61 | refs/heads/main | 2023-05-04T05:43:19.457819 | 2021-05-12T14:51:56 | 2021-05-12T14:51:56 | 319,291,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | #!/usr/bin/python3
"""Holds a student class"""
def is_str_list(ary):
    """Return True when *ary* is a list whose members are all strings."""
    if type(ary) is not list:
        return False
    return all(type(member) is str for member in ary)
class Student:
    """Represents a real-world student with a name and an age."""

    def __init__(self, first_name, last_name, age):
        """Store the given first_name, last_name and age on the instance."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def to_json(self, attrs=None):
        """Return a dict of this student's attributes.

        When *attrs* is a list of strings, only the attributes named in it
        are included; for any other *attrs* every attribute is returned.
        """
        data = vars(self)
        if type(attrs) is list and all(type(name) is str for name in attrs):
            return {key: value for key, value in data.items() if key in attrs}
        return data
| [
"paulmanot@gmail.com"
] | paulmanot@gmail.com |
edcbc595f7c8bf6bfecec9a9027632eb33ee6baa | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5706278382862336_0/Python/Lameiro/a.py | 8a12ee4379870bbc29b5b97cb71c0c8160521f72 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | from pprint import pprint
from itertools import product
import sys
from fractions import Fraction
# Denominators representable as 2**i for i < 100 (membership-test table).
powers_of_two = {(2**i) for i in xrange(100)}
# Float-comparison slack for the retired helper kept below for reference.
epsilon = 1e-13

#def is_power_of_two(x):
#    for o in powers_of_two:
#        if o <= x+epsilon:
#            return True
#    return False
def read_list_of(numtype):
    """Read one stdin line and parse it as '/'-separated *numtype* values."""
    # Python 2: raw_input() returns str, map() returns a list.
    x = raw_input()
    x = x.split('/')
    return map(numtype, x)
def calculate(p,q):
f = Fraction(p,q)
if f.denominator not in powers_of_two:
return "impossible"
p = p*1.0
count = 1
r = p/q
r = r*2
while r < 1:
r *= 2
count+=1
return count
def main():
    """Read the case count, then solve and print each 'p/q' case (Python 2)."""
    for case_number in xrange(int(raw_input())):
        p, q = read_list_of(int)
        result = calculate(p, q)
        print 'Case #%d: %s' % (case_number+1, result)
# NOTE(review): runs at import time; consider an `if __name__ == '__main__':`
# guard so the module can be imported without consuming stdin.
main()

# print calculate(5,8)
# print calculate(3,8)
# print calculate(1,2)
# print calculate(1,4)
# print calculate(3,4)
# print calculate(2,23)
# print calculate(123,31488)
#Fraction().
# print calculate(['aabc', 'abbc', 'abcc'])
"eewestman@gmail.com"
] | eewestman@gmail.com |
40d3309b87632bb9b677e881c23eded5c2c4b491 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fv/overallhealth1year.py | 3552af84433f805643eed3a4b12e282197234a2a | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 11,372 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class OverallHealth1year(Mo):
    """
    A class that represents the most current statistics for overall tenant health in a 1 year sampling interval. This class updates every day.
    """

    # Class-level metadata consumed by the cobra framework (generated code).
    meta = StatsClassMeta("cobra.model.fv.OverallHealth1year", "overall tenant health")

    # The single "health" gauge counter; each rollup (last/min/max/avg/...)
    # is carried by the named property on this class.
    counter = CounterMeta("health", CounterCategory.GAUGE, "score", "health score")
    counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "healthLast"
    counter._propRefs[PropCategory.IMPLICIT_MIN] = "healthMin"
    counter._propRefs[PropCategory.IMPLICIT_MAX] = "healthMax"
    counter._propRefs[PropCategory.IMPLICIT_AVG] = "healthAvg"
    counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "healthSpct"
    counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "healthTtl"
    counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "healthThr"
    counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "healthTrBase"
    counter._propRefs[PropCategory.IMPLICIT_TREND] = "healthTr"
    meta._counters.append(counter)

    # Managed-object identity and access flags.
    meta.moClassName = "fvOverallHealth1year"
    meta.rnFormat = "CDfvOverallHealth1year"
    meta.category = MoCategory.STATS_CURRENT
    meta.label = "current overall tenant health stats in 1 year"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = True

    meta.parentClasses.add("cobra.model.fv.Tenant")

    meta.superClasses.add("cobra.model.stats.Curr")
    meta.superClasses.add("cobra.model.fv.OverallHealth")
    meta.superClasses.add("cobra.model.stats.Item")

    meta.rnPrefixes = [
        ('CDfvOverallHealth1year', False),
    ]

    # --- property definitions (one PropMeta per exposed attribute) ---

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
    prop.label = "Number of Collections During this Interval"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("cnt", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "healthAvg", "healthAvg", 9302, PropCategory.IMPLICIT_AVG)
    prop.label = "health score average value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthAvg", prop)

    prop = PropMeta("str", "healthLast", "healthLast", 9299, PropCategory.IMPLICIT_LASTREADING)
    prop.label = "health score current value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthLast", prop)

    prop = PropMeta("str", "healthMax", "healthMax", 9301, PropCategory.IMPLICIT_MAX)
    prop.label = "health score maximum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthMax", prop)

    prop = PropMeta("str", "healthMin", "healthMin", 9300, PropCategory.IMPLICIT_MIN)
    prop.label = "health score minimum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthMin", prop)

    prop = PropMeta("str", "healthSpct", "healthSpct", 9303, PropCategory.IMPLICIT_SUSPECT)
    prop.label = "health score suspect count"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthSpct", prop)

    # Bitmask property: one bit per (rollup, severity/threshold) combination.
    prop = PropMeta("str", "healthThr", "healthThr", 9305, PropCategory.IMPLICIT_THRESHOLDED)
    prop.label = "health score thresholded flags"
    prop.isOper = True
    prop.isStats = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
    prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
    prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
    prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
    prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
    prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
    prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
    prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
    prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
    prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
    prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
    prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
    prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
    prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
    prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
    prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
    prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
    prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
    prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
    prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
    prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
    prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
    prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
    prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
    prop._addConstant("maxMajor", "max-severity-major", 8589934592)
    prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
    prop._addConstant("maxRecovering", "max-recovering", 268435456)
    prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
    prop._addConstant("minCrit", "min-severity-critical", 134217728)
    prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
    prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
    prop._addConstant("minMajor", "min-severity-major", 67108864)
    prop._addConstant("minMinor", "min-severity-minor", 33554432)
    prop._addConstant("minRecovering", "min-recovering", 2097152)
    prop._addConstant("minWarn", "min-severity-warning", 16777216)
    prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
    prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
    prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
    prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
    prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
    prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
    prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
    prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
    prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
    prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
    prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
    prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
    prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
    prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
    prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
    prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
    prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
    prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
    prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
    prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
    prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
    prop._addConstant("unspecified", None, 0)
    meta.props.add("healthThr", prop)

    prop = PropMeta("str", "healthTr", "healthTr", 9307, PropCategory.IMPLICIT_TREND)
    prop.label = "health score trend"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthTr", prop)

    prop = PropMeta("str", "healthTrBase", "healthTrBase", 9306, PropCategory.IMPLICIT_TREND_BASE)
    prop.label = "health score trend baseline"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthTrBase", prop)

    prop = PropMeta("str", "healthTtl", "healthTtl", 9304, PropCategory.IMPLICIT_TOTAL)
    prop.label = "health score total sum"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("healthTtl", prop)

    prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
    prop.label = "Collection Length"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("lastCollOffset", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
    prop.label = "Reporting End Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvEnd", prop)

    prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
    prop.label = "Reporting Start Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvStart", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Construct the MO; this stats class has no naming properties."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
bf79fe7c794bce4db6b3e36ed5ae575b77a97dca | 63f917864d85f0f9e810cbb4e6163f48611a8b3d | /third_party/filebrowser/base.py | 0ef473f2b71074f514625206ae64785952c539dc | [] | no_license | davidraywilson/suit_materialized | 37aa521d52f8dd746b55b121262501147dffb95c | 035405defedd5ee8257b42aac82749794080af4f | refs/heads/master | 2021-01-18T14:05:01.797452 | 2015-06-03T02:03:55 | 2015-06-03T02:03:55 | 32,526,877 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,097 | py | # coding: utf-8
# imports
import os, re, datetime
from time import gmtime, strftime
# django imports
from django.conf import settings
# filebrowser imports
from filebrowser.settings import *
from filebrowser.conf import fb_settings
from filebrowser.functions import get_file_type, url_join, is_selectable, get_version_path
from django.utils.encoding import force_unicode
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
class FileObject(object):
    """
    The FileObject represents a File on the Server.

    PATH has to be relative to MEDIA_ROOT.
    """

    def __init__(self, path):
        '''
        `os.path.split` Split the pathname path into a pair, (head, tail) where tail is the last pathname component and head is everything leading up to that. The tail part will never contain a slash; if path ends in a slash, tail will be empty. If there is no slash in path, head will be empty. If path is empty, both head and tail are empty.
        '''
        self.path = path
        self.url_rel = path.replace("\\","/")
        self.head, self.filename = os.path.split(path)
        self.filename_lower = self.filename.lower() # important for sorting
        self.filetype = get_file_type(self.filename) # strange if file no extension then this folder

    def _filesize(self):
        """
        Size in bytes of the file/directory, or "" if it does not exist
        under MEDIA_ROOT.
        """
        path = force_unicode(self.path)
        if os.path.isfile(os.path.join(fb_settings.MEDIA_ROOT, path)) or os.path.isdir(os.path.join(fb_settings.MEDIA_ROOT, path)):
            return os.path.getsize(os.path.join(fb_settings.MEDIA_ROOT, path))
        return ""
    filesize = property(_filesize)

    def _date(self):
        """
        Modification time as a POSIX timestamp, or "" if the path is gone.
        """
        if os.path.isfile(os.path.join(fb_settings.MEDIA_ROOT, self.path)) or os.path.isdir(os.path.join(fb_settings.MEDIA_ROOT, self.path)):
            return os.path.getmtime(os.path.join(fb_settings.MEDIA_ROOT, self.path))
        return ""
    date = property(_date)

    def _datetime(self):
        """
        Modification time as a datetime object.
        NOTE(review): raises if self.date is "" (missing path) — confirm callers
        only use this on existing files.
        """
        return datetime.datetime.fromtimestamp(self.date)
    datetime = property(_datetime)

    def _extension(self):
        """
        Extension including the leading dot, e.g. u".jpg" ("" if none).
        """
        return u"%s" % os.path.splitext(self.filename)[1]
    extension = property(_extension)

    def _filetype_checked(self):
        # Filetype only when it matches what is actually on disk
        # (a "Folder" that is a directory, any other type that is a file);
        # "" otherwise.
        if self.filetype == "Folder" and os.path.isdir(self.path_full):
            return self.filetype
        elif self.filetype != "Folder" and os.path.isfile(self.path_full):
            return self.filetype
        else:
            return ""
    filetype_checked = property(_filetype_checked)

    def _path_full(self):
        """
        Full server PATH including MEDIA_ROOT.
        """
        return os.path.join(fb_settings.MEDIA_ROOT, self.path)
    path_full = property(_path_full)

    def _path_relative(self):
        # Path exactly as given at construction (relative to MEDIA_ROOT).
        return self.path
    path_relative = property(_path_relative)

    def _path_relative_directory(self):
        """
        Path relative to initial directory (DIRECTORY prefix stripped).
        """
        directory_re = re.compile(r'^(%s)' % (fb_settings.DIRECTORY))
        value = directory_re.sub('', self.path)
        return u"%s" % value
    path_relative_directory = property(_path_relative_directory)

    def _url_relative(self):
        # Same as path but with forward slashes (see __init__).
        return self.url_rel
    url_relative = property(_url_relative)

    def _url_full(self):
        """
        Full URL including MEDIA_URL.
        """
        return force_unicode(url_join(fb_settings.MEDIA_URL, self.url_rel))
    url_full = property(_url_full)

    def _url_save(self):
        """
        URL used for the filebrowsefield: full or relative depending on
        the SAVE_FULL_URL setting.
        """
        if SAVE_FULL_URL:
            return self.url_full
        else:
            return self.url_rel
    url_save = property(_url_save)

    def _url_thumbnail(self):
        """
        Thumbnail URL (images only; "" for any other filetype).
        """
        if self.filetype == "Image":
            return u"%s" % url_join(fb_settings.MEDIA_URL, get_version_path(self.path, ADMIN_THUMBNAIL))
        else:
            return ""
    url_thumbnail = property(_url_thumbnail)

    def url_admin(self):
        # Admin URL: folders get the DIRECTORY-stripped path, files the
        # MEDIA_URL-joined path.
        if self.filetype_checked == "Folder":
            directory_re = re.compile(r'^(%s)' % (fb_settings.DIRECTORY))
            value = directory_re.sub('', self.path)
            return u"%s" % value
        else:
            return u"%s" % url_join(fb_settings.MEDIA_URL, self.path)

    def _dimensions(self):
        """
        Image dimensions as (width, height).
        Returns False for non-images; implicitly returns None when PIL
        cannot open the image (broad except kept as-is).
        """
        if self.filetype == 'Image':
            try:
                im = Image.open(os.path.join(fb_settings.MEDIA_ROOT, self.path))
                return im.size
            except:
                pass
        else:
            return False
    dimensions = property(_dimensions)

    def _width(self):
        """
        Image width in pixels (only meaningful for readable images).
        """
        return self.dimensions[0]
    width = property(_width)

    def _height(self):
        """
        Image height in pixels (only meaningful for readable images).
        """
        return self.dimensions[1]
    height = property(_height)

    def _orientation(self):
        """
        "Landscape"/"Portrait" for images (square counts as Landscape),
        None when dimensions are unavailable.
        """
        if self.dimensions:
            if self.dimensions[0] >= self.dimensions[1]:
                return "Landscape"
            else:
                return "Portrait"
        else:
            return None
    orientation = property(_orientation)

    def _is_empty(self):
        """
        True if Folder is empty, False if not; None for non-directories.
        """
        if os.path.isdir(self.path_full):
            if not os.listdir(self.path_full):
                return True
            else:
                return False
        else:
            return None
    is_empty = property(_is_empty)

    # String representations all use the save-URL form.
    def __repr__(self):
        return force_unicode(self.url_save)

    def __str__(self):
        return force_unicode(self.url_save)

    def __unicode__(self):
        return force_unicode(self.url_save)
| [
"davidraywilson@live.com"
] | davidraywilson@live.com |
ad1e27f5c9102e1482dd8f3957d9d2fb4cd8bb45 | ae2caee4759ed54048acb223ee2423ce147c3986 | /bika/lims/exportimport/instruments/fiastar.py | c2b94c6ee14d5f5b9e4087f4ef90115684d7c723 | [] | no_license | cheslip/Bika-LIMS | b4bf620426cf63beb6cfcd40ca06be4eb78230ba | 8d524589a3191dc8d22bd400c16a8c8df8289f8f | refs/heads/master | 2021-01-18T04:05:55.985951 | 2012-01-30T12:31:52 | 2012-01-30T12:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,625 | py | """ Fiastar
"""
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.Archetypes.event import ObjectInitializedEvent
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import changeWorkflowState
from bika.lims import logger
from cStringIO import StringIO
from operator import itemgetter
from plone.i18n.normalizer.interfaces import IIDNormalizer
from zope.component import getUtility
import csv
import json
import plone
import zope
import zope.event
# Human-readable name of this instrument interface.
title = 'FIAStar'


class Export(BrowserView):
    """ Writes worksheet analyses to a CSV file that FIAStar can read.
    Sends the CSV file to the response.
    Requests "TSO2 & F SO2" for all requests.
    uses analysis' PARENT UID as 'Sample name' col.
    uses analysis' CONTAINER UID as 'Sample type' col.
    (they are not always the same; think of multiple duplicates of the same
    analysis.)
    """

    def __call__(self, analyses):
        # Every exported row uses the same fixed tray number.
        tray = 1
        now = DateTime().strftime('%Y%m%d-%H%M')
        # NOTE(review): bsc appears unused in this method.
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        uc = getToolByName(self.context, 'uid_catalog')
        instrument = self.context.getInstrument()
        norm = getUtility(IIDNormalizer).normalize
        filename = '%s-%s.csv'%(self.context.getId(),
                                norm(instrument.getDataInterface()))
        listname = '%s_%s_%s' %(self.context.getId(),
                                norm(instrument.Title()), now)
        # Defaults, overridable via the instrument's data-interface options.
        options = {'dilute_factor' : 1,
                   'method': 'F SO2 & T SO2'}
        for k,v in instrument.getDataInterfaceOptions():
            options[k] = v

        # for looking up "cup" number (= slot) of ARs
        parent_to_slot = {}
        layout = self.context.getLayout()
        for x in range(len(layout)):
            a_uid = layout[x]['analysis_uid']
            p_uid = uc(UID=a_uid)[0].getObject().aq_parent.UID()
            layout[x]['parent_uid'] = p_uid
            if not p_uid in parent_to_slot.keys():
                parent_to_slot[p_uid] = int(layout[x]['position'])

        # write rows, one per PARENT
        header = [listname, options['method']]
        rows = []
        rows.append(header)
        tmprows = []
        ARs_exported = []
        for x in range(len(layout)):
            # create batch header row
            c_uid = layout[x]['container_uid']
            p_uid = layout[x]['parent_uid']
            if p_uid in ARs_exported:
                continue
            cup = parent_to_slot[p_uid]
            tmprows.append([tray,
                            cup,
                            p_uid,
                            c_uid,
                            options['dilute_factor'],
                            ""])
            ARs_exported.append(p_uid)
        # order the body rows by cup number (Python 2 cmp-style sort)
        tmprows.sort(lambda a,b:cmp(a[1], b[1]))
        rows += tmprows

        # Render the CSV into an in-memory buffer.
        ramdisk = StringIO()
        writer = csv.writer(ramdisk, delimiter=';')
        assert(writer)
        writer.writerows(rows)
        result = ramdisk.getvalue()
        ramdisk.close()

        #stream file to browser
        setheader = self.request.RESPONSE.setHeader
        setheader('Content-Length',len(result))
        setheader('Content-Type', 'text/comma-separated-values')
        setheader('Content-Disposition', 'inline; filename=%s' % filename)
        self.request.RESPONSE.write(result)
def Import(context,request):
    """ Read FIAStar analysis results from an uploaded instrument CSV.

    Parses the CSV in request.form['file'] (both FIA format 1 and the
    wider format 2 headers are handled), matches every result row back
    to an existing Analysis or DuplicateAnalysis through the
    'Sample name' (parent UID) and 'Sample type' (container UID)
    columns, writes the instrument values into the analysis' interim
    fields, and returns a JSON string with 'log' and 'errors' lists of
    translated messages.
    """
    template = "fiastar_import.pt"
    csvfile = request.form['file']
    pc = getToolByName(context, 'portal_catalog')
    uc = getToolByName(context, 'uid_catalog')
    bsc = getToolByName(context, 'bika_setup_catalog')
    wf_tool = getToolByName(context, 'portal_workflow')
    # only analyses in one of these workflow states may receive results
    updateable_states = ['sample_received', 'assigned', 'not_requested']
    now = DateTime().strftime('%Y%m%d-%H%M')
    res = {'errors': [],
           'log': [],}
    options = {'dilute_factor' : 1,
               'F SO2' : 'FSO2',
               'T SO2' : 'TSO2'}
    # values posted in the request override the defaults above
    for k,v in options.items():
        if k in request:
            options[k] = request.get(k)
        else:
            options[k] = v
    # kw_map to lookup Fiastar parameter -> service keyword and vice versa
    kw_map = {}
    for param in ['F SO2', 'T SO2']:
        service = bsc(getKeyword = options[param])
        if not service:
            msg = _('import_service_keyword_not_found',
                    default = 'Service keyword ${keyword} not found',
                    mapping = {'keyword': options[param], })
            res['errors'].append(context.translate(msg))
            continue
        service = service[0].getObject()
        kw_map[param] = service
        kw_map[service.getKeyword()] = param
    # all errors at this point are fatal ones
    if res['errors']:
        return json.dumps(res)
    rows = []
    batch_headers = None
    fia1 = False
    fia2 = False
    # place all valid rows into list of dict by CSV row title
    for row in csvfile.readlines():
        if not row: continue
        row = row.split(';')
        # a new batch starts
        # (the header column layout differs between FIA format 1 and 2)
        if row[0] == 'List name':
            fia1 = False
            fia2 = False
            if row[13] == 'Concentration':
                fia1 = True
            elif row[15] == 'Concentration':
                row[13] = 'Peak Mean'
                row[14] = 'Peak St dev'
                row[16] = 'Concentration Mean'
                row[17] = 'Concentration St dev'
                fia2 = True
            fields = row
            continue
        row = dict(zip(fields, row))
        if row['Parameter'] == 'sample' or not row['Concentration']:
            continue
        if fia1:
            # FIA 1 exports lack the mean/st-dev columns; zero-fill them
            row['Peak Mean'] = 0
            row['Peak St dev'] = 0
            row['Concentration Mean'] = 0
            row['Concentration St dev'] = 0
        rows.append(row)
    log = []
    for row in rows:
        param = row['Parameter']
        service = kw_map[param]
        keyword = service.getKeyword()
        calc = service.getCalculation()
        interim_fields = calc and calc.getInterimFields() or []
        p_uid = row['Sample name']
        parent = uc(UID = p_uid)
        if len(parent) == 0:
            msg = _('import_analysis_parent_not_found',
                    default = 'Analysis parent UID ${parent_uid} not found',
                    mapping = {'parent_uid': row['Sample name'], })
            res['errors'].append(context.translate(msg))
            continue
        parent = parent[0].getObject()
        c_uid = row['Sample type']
        container = uc(UID = c_uid)
        if len(container) == 0:
            msg = _('import_analysis_container_not_found',
                    default = 'Analysis container UID ${parent_uid} not found',
                    mapping = {'container_uid': row['Sample type'], })
            res['errors'].append(context.translate(msg))
            continue
        container = container[0].getObject()
        # Duplicates.
        if p_uid != c_uid:
            dups = [d.getObject() for d in
                    pc(portal_type='DuplicateAnalysis',
                       path={'query': "/".join(container.getPhysicalPath()),
                             'level': 0,})]
            # The analyses should exist already
            # or no results will be imported.
            analysis = None
            # NOTE(review): aq_parent (an object) is compared to p_uid (a
            # UID string) below; this looks like it should be
            # aq_parent.UID() - as written no duplicate can match. Confirm.
            for dup in dups:
                if dup.getAnalysis().aq_parent == p_uid and \
                   dup.getKeyword() in (options['F SO2'], options['T SO2']):
                    analysis = dup
            if not analysis:
                msg = _('import_duplicate_not_found',
                        default = 'Duplicate analysis for slot ${slot} not found',
                        mapping = {'slot': row['Cup'], })
                res['errors'].append(context.translate(msg))
                continue
            row['analysis'] = analysis
        else:
            analyses = parent.objectIds()
            if keyword in analyses:
                # analysis exists for this parameter.
                analysis = parent.get(keyword)
                row['analysis'] = analysis
            else:
                # analysis does not exist;
                # create new analysis and set 'results_not_requested' state
                parent.invokeFactory(type_name="Analysis", id = keyword)
                analysis = parent[keyword]
                analysis.edit(Service = service,
                              InterimFields = interim_fields,
                              MaxTimeAllowed = service.getMaxTimeAllowed())
                changeWorkflowState(analysis,
                                    'not_requested',
                                    comments="FOSS FIAStar")
                analysis.unmarkCreationFlag()
                zope.event.notify(ObjectInitializedEvent(analysis))
                row['analysis'] = analysis
        as_state = wf_tool.getInfoFor(analysis, 'review_state', '')
        if (as_state not in updateable_states):
            msg = _('import_service_not_updateable',
                    default = 'Analysis ${service} at slot ${slot} in state ${state} - not updated',
                    mapping = {'service': service.Title(),
                               'slot': row['Cup'],
                               'state': as_state,})
            res['errors'].append(context.translate(msg))
            continue
        if analysis.getResult():
            msg = _('import_service_has_result',
                    default = 'Analysis ${service} at slot ${slot} has a result - not updated',
                    mapping = {'service': service.Title(),
                               'slot': row['Cup'], })
            res['errors'].append(context.translate(msg))
            continue
        # store the raw instrument values on the analysis as interim fields
        analysis.setInterimFields(
            [
                {'keyword':'dilution_factor',
                 'title': 'Dilution Factor',
                 'value': row['Dilution'],
                 'unit':''},
                {'keyword':'injection',
                 'title': 'Injection',
                 'value': row['Injection'],
                 'unit':''},
                {'keyword':'peak',
                 'title': 'Peak Height/Area',
                 'value': row['Peak Height/Area'],
                 'unit':''},
                {'keyword':'peak_mean',
                 'title': 'Peak Mean',
                 'value': row.get('Peak Mean', '0'),
                 'unit':''},
                {'keyword':'peak_st_dev',
                 'title': 'Peak St dev',
                 'value': row.get('Peak St dev', '0'),
                 'unit':''},
                {'keyword':'concentration',
                 'title': 'Concentration',
                 'value': row['Concentration'],
                 'unit':''},
                {'keyword':'concentration_mean',
                 'title': 'Concentration Mean',
                 'value': row['Concentration Mean'],
                 'unit':''},
                {'keyword':'concentration_st_dev',
                 'title': 'Concentration St dev',
                 'value': row['Concentration St dev'],
                 'unit':''},
                {'keyword':'deviation',
                 'title': 'Deviation',
                 'value': row['Deviation'],
                 'unit':''},
            ]
        )
        msg = _('import_analysis_ok',
                default = 'Analysis ${service} at slot ${slot}: OK',
                mapping = {'service': service.Title(),
                           'slot': row['Cup'], })
        res['log'].append(context.translate(msg))
    return json.dumps(res)
| [
"campbell@bikalabs.com"
] | campbell@bikalabs.com |
1beb2e528c471684c319966b412c74359454e913 | ea178f0977127189c7559dfa9ca2faadceef5ff8 | /python/jittor/test/test_auto_diff.py | 933a44e9ded740bcf082495e7343a4d606c8c0b3 | [
"Apache-2.0"
] | permissive | AbbasMZ/jittor | a0bb5b2cbceeffb40c61405b863e7e4b91567756 | fcec57f70422b52d6b8d0235e29f91fd2212f559 | refs/heads/master | 2023-06-20T07:07:22.952846 | 2021-07-15T14:40:54 | 2021-07-15T14:40:54 | 386,115,280 | 0 | 0 | Apache-2.0 | 2021-07-15T00:42:22 | 2021-07-15T00:39:53 | null | UTF-8 | Python | false | false | 1,953 | py | # ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <randonlang@gmail.com>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import os
import subprocess
import sys
import tempfile
import unittest

import numpy as np

import jittor as jt
# Skip the whole test class when torch/torchvision are unavailable or
# incompatible with this jittor build.
skip_this_test = False
try:
    jt.dirty_fix_pytorch_runtime_error()
    import torch
    import torchvision.models as tcmodels
    from torch import nn
except Exception:
    # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # still propagate; any import/runtime failure just disables the test
    torch = None
    skip_this_test = True
@unittest.skipIf(skip_this_test, "skip_this_test")
class TestAutoDiff(unittest.TestCase):
    """Exercise the jittor_utils.auto_diff hook on resnet50.

    Runs a torch snippet twice (so the second run can compare against
    the data recorded by the first) and then the equivalent jittor
    snippet, each in a fresh interpreter process so torch and jittor
    never share one runtime.
    """

    def _run_snippet(self, code, name):
        """Write *code* into a private temp dir and execute it with the
        current interpreter; fail the test if the child exits non-zero."""
        # tempfile.mkdtemp() replaces the original fixed /tmp/*.py paths,
        # which were race-prone and a symlink hazard on shared machines
        path = os.path.join(tempfile.mkdtemp(prefix="jt_auto_diff_"), name)
        with open(path, 'w') as f:
            f.write(code)
        # list-form invocation avoids the shell entirely (no quoting issues)
        self.assertEqual(subprocess.call([sys.executable, path]), 0)

    def test_pt_hook(self):
        # torch side: run twice so auto_diff can diff run 2 against run 1
        code = '''
import numpy as np
from jittor_utils import auto_diff
import torch
import torchvision.models as tcmodels
net = tcmodels.resnet50()
net.train()
hook = auto_diff.Hook("resnet50")
hook.hook_module(net)
np.random.seed(0)
data = np.random.random((2,3,224,224)).astype('float32')
data = torch.Tensor(data)
net(data)
# assert auto_diff.has_error == 0, auto_diff.has_error
'''
        self._run_snippet(code, "test_pt_hook.py")
        self._run_snippet(code, "test_pt_hook.py")
        # jittor side: compared against the torch data recorded above
        code = '''
import numpy as np
import jittor as jt
from jittor_utils import auto_diff
from jittor.models import resnet50
net = resnet50()
net.train()
hook = auto_diff.Hook("resnet50")
hook.hook_module(net)
np.random.seed(0)
data = np.random.random((2,3,224,224)).astype('float32')
data = jt.array(data)
net(data)
# assert auto_diff.has_error == 0, auto_diff.has_error
'''
        self._run_snippet(code, "test_jt_hook.py")
if __name__ == "__main__":
    # run the tests when this module is executed directly
    unittest.main()
| [
"randonlang@gmail.com"
] | randonlang@gmail.com |
87597d7424f9eebfa25701cb9370b644ec76b843 | a4873925556df4b96c936e1ac12d9efb825db5af | /tac_kbp/detect_events_v2_bilstm_v3_posdep.py | d2257ad1d782fb17e0eaeda3d06b8faf5b79c5b9 | [] | no_license | Heidelberg-NLP/aiphes-hd-tac2016-kbp-event-nuggets | 9a4f9b086fb8f47dd7602bbdb6f974b8af62d790 | 4dc76a8d3b2cf675156efd39d0e7e5d1c8b29702 | refs/heads/master | 2020-07-12T04:46:51.590278 | 2018-09-06T11:33:55 | 2018-09-06T11:33:55 | 204,722,173 | 0 | 0 | null | 2019-08-27T16:32:54 | 2019-08-27T14:32:54 | null | UTF-8 | Python | false | false | 72,543 | py |
"""Event Nugget Detection
Run train and test on the TAC Event Nugget detection task
"""
import codecs
import json
import random
import sys
from datetime import datetime
import logging # logging
# from sklearn import preprocessing
# from sklearn.grid_search import GridSearchCV
# from sklearn.linear_model import LogisticRegression
from tac_kbp.utils.tf_helpers import tf_helpers
from tac_kbp.utils.Common_Utilities import CommonUtilities
import gensim
from gensim import corpora, models, similarities # used for word2vec
from gensim.models.word2vec import Word2Vec # used for word2vec
from gensim.models.doc2vec import Doc2Vec # used for doc2vec
import time # used for performance measuring
import math
from scipy import spatial # used for similarity calculation
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Phrases
from gensim import corpora # for dictionary
from gensim.models import LdaModel
# from sklearn.svm import libsvm
# from sklearn.svm import SVC
# from Word2Vec_AverageVectorsUtilities import AverageVectorsUtilities
import tac_kbp.readers.tac_prep_corpus as tc
from tac_kbp.readers.tac_prep_corpus import *
import time as ti
from tac_kbp.utils.Word2Vec_AverageVectorsUtilities import AverageVectorsUtilities
from tac_kbp.utils.VocabEmbedding_Utilities import VocabEmbeddingUtilities
import pickle
import string
from shutil import copyfile
def count_non_zero_label(test_seq, zero_label=0):
    """Count the sequences that carry at least one non-background label.

    Args:
        test_seq: iterable of sequence objects exposing a ``y`` list of
            integer labels (e.g. the sequences read by TacPrepJsonCorpus).
        zero_label: label value treated as "no event" background/padding.

    Returns:
        int: number of sequences in *test_seq* whose ``y`` contains at
        least one label different from *zero_label*.
    """
    # any() short-circuits on the first non-zero label, replacing the
    # original manual break-loop (and its commented-out debug print)
    return sum(1 for item in test_seq
               if any(lbl != zero_label for lbl in item.y))
import tensorflow as tf
from tac_kbp.models import EventSequenceLabeler_BiLSTM_v3_posdep
from tac_kbp.utils.BatchHelpers import *
class EventLabelerAndClassifier_v2_BiLSTM_v3_posdep(object):
def __init__(self,
classifier_name,
run_name,
model_dir,
output_dir,
event_labeling,
event_type_classify,
event_realis_classify,
event_coref):
self._classifier_name = classifier_name
self._run_name = run_name
self._model_dir = model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
self._classifier_settings_dir = "%s/%s" %(model_dir, classifier_name)
self._vocab_and_embeddings_file = "%s/vocab_and_embeddings.pickle" % (self._classifier_settings_dir)
self._vocab_and_embeddings_pos_file = "%s/vocab_and_embeddings_pos.pickle" % (self._classifier_settings_dir)
self._vocab_and_embeddings_deps_file = "%s/vocab_and_embeddings_deps.pickle" % (self._classifier_settings_dir)
self._output_dir = output_dir
self._event_labeling = event_labeling
self._event_type_classify = event_type_classify
self._event_realis_classify = event_realis_classify
self._event_coref = event_coref
self._settings = {}
self._settings_file = "%s/settings.pickle" % (self._classifier_settings_dir)
# Checkpoint setup
self._checkpoint_dir = os.path.abspath(os.path.join(self._classifier_settings_dir, "checkpoints"))
self._checkpoint_prefix = os.path.join(self._checkpoint_dir, "model")
self._checkpoint_best = os.path.join(self._checkpoint_dir, "model_best")
if not os.path.exists(self._checkpoint_dir):
os.makedirs(self._checkpoint_dir)
self._checkpoints_backup_dir = os.path.abspath(
os.path.join(self._classifier_settings_dir, "checkpoints_backup"))
if not os.path.exists(self._checkpoints_backup_dir):
os.makedirs(self._checkpoints_backup_dir)
self.pad_pos = "-PAD-"
self.unknown_pos = "-UNKNWN-"
self.pad_deps = "-PAD-"
self.unknown_deps = "-UNKNWN-"
self.zero_label_deps = "-NOLBLs-"
self.unknown_word = "<UNKNWN>"
self.pad_word = "<PAD>"
pass
def save_settings(self):
pickle.dump(self._settings, open(self._settings_file, 'wb'))
logging.info("settings saved to %s" % self._settings_file)
def load_settings(self):
self._settings = pickle.load(open(self._settings_file, 'rb'))
    def train(self,
              train_files,
              dev_files,
              embeddings_model,
              embeddings_type,
              embeddings_vec_size,
              # pos_embeddings,
              pos_embeddings_size,
              # pos_embeddings_number,
              # deps_embeddings,
              deps_embeddings_size,
              # deps_embeddings_number,
              eval_dev,
              max_nr_sent,
              embeddings_trainable,
              learning_rate,
              # # learning_rate_trainable = False
              train_epochs_cnt,
              hidden_size,
              batch_size,
              include_pos_layers,
              include_deps_layers,
              include_token_layers,
              learning_rate_fixed=True
              ):
        """Build vocabularies/embeddings and train the BiLSTM event labeler.

        Reads the train (and optionally dev) json corpus files, builds
        three vocabularies with embeddings - tokens (word2vec or random),
        POS tags (random) and dependency labels (random) - pickles them
        under the classifier settings dir, loads the data as label
        sequences and then delegates the actual training to
        train_sequencelabeler_and_save_model, which checkpoints to
        self._checkpoint_dir.

        Params (selection):
            train_files / dev_files: lists of preprocessed json corpus files.
            embeddings_model: loaded gensim model, used when
                embeddings_type == "w2v".
            embeddings_type: "w2v" or "rand"; anything else raises.
            eval_dev: if True and dev_files is non-empty, evaluate on dev.
            max_nr_sent: cap on the number of sentences read per corpus.
            learning_rate_fixed: if False, the learning rate decays during
                training (handled by the training routine).

        Side effects: writes the three vocab/embedding pickles and
        settings.pickle, creates/updates model checkpoints.
        """
        embeddings_vocab_set = set([])
        if embeddings_type == "w2v":
            embeddings_vocab_set = set(embeddings_model.index2word)
        # Load data
        max_sent_len = 1000
        update_vocab = False
        update_tags = False
        unknown_tag = u'O'
        mapping_file = None
        data_x_fieldname = "tokens"
        data_y_fieldname = "labels_event"
        tag_start_index = 1
        # Retrieve deps vocabulary
        logging.info("Retrieve deps vocabulary..")
        st = ti.time()
        deps_usage_stat = TacPrepJsonCorpus.deps_counts_from_jsonfiles(
            json_files=train_files + dev_files,
            data_fieldname="deps_basic",
            max_nr_sent=max_nr_sent
        )
        deps_vocab = [xx[0] for xx in deps_usage_stat]
        deps_vocab.sort()
        pad_deps = self.pad_deps
        unknown_deps = self.unknown_deps
        zero_label_deps = self.zero_label_deps
        # reserved symbols take the first three indices (pad ends up at 0)
        deps_vocab.insert(0, zero_label_deps)
        deps_vocab.insert(0, unknown_deps)
        deps_vocab.insert(0, pad_deps)
        logging.info("deps_vocab:%s" % deps_vocab)
        logging.info("Done in %s s" % (ti.time() - st))
        # Init random POS embeddings
        vocab_and_embeddings_deps = {}
        np.random.seed(111)
        random_embeddings = np.random.uniform(-0.1, 0.1, (len(deps_vocab), deps_embeddings_size))
        deps_vocab_dict_emb = dict([(k, i) for i, k in enumerate(deps_vocab)])
        # the padding symbol gets an all-zero vector
        random_embeddings[deps_vocab_dict_emb[pad_deps]] = np.zeros((deps_embeddings_size))
        vocab_and_embeddings_deps["embeddings"] = random_embeddings
        vocab_and_embeddings_deps["vocabulary"] = deps_vocab_dict_emb
        # save vocab and embeddings
        pickle.dump(vocab_and_embeddings_deps, open(self._vocab_and_embeddings_deps_file, 'wb'))
        logging.info('DEPS: Vocab and deps embeddings saved to: %s' % self._vocab_and_embeddings_deps_file)
        # Retrieve pos vocabulary
        logging.info("Retrieve pos vocabulary..")
        st = ti.time()
        pos_usage_stat = TacPrepJsonCorpus.word_counts_from_jsonfiles(
            json_files=train_files + dev_files,
            data_fieldname="pos",
            max_nr_sent=max_nr_sent
        )
        pos_vocab = [xx[0] for xx in pos_usage_stat]
        pos_vocab.sort()
        pad_pos = self.pad_pos
        unknown_pos = self.unknown_pos
        # NOTE(review): unlike deps above, here the unknown symbol ends up
        # at index 0 and pad at index 1; the zero vector is still assigned
        # to pad via the dict lookup below - confirm the index order is
        # intended.
        pos_vocab.insert(0, pad_pos)
        pos_vocab.insert(0, unknown_pos)
        logging.info("pos_vocab:%s" % pos_vocab)
        logging.info("Done in %s s" % (ti.time() - st))
        # Init random POS embeddings
        vocab_and_embeddings_pos = {}
        np.random.seed(111)
        random_embeddings = np.random.uniform(-0.1, 0.1, (len(pos_vocab), pos_embeddings_size))
        pos_vocab_dict_emb = dict([(k, i) for i, k in enumerate(pos_vocab)])
        random_embeddings[pos_vocab_dict_emb[pad_pos]] = np.zeros((pos_embeddings_size))
        vocab_and_embeddings_pos["embeddings"] = random_embeddings
        vocab_and_embeddings_pos["vocabulary"] = pos_vocab_dict_emb
        # save vocab and embeddings
        pickle.dump(vocab_and_embeddings_pos, open(self._vocab_and_embeddings_pos_file, 'wb'))
        logging.info('PoS: Vocab and pos embeddings saved to: %s' % self._vocab_and_embeddings_pos_file)
        # Retrieve words vocabulary
        logging.info("Retrieve words vocabulary..")
        st = ti.time()
        word_usage_stat = TacPrepJsonCorpus.word_counts_from_jsonfiles(
            json_files=train_files,
            data_fieldname=data_x_fieldname,
            max_nr_sent=max_nr_sent
        )
        logging.info("Done in %s s" % (ti.time()-st))
        min_word_freq = 3
        logging.info("min_word_freq:%s" % min_word_freq)
        # words rarer than min_word_freq are dropped (they map to <UNKNWN>)
        word_usage_stat = [xx for xx in word_usage_stat if xx[1] >= min_word_freq]
        # clear the vocab
        vocab = [xx[0] for xx in word_usage_stat]
        add_lowercased = True
        if add_lowercased:
            vocab_lowercase = set([xx.lower() for xx in vocab])
            vocab_lowercase = list(vocab_lowercase - set(vocab))
            vocab.extend(vocab_lowercase)
        # Add pad and unknown tokens
        unknown_word = self.unknown_word
        pad_word = self.pad_word
        vocab.insert(0, pad_word)
        vocab.insert(0, unknown_word)
        vocab_dict = LabelDictionary(vocab, start_index=0)
        logging.info("Get average model vector for unknown_vec..")
        st = ti.time()
        vocab_and_embeddings = {}
        if embeddings_type == "w2v":
            # the unknown token vector is the mean of all model vectors
            unknown_vec = AverageVectorsUtilities.makeFeatureVec(words = list(embeddings_vocab_set),
                                                                 model = embeddings_model,
                                                                 num_features = embeddings_vec_size,
                                                                 index2word_set = embeddings_vocab_set)
            pad_vec = unknown_vec * 0.25
            logging.info("Done in %s s" % (ti.time() - st))
            logging.info("Loading embeddings for vocab..")
            st = ti.time()
            vocab_and_embeddings = VocabEmbeddingUtilities\
                .get_embeddings_for_vocab_from_model(vocabulary=vocab_dict,
                                                     embeddings_type='w2v',
                                                     embeddings_model=embeddings_model,
                                                     embeddings_size=embeddings_vec_size)
            vocab_and_embeddings["embeddings"][vocab_and_embeddings["vocabulary"][unknown_word], :] = unknown_vec
            vocab_and_embeddings["embeddings"][vocab_and_embeddings["vocabulary"][pad_word], :] = pad_vec
            logging.info("Done in %s s" % (ti.time() - st))
        elif embeddings_type == "rand":
            np.random.seed(123)
            random_embeddings = np.random.uniform(-0.1, 0.1, (len(vocab), embeddings_vec_size))
            vocab_dict_emb = dict([(k, i) for i, k in enumerate(vocab)])
            vocab_and_embeddings["embeddings"] = random_embeddings
            vocab_and_embeddings["vocabulary"] = vocab_dict_emb
        else:
            raise Exception("embeddings_type=%s is not supported!" % embeddings_type)
        # save vocab and embeddings
        pickle.dump(vocab_and_embeddings, open(self._vocab_and_embeddings_file, 'wb'))
        logging.info('Vocab and embeddings saved to: %s' % self._vocab_and_embeddings_file)
        # Load the data for labeling
        corpus_vocab_input = LabelDictionary()
        corpus_vocab_input.set_dict(vocab_and_embeddings["vocabulary"])
        labels_lst = [u'O', u'B-EVENT', u'I-EVENT']
        classes_dict = {1: u'O', 2: u'B-EVENT', 3: u'I-EVENT'}
        self._settings["labels_lst"] = labels_lst
        self._settings["classes_dict"] = classes_dict
        tac_corpus = TacPrepJsonCorpus([], labels_lst,
                               tag_start_index=1,  # we keep the 0 for padding symbol for Tensorflow dynamic stuff
                               vocab_start_index=0)
        tac_corpus.set_word_dict(corpus_vocab_input)
        # Load train data
        logging.info("Loading train data from %s..." % train_files)
        st = ti.time()
        train_seq, train_seq_meta = tac_corpus.read_sequence_list_tac_json(train_files,
                                                       max_sent_len=max_sent_len,
                                                       max_nr_sent=max_nr_sent,
                                                       update_vocab=update_vocab,
                                                       update_tags=update_tags,
                                                       unknown_word=unknown_word,
                                                       unknown_tag=unknown_tag,
                                                       mapping_file=mapping_file,
                                                       data_x_fieldname=data_x_fieldname,
                                                       data_y_fieldname=data_y_fieldname)
        train_pos = Tac2016_EventNuggets_DataUtilities.get_data_idx_for_field(
            data_meta=train_seq_meta,
            field_name="pos",
            field_vocab_dict=pos_vocab_dict_emb,
            unknown_word=unknown_pos)
        logging.info("train_pos[0]:%s" % train_pos[0])
        train_deps_left, train_deps_right = Tac2016_EventNuggets_DataUtilities.get_left_right_data_idx_for_deps(
            data_meta=train_seq_meta,
            field_name="deps_basic",
            field_vocab_dict=deps_vocab_dict_emb,
            unknown_lbl=unknown_deps,
            zero_deps_lbl=zero_label_deps,
            field_sent_tokens="tokens")
        logging.info("train_deps_left[0]:%s" % train_deps_left[0])
        logging.info("train_deps_right[0]:%s" % train_deps_right[0])
        logging.info("Done in %s s" % (ti.time() - st))
        logging.info("All sents:%s" % len(train_seq))
        logging.info("With non zero labels:%s" % count_non_zero_label(train_seq, zero_label=tag_start_index))
        # exit()
        # Load dev data
        logging.info("Loading dev data from %s..." % dev_files)
        st = ti.time()
        dev_seq = None
        dev_seq_meta = None
        # NOTE(review): when eval_dev is False (or dev_files is empty),
        # dev_pos/dev_deps_left/dev_deps_right are never bound but are
        # still passed to the training call below, which would raise
        # NameError - confirm eval_dev is always True in practice.
        if eval_dev and len(dev_files) > 0:
            dev_seq, dev_seq_meta = tac_corpus.read_sequence_list_tac_json(dev_files,
                                                       max_sent_len=max_sent_len,
                                                       max_nr_sent=max_nr_sent,
                                                       update_vocab=update_vocab,
                                                       update_tags=update_tags,
                                                       unknown_word=unknown_word,
                                                       unknown_tag=unknown_tag,
                                                       mapping_file=mapping_file,
                                                       data_x_fieldname=data_x_fieldname,
                                                       data_y_fieldname=data_y_fieldname)
            dev_pos = Tac2016_EventNuggets_DataUtilities.get_data_idx_for_field(
                data_meta=dev_seq_meta,
                field_name="pos",
                field_vocab_dict=pos_vocab_dict_emb,
                unknown_word=unknown_pos)
            logging.info("dev_pos[0]:%s" % dev_pos[0])
            dev_deps_left, dev_deps_right = Tac2016_EventNuggets_DataUtilities.get_left_right_data_idx_for_deps(
                data_meta=dev_seq_meta,
                field_name="deps_basic",
                field_vocab_dict=deps_vocab_dict_emb,
                unknown_lbl=unknown_deps,
                zero_deps_lbl=zero_label_deps,
                field_sent_tokens="tokens")
            logging.info("dev_deps_left[0]:%s" % dev_deps_left[0])
            logging.info("dev_deps_right[0]:%s" % dev_deps_right[0])
            logging.info("Done in %s s" % (ti.time() - st))
            logging.info("All sents:%s" % len(dev_seq))
            logging.info("With non zero labels:%s" % count_non_zero_label(dev_seq, zero_label=tag_start_index))
        else:
            logging.info("No dev evaluation.")
        # TO DO: Train the model!
        logging.info("Train the model..")
        # Train the sequence event labeler
        embeddings = vocab_and_embeddings["embeddings"]
        embeddings_size = vocab_and_embeddings["embeddings"].shape[1]
        embeddings_number = vocab_and_embeddings["embeddings"].shape[0]
        pos_embeddings = vocab_and_embeddings_pos["embeddings"]
        pos_embeddings_size = vocab_and_embeddings_pos["embeddings"].shape[1]
        pos_embeddings_number = vocab_and_embeddings_pos["embeddings"].shape[0]
        deps_embeddings = vocab_and_embeddings_deps["embeddings"]
        deps_embeddings_size = vocab_and_embeddings_deps["embeddings"].shape[1]
        deps_embeddings_number = vocab_and_embeddings_deps["embeddings"].shape[0]
        # embeddings_trainable = False
        # learning_rate = 0.1
        learning_rate_trainable = False
        # train_epochs_cnt = 1
        self._settings["checkpoint_best"] = self._checkpoint_best
        self.save_settings()
        self.train_sequencelabeler_and_save_model(train_seq=train_seq,
                                                  train_seq_meta=train_seq_meta,
                                                  train_pos=train_pos,
                                                  train_deps_left=train_deps_left,
                                                  train_deps_right=train_deps_right,
                                                  dev_seq=dev_seq,
                                                  dev_seq_meta=dev_seq_meta,
                                                  dev_pos=dev_pos,
                                                  dev_deps_left=dev_deps_left,
                                                  dev_deps_right=dev_deps_right,
                                                  eval_dev=eval_dev,
                                                  n_classes=len(labels_lst),
                                                  classes_dict=classes_dict,
                                                  embeddings=embeddings,
                                                  embeddings_size=embeddings_size,
                                                  embeddings_number=embeddings_number,
                                                  embeddings_trainable=embeddings_trainable,
                                                  pos_embeddings=pos_embeddings,
                                                  pos_embeddings_size=pos_embeddings_size,
                                                  pos_embeddings_number=pos_embeddings_number,
                                                  deps_embeddings=deps_embeddings,
                                                  deps_embeddings_size=deps_embeddings_size,
                                                  deps_embeddings_number=deps_embeddings_number,
                                                  hidden_size=hidden_size,
                                                  learning_rate=learning_rate,
                                                  learning_rate_trainable=learning_rate_trainable,
                                                  train_epochs_cnt=train_epochs_cnt,
                                                  batch_size=batch_size,
                                                  include_pos_layers=include_pos_layers,
                                                  include_deps_layers=include_deps_layers,
                                                  include_token_layers=include_token_layers,
                                                  learning_rate_fixed=learning_rate_fixed
                                                  )
def train_sequencelabeler_and_save_model(self,
train_seq,
train_seq_meta,
train_pos,
train_deps_left,
train_deps_right,
dev_seq,
dev_seq_meta,
dev_pos,
dev_deps_left,
dev_deps_right,
eval_dev,
n_classes,
classes_dict,
embeddings,
embeddings_size,
embeddings_number,
embeddings_trainable,
pos_embeddings,
pos_embeddings_size,
pos_embeddings_number,
deps_embeddings,
deps_embeddings_size,
deps_embeddings_number,
hidden_size,
learning_rate,
learning_rate_trainable,
train_epochs_cnt,
batch_size,
include_pos_layers,
include_deps_layers,
include_token_layers,
learning_rate_fixed
):
allow_soft_placement = True
log_device_placement = True
# train settings
pad_value = 0
# batch_size = 50
epochs_cnt = train_epochs_cnt # set to 1 for debug purposes.
checkpoint_every = 5
eval_every = 1
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=allow_soft_placement,
log_device_placement=log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
seq_model = EventSequenceLabeler_BiLSTM_v3_posdep(
n_classes=n_classes,
embeddings=embeddings,
embeddings_size=embeddings_size,
embeddings_number=embeddings_number,
pos_embeddings=pos_embeddings,
pos_embeddings_size=pos_embeddings_size,
pos_embeddings_number=pos_embeddings_number,
deps_embeddings=deps_embeddings,
deps_embeddings_size=deps_embeddings_size,
deps_embeddings_number=deps_embeddings_number,
hidden_size=hidden_size,
learning_rate=learning_rate,
learning_rate_trainable=learning_rate_trainable,
embeddings_trainable=embeddings_trainable,
include_pos_layers=include_pos_layers,
include_deps_layers=include_deps_layers,
include_token_layers=include_token_layers
)
# We can train also the learning rate
#learn_rate = tf.Variable(learning_rate, trainable=learning_rate_trainable)
learn_rate = tf.placeholder(tf.float32, shape=[], name="learn_rate")
global_step = tf.Variable(0, name="global_step", trainable=False)
# Compute and apply gradients
optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate)
gvs = optimizer.compute_gradients(seq_model.losses)
logging.info("gradients:")
for grad, var in gvs:
logging.info("%s - %s" % (grad, var))
capped_gvs = [(tf.clip_by_value(tf_helpers.tf_nan_to_zeros_float64(grad), -1., 1.) if grad is not None else grad, var) for grad, var in
gvs] # cap to prevent NaNs
# capped_gvs = [(tf.clip_by_value(tf_helpers.tf_nan_to_zeros_float64(grad), -1., 1.), var) for grad, var in
# gvs] # cap to prevent NaNs
apply_grads_op = optimizer.apply_gradients(capped_gvs, global_step=global_step)
graph_ops = {}
graph_ops["apply_grads_op"] = apply_grads_op
# graph_ops["learn_rate"] = learn_rate
with tf.name_scope("accuracy"):
# Calculate the accuracy
# Used during training
# Mask the losses - padded values are zeros
mask = tf.sign(tf.cast(seq_model.input_y_flat, dtype=tf.float64))
logging.info("mask:%s" % mask)
masked_losses = mask * seq_model.losses
# Bring back to [batch, class_num] shape
masked_losses = tf.reshape(masked_losses, tf.shape(seq_model.input_y))
input_seq_len_float = tf.cast(seq_model.input_seq_len, dtype=tf.float64)
# Calculate mean loss - depending on the dynamic number of elements
mean_loss_by_example = tf.reduce_sum(masked_losses, reduction_indices=1) / input_seq_len_float
mean_loss = tf.reduce_mean(mean_loss_by_example)
graph_ops["mean_loss"] = mean_loss
# # Evaluate model
preds_flat = tf.argmax(seq_model.probs_flat, 1)
preds_non_paddings = tf.gather(preds_flat, tf.where(tf.greater(seq_model.input_y_flat, [0])))
input_y_non_paddings = tf.gather(seq_model.input_y_flat, tf.where(tf.greater(seq_model.input_y_flat, [0])))
preds_y = tf.reshape(preds_flat, tf.shape(seq_model.input_y), name="preds_y")
graph_ops["preds_y"] = preds_y
correct_pred = tf.equal(preds_non_paddings, input_y_non_paddings)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float64))
graph_ops["accuracy"] = accuracy
def train_step(sess, seq_model, curr_learn_rate, x_batch, y_batch, x_batch_seq_len,
x_batch_pos, x_batch_deps, x_batch_deps_len, x_batch_deps_mask):
feed_dict = {
seq_model.input_x: x_batch, # batch_data_padded_x,
seq_model.input_y: y_batch, # batch_data_padded_y,
seq_model.input_seq_len: x_batch_seq_len, # batch_data_seqlens
seq_model.input_x_pos: x_batch_pos,
seq_model.input_x_deps: x_batch_deps,
seq_model.input_x_deps_len: x_batch_deps_len,
seq_model.input_x_deps_mask: x_batch_deps_mask,
learn_rate: curr_learn_rate
}
_, \
step, \
res_cost, \
res_acc\
= sess.run([
# graph_ops["learn_rate"],
graph_ops["apply_grads_op"],
global_step,
graph_ops["mean_loss"],
graph_ops["accuracy"]
],
feed_dict=feed_dict)
return res_cost, res_acc # , res_learn_rate
def dev_step(sess, seq_model, x_batch, y_batch, x_batch_seq_len,
             x_batch_pos, x_batch_deps, x_batch_deps_len, x_batch_deps_mask):
    """Evaluate one padded batch WITHOUT applying gradients.

    Unlike ``train_step`` this feeds no learning rate and does not fetch
    the train op, so the model weights are left untouched.

    Returns a tuple ``(predictions, mean_loss, accuracy)`` where
    ``predictions`` has the same [batch, max_seq_len] shape as ``y_batch``.

    Closes over ``graph_ops`` and ``global_step`` from the enclosing
    training scope.
    """
    feed_dict = {
        seq_model.input_x: x_batch,  # batch_data_padded_x
        seq_model.input_y: y_batch,  # batch_data_padded_y
        seq_model.input_seq_len: x_batch_seq_len,  # batch_data_seqlens
        seq_model.input_x_pos: x_batch_pos,
        seq_model.input_x_deps: x_batch_deps,
        seq_model.input_x_deps_len: x_batch_deps_len,
        seq_model.input_x_deps_mask: x_batch_deps_mask,
    }
    step, res_cost, res_acc, res_output_y = sess.run(
        [global_step,
         # apply_grads_op deliberately NOT fetched - no weight update here
         graph_ops["mean_loss"],
         graph_ops["accuracy"],
         graph_ops["preds_y"]],
        feed_dict=feed_dict)
    return res_output_y, res_cost, res_acc
# Checkpoint setup
checkpoint_dir = self._checkpoint_dir
checkpoint_prefix = self._checkpoint_prefix
checkpoint_best = self._checkpoint_best
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Training
saver = tf.train.Saver(tf.all_variables(), max_to_keep=10, keep_checkpoint_every_n_hours=1.0)
init_vars = tf.initialize_all_variables()
sess.run(init_vars)
train_batches_cnt = (len(train_seq) / batch_size) + (0 if len(train_seq) % batch_size == 0 else 1)
batch_size_dev = 200
dev_batches_cnt = (len(dev_seq) / batch_size_dev) + (0 if len(dev_seq) % batch_size_dev == 0 else 1)
logging.info("Train batches:%s" % train_batches_cnt)
# train with batches
dev_predictions_history = []
dev_confusion_matrix_history = []
dev_gold = []
best_res_by_lbl = {"B-EVENT": {"f_score": 0.00}}
current_step = 0
logging.info("Start training in %s epochs" % epochs_cnt)
batch_cache_train = []
batch_cache_dev = []
learning_rate_current = learning_rate
for epoch in range(0, epochs_cnt):
logging.info("Epoch %d:" % (epoch + 1))
start_epoch = ti.time()
if not learning_rate_fixed and (epoch+1 > 4):
learning_rate_current = learning_rate_current/1.2 # https://arxiv.org/abs/1409.2329 - Recurrent Neural Network Regularization
logging.info("Curr learning rate:%s" % learning_rate_current)
for i in range(0, train_batches_cnt):
# print"Batch %s =================" % (i+1)
# Get the batch
if len(batch_cache_train)>i:
curr_batch_cache = batch_cache_train[i]
batch_data_padded_x = curr_batch_cache["batch_data_padded_x"]
batch_data_padded_y = curr_batch_cache["batch_data_padded_y"]
batch_data_seqlens = curr_batch_cache["batch_data_seqlens"]
batch_data_pos = curr_batch_cache["batch_data_pos"]
batch_data_deps_left = curr_batch_cache["batch_data_deps_left"]
batch_data_deps_left_seqlens = curr_batch_cache["batch_data_deps_left_seqlens"]
batch_data_deps_left_masks = curr_batch_cache["batch_data_deps_left_masks"]
# logging.info("Loaded batch %s from cache" % i)
# print batch_data_padded_x
else:
all_items_cnt = len(train_seq)
batch_from = i * batch_size
batch_to = i * batch_size + min(batch_size, all_items_cnt - (i * batch_size))
batch_data = train_seq[batch_from: batch_to]
batch_data_padded_x, batch_data_padded_y, batch_data_seqlens = prepare_batch_data(data=batch_data)
max_batch_sent_len = max(batch_data_seqlens)
batch_data_pos = train_pos[batch_from: batch_to]
batch_data_pos = [pad(xx, pad_value=0, to_size=max_batch_sent_len) for xx in batch_data_pos]
batch_data_deps_left = train_deps_left[batch_from: batch_to]
batch_data_deps_left, batch_data_deps_left_seqlens, batch_data_deps_left_masks = pad_and_get_mask(
batch_data_deps_left, pad_value=0)
batch_data_deps_left_seqlens = [pad(xx, pad_value=0, to_size=max_batch_sent_len) for xx in batch_data_deps_left_seqlens]
# print "batch_data_pos:%s" % batch_data_pos
# print "batch_data_deps_left:%s" % batch_data_deps_left
# print "batch_data_deps_left_seqlens:%s" % batch_data_deps_left_seqlens
# print "batch_data_deps_left_masks:%s" % batch_data_deps_left_masks
batch_data_pos = np.asarray(batch_data_pos)
batch_data_deps_left = np.asarray(batch_data_deps_left)
batch_data_deps_left_seqlens = np.asarray(batch_data_deps_left_seqlens)
batch_data_deps_left_masks = np.asarray(batch_data_deps_left_masks)
# print "batch_data_deps_left_seqlens.shape:" + str(batch_data_deps_left_seqlens.shape)
# print "batch_data_pos.shape:" + str(batch_data_pos.shape)
# print "batch_data_deps_left.shape:" + str(batch_data_deps_left.shape)
# print "batch_data_deps_left_masks.shape:" + str(batch_data_deps_left_masks.shape)
curr_batch_cache = {"batch_data_padded_x":batch_data_padded_x,
"batch_data_padded_y":batch_data_padded_y,
"batch_data_seqlens":batch_data_seqlens,
"batch_data_pos":batch_data_pos,
"batch_data_deps_left":batch_data_deps_left,
"batch_data_deps_left_seqlens":batch_data_deps_left_seqlens,
"batch_data_deps_left_masks":batch_data_deps_left_masks}
batch_cache_train.append(curr_batch_cache)
logging.info("Saved batch %s to cache" % (i))
# logging.info("Batch %s: " % (i))
# logging.info("batch_data_padded_x.shape:%s " % (str(np.asarray(batch_data_padded_x).shape)))
# logging.info("batch_data_padded_y.shape:%s " % (str(np.asarray(batch_data_padded_y).shape)))
# logging.info("batch_data_deps_left_seqlens.shape:%s " % (str(np.asarray(batch_data_deps_left_seqlens).shape)))
# logging.info("batch_data_pos.shape:%s " % (str(np.asarray(batch_data_pos).shape)))
# logging.info("batch_data_deps_left.shape:%s " % (str(np.asarray(batch_data_deps_left).shape)))
# logging.info("batch_data_deps_left_masks.shape:%s " % (str(np.asarray(batch_data_deps_left_masks).shape)))
# Do the train step
start = ti.time()
# seq_model.input_x_deps
cost, acc = train_step(sess, seq_model,
curr_learn_rate=learning_rate_current,
x_batch=batch_data_padded_x,
y_batch=batch_data_padded_y,
x_batch_seq_len=batch_data_seqlens,
x_batch_pos=batch_data_pos,
x_batch_deps=batch_data_deps_left,
x_batch_deps_len=batch_data_deps_left_seqlens,
x_batch_deps_mask=batch_data_deps_left_masks
)
current_step = tf.train.global_step(sess, global_step)
# print " Train cost %2.2f | Train batch acc %2.2f %% in %s\n" % (cost, acc, ti.time()-start)
# logging.info("learning_rate:%s"%lrate)
logging.info(" train epoch time %s " % (ti.time() - start_epoch))
if eval_dev and epoch > 0 and (epoch+1) % eval_every == 0:
# Dev eval - once per epoch
logging.info("Dev set:")
input_y_all = []
pred_y_all = []
for i in range(0, dev_batches_cnt):
all_items_cnt = len(train_seq)
batch_from = i * batch_size_dev
batch_to = i * batch_size_dev + min(batch_size_dev, all_items_cnt - (i * batch_size_dev))
if len(batch_cache_dev) > i:
curr_batch_cache = batch_cache_dev[i]
batch_data_padded_x = curr_batch_cache["batch_data_padded_x"]
batch_data_padded_y = curr_batch_cache["batch_data_padded_y"]
batch_data_seqlens = curr_batch_cache["batch_data_seqlens"]
batch_data_pos = curr_batch_cache["batch_data_pos"]
batch_data_deps_left = curr_batch_cache["batch_data_deps_left"]
batch_data_deps_left_seqlens = curr_batch_cache["batch_data_deps_left_seqlens"]
batch_data_deps_left_masks = curr_batch_cache["batch_data_deps_left_masks"]
# logging.info("Loaded batch %s from cache" % i)
# print batch_data_padded_x
else:
batch_data = dev_seq[batch_from: batch_to]
batch_data_padded_x, batch_data_padded_y, batch_data_seqlens = prepare_batch_data(data=batch_data)
max_batch_sent_len = max(batch_data_seqlens)
batch_data_pos = dev_pos[batch_from: batch_to]
batch_data_pos = [pad(xx, pad_value=0, to_size=max_batch_sent_len) for xx in batch_data_pos]
batch_data_deps_left = dev_deps_left[batch_from: batch_to]
batch_data_deps_left, batch_data_deps_left_seqlens, batch_data_deps_left_masks = pad_and_get_mask(
batch_data_deps_left, pad_value=0)
batch_data_deps_left_seqlens = [pad(xx, pad_value=0, to_size=max_batch_sent_len) for xx in
batch_data_deps_left_seqlens]
batch_data_pos = np.asarray(batch_data_pos)
batch_data_deps_left = np.asarray(batch_data_deps_left)
batch_data_deps_left_seqlens = np.asarray(batch_data_deps_left_seqlens)
batch_data_deps_left_masks = np.asarray(batch_data_deps_left_masks)
curr_batch_cache = {"batch_data_padded_x": batch_data_padded_x,
"batch_data_padded_y": batch_data_padded_y,
"batch_data_seqlens": batch_data_seqlens,
"batch_data_pos": batch_data_pos,
"batch_data_deps_left": batch_data_deps_left,
"batch_data_deps_left_seqlens": batch_data_deps_left_seqlens,
"batch_data_deps_left_masks": batch_data_deps_left_masks}
batch_cache_dev.append(curr_batch_cache)
logging.info("Saved batch %s to cache" % i)
res_pred_y, cost, acc = dev_step(sess, seq_model,
x_batch=batch_data_padded_x,
y_batch=batch_data_padded_y,
x_batch_seq_len=batch_data_seqlens,
x_batch_pos=batch_data_pos,
x_batch_deps=batch_data_deps_left,
x_batch_deps_len=batch_data_deps_left_seqlens,
x_batch_deps_mask=batch_data_deps_left_masks)
# print batch_data_padded_y[10]
# print res_pred_y[10]
for j in range(0, len(batch_data)):
input_y_all.extend(batch_data_padded_y[j][:batch_data_seqlens[j]])
pred_y_all.extend(res_pred_y[j][:batch_data_seqlens[j]])
# eval
logging.info("Confusion matrix:")
conf_matrix = confusion_matrix(input_y_all, pred_y_all, labels=[1, 2, 3])
logging.info("\n"+str(conf_matrix))
logging.info("Results by class:")
# print_acc_from_conf_matrix(conf_matrix, classes_dict)
p_r_f1_acc_by_class_dict = get_prec_rec_fscore_acc_from_conf_matrix(conf_matrix, classes_dict)
logging.info("label: (prec, recall, f-score, accuracy)")
for k,v in p_r_f1_acc_by_class_dict.iteritems():
logging.info("%s:%s" % (k, str(v)))
if len(dev_gold) == 0:
dev_gold = input_y_all[:]
logging.info("Best f-score:%s" % best_res_by_lbl["B-EVENT"]["f_score"])
logging.info("Curr f-score:%s" % p_r_f1_acc_by_class_dict["B-EVENT"][2])
# acc_by_class_dict = get_acc_from_conf_matrix(conf_matrix, classes_dict)
if p_r_f1_acc_by_class_dict["B-EVENT"][2] > best_res_by_lbl["B-EVENT"]["f_score"]:
logging.info("Better result - F-Score!")
best_res_by_lbl["B-EVENT"]["precision"] = p_r_f1_acc_by_class_dict["B-EVENT"][0]
best_res_by_lbl["B-EVENT"]["recall"] = p_r_f1_acc_by_class_dict["B-EVENT"][1]
best_res_by_lbl["B-EVENT"]["f_score"] = p_r_f1_acc_by_class_dict["B-EVENT"][2]
best_res_by_lbl["B-EVENT"]["accuracy"] = p_r_f1_acc_by_class_dict["B-EVENT"][3]
# save
path = saver.save(sess, checkpoint_best)
logging.info("Saved best model checkpoint to {}\n".format(path))
best_res_by_lbl["B-EVENT"]["checkpoint"] = path
best_res_by_lbl["B-EVENT"]["confusion_matrix"] = conf_matrix
try:
logging.info("Trying to backup best_model_file")
curr_file = path
curr_file_meta = path+".meta"
backup_file = path.replace(self._checkpoint_dir, self._checkpoints_backup_dir)
backup_file_meta = backup_file+".meta"
copyfile(curr_file, backup_file)
copyfile(curr_file_meta, backup_file_meta)
logging.info("File backuped to %s" % backup_file)
except Exception as e:
logging.error(e)
dev_predictions_history.append(pred_y_all[:])
dev_confusion_matrix_history.append(conf_matrix)
logging.info(" Dev cost %2.2f | Dev acc %2.2f %% in %s\n" % (cost, acc, ti.time() - start))
if (epoch+1) % checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logging.info("Saved model checkpoint to {}\n".format(path))
# print "Class : P, R, F-Score:"
# print "B-Event:"
# print precision_recall_fscore_support(input_y_all, pred_y_all, average=None, pos_label=2)
# print "I-Event:"
# print precision_recall_fscore_support(input_y_all, pred_y_all, average=None, pos_label=3)
#
# save last checkpoint
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logging.info("Saved model checkpoint to {}\n".format(path))
# calculate accuracies for last 3 epochs
#
# logging.info("Democratic choice across last 3 epochs:")
# pred_hist = np.array(dev_predictions_history[-3:])
# pred_hist.astype(int)
# pred_summary = np.zeros((pred_hist.shape[1], 5))
# for i in range(pred_hist.shape[0]):
# for j in range(pred_hist.shape[1]):
# if pred_hist[i][j] in [1, 2, 3]:
# pred_summary[j][pred_hist[i][j] - 1] += 1.0
#
# pred_summary = pred_summary * [1.001, 1.002, 1.003] # give priority to some classes
#
# summ_preds = np.argmax(pred_summary, axis=1)+1
#
# conf_matrix = confusion_matrix(dev_gold, summ_preds, labels=[1, 2, 3])
# print conf_matrix
# print "Accuracy by class:"
# print_acc_from_conf_matrix(conf_matrix, classes_dict)
def eval(self,
test_files,
max_nr_sent,
batch_size,
output_data_json,
output_submission_file,
):
# Load data settings
max_sent_len = 1000
update_vocab = False
update_tags = False
unknown_tag = u'O'
mapping_file = None
data_x_fieldname = "tokens"
data_y_fieldname = "labels_event"
tag_start_index = 1
unknown_word = "<UNKNWN>"
pad_word = "<PAD>"
# Load vocab and embeddings extracted on training
vocab_and_embeddings = pickle.load(open(self._vocab_and_embeddings_file, 'rb'))
logging.info('Vocab and embeddings loaded from: %s' % self._vocab_and_embeddings_file)
vocab_and_embeddings_pos = pickle.load(open(self._vocab_and_embeddings_pos_file, 'rb'))
logging.info('POS: Vocab and embeddings loaded from: %s' % self._vocab_and_embeddings_pos_file)
vocab_and_embeddings_deps = pickle.load(open(self._vocab_and_embeddings_deps_file, 'rb'))
logging.info('DEPS: Vocab and embeddings loaded from: %s' % self._vocab_and_embeddings_deps_file)
# Load the data for labeling
corpus_vocab_input = LabelDictionary()
corpus_vocab_input.set_dict(vocab_and_embeddings["vocabulary"])
labels_lst = self._settings["labels_lst"]
classes_dict = self._settings["classes_dict"]
tac_corpus = TacPrepJsonCorpus([], labels_lst,
tag_start_index=1,
# we keep the 0 for padding symbol for Tensorflow dynamic stuff
vocab_start_index=0)
tac_corpus.set_word_dict(corpus_vocab_input)
# Load test data
logging.info("Loading test data from %s..." % test_files)
st = ti.time()
test_seq, test_seq_meta = tac_corpus.read_sequence_list_tac_json(test_files,
max_sent_len=max_sent_len,
max_nr_sent=max_nr_sent,
update_vocab=update_vocab,
update_tags=update_tags,
unknown_word=unknown_word,
unknown_tag=unknown_tag,
mapping_file=mapping_file,
data_x_fieldname=data_x_fieldname,
data_y_fieldname=data_y_fieldname)
pos_vocab_dict_emb = vocab_and_embeddings_pos["vocabulary"]
test_pos = Tac2016_EventNuggets_DataUtilities.get_data_idx_for_field(
data_meta=test_seq_meta,
field_name="pos",
field_vocab_dict=pos_vocab_dict_emb,
unknown_word=self.unknown_pos)
logging.info("test_pos[0]:%s" % test_pos[0])
deps_vocab_dict_emb = vocab_and_embeddings_deps["vocabulary"]
test_deps_left, test_deps_right = Tac2016_EventNuggets_DataUtilities.get_left_right_data_idx_for_deps(
data_meta=test_seq_meta,
field_name="deps_basic",
field_vocab_dict=deps_vocab_dict_emb,
unknown_lbl=self.unknown_deps,
zero_deps_lbl=self.zero_label_deps,
field_sent_tokens="tokens")
logging.info("test_deps_left[0]:%s" % test_deps_left[0])
logging.info("test_deps_right[0]:%s" % test_deps_right[0])
logging.info("Done in %s s" % (ti.time() - st))
logging.info("All sents:%s" % len(test_seq))
logging.info("With non zero labels:%s" % count_non_zero_label(test_seq, zero_label=tag_start_index))
logging.info("Done in %s s" % (ti.time() - st))
logging.info("Test data all sents:%s" % len(test_seq))
logging.info("With non zero labels:%s" % count_non_zero_label(test_seq, zero_label=tag_start_index))
checkpoint_file = self._checkpoint_best
test_predictions = self.load_model_and_eval_sequencelabels(test_seq=test_seq,
checkpoint_file=checkpoint_file,
n_classes=len(labels_lst),
classes_dict=classes_dict,
batch_size=batch_size,
test_pos=test_pos,
test_deps_left=test_deps_left,
test_deps_right=test_deps_right
)
print len(test_predictions)
# Fill metadata with predictions.
for i, item in enumerate(test_seq_meta):
default_label = 1
pred_labels_safe = [xx if xx in classes_dict else default_label for xx in test_predictions[i]]
item_str_labels = [classes_dict[xx] for xx in pred_labels_safe]
item["labels_event"] = item_str_labels
# Extract events
event_nuggets_by_docs = Tac2016_EventNuggets_DataUtilities.extract_event_nuggets(test_seq_meta)
# for doc in event_nuggets_by_docs[:3]:
# print doc
# Json file
output_json_file = output_data_json
Tac2016_EventNuggets_DataUtilities.save_data_to_json_file(test_seq_meta, output_json_file)
logging.info("Processed json saved to %s" % output_json_file)
# Writing submission file
system_name="aiphes_hd_t16"
output_file = "%s/%s_submission_output.tbf" % (self._output_dir, self._run_name)
Tac2016_EventNuggets_DataUtilities.save_to_output_tbf(output_file, event_nuggets_by_docs, system_name)
logging.info("Submission file saved to %s" % output_file)
def load_model_and_eval_sequencelabels(self,
                                       test_seq,
                                       test_pos,
                                       test_deps_left,
                                       test_deps_right,
                                       checkpoint_file,
                                       n_classes,
                                       classes_dict,
                                       batch_size):
    """Restore ``checkpoint_file`` into a fresh graph and predict labels.

    Rebuilds the model from the saved meta graph (no Python model object
    needed), looks up the input placeholders and the ``accuracy/preds_y``
    output by name, runs prediction batch by batch and logs a confusion
    matrix plus per-class F-scores against the gold labels in
    ``test_seq``.

    Returns:
        A list with one prediction sequence (trimmed to the true sentence
        length) per input sentence, in corpus order.
    """
    # settings
    pad_value = 0
    allow_soft_placement = True
    log_device_placement = True
    test_batches_cnt = (len(test_seq) / batch_size) + (0 if len(test_seq) % batch_size == 0 else 1)
    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=allow_soft_placement,
            log_device_placement=log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
            saver.restore(sess, checkpoint_file)
            logging.info("Graph operations:")
            for opp in graph.get_operations():
                if opp.name.find("input_") > -1:
                    logging.info(opp.name)
            # get input placeholders by their graph names
            input_x = graph.get_operation_by_name("input_x").outputs[0]
            input_y = graph.get_operation_by_name("input_y").outputs[0]
            input_seq_len = graph.get_operation_by_name("input_seq_len").outputs[0]
            input_x_pos = graph.get_operation_by_name("input_x_pos").outputs[0]
            input_x_deps = graph.get_operation_by_name("input_x_deps").outputs[0]
            input_x_deps_len = graph.get_operation_by_name("input_x_deps_len").outputs[0]
            input_x_deps_mask = graph.get_operation_by_name("input_x_deps_mask").outputs[0]
            # get output layer - we want to use the output representations as features for other stuff
            output_layer = graph.get_operation_by_name("output_layer").outputs[0]
            preds_y = graph.get_operation_by_name("accuracy/preds_y").outputs[0]

            def eval_step(sess, x_batch, y_batch, x_batch_seq_len, x_batch_pos,
                          x_batch_deps, x_batch_deps_len, x_batch_deps_mask):
                # Prediction only - no loss/accuracy fetch, no weight update.
                feed_dict = {
                    input_x: x_batch,
                    input_y: y_batch,
                    input_seq_len: x_batch_seq_len,
                    input_x_pos: x_batch_pos,
                    input_x_deps: x_batch_deps,
                    input_x_deps_len: x_batch_deps_len,
                    input_x_deps_mask: x_batch_deps_mask,
                }
                res_output_y = sess.run(
                    [preds_y],
                    feed_dict=feed_dict)
                return res_output_y

            logging.info("Evaluation:")
            input_y_flat = []
            pred_y_flat = []
            test_predicts_all = []
            for i in range(0, test_batches_cnt):
                all_items_cnt = len(test_seq)
                batch_from = i * batch_size
                batch_to = i * batch_size + min(batch_size, all_items_cnt - (i * batch_size))
                batch_data = test_seq[batch_from: batch_to]
                logging.info("Batch %s - %s items" % (i, len(batch_data)))
                batch_data_padded_x, batch_data_padded_y, batch_data_seqlens \
                    = prepare_batch_data(data=batch_data)
                max_batch_sent_len = max(batch_data_seqlens)
                batch_data_pos = test_pos[batch_from: batch_to]
                batch_data_pos = [pad(xx, pad_value=0, to_size=max_batch_sent_len) for xx in batch_data_pos]
                batch_data_deps_left = test_deps_left[batch_from: batch_to]
                batch_data_deps_left, batch_data_deps_left_seqlens, batch_data_deps_left_masks = pad_and_get_mask(
                    batch_data_deps_left, pad_value=0)
                batch_data_deps_left_seqlens = [pad(xx, pad_value=0, to_size=max_batch_sent_len) for xx in
                                                batch_data_deps_left_seqlens]
                batch_data_pos = np.asarray(batch_data_pos)
                batch_data_deps_left = np.asarray(batch_data_deps_left)
                batch_data_deps_left_seqlens = np.asarray(batch_data_deps_left_seqlens)
                batch_data_deps_left_masks = np.asarray(batch_data_deps_left_masks)
                res_output_y = eval_step(sess,
                                         x_batch=batch_data_padded_x,
                                         y_batch=batch_data_padded_y,
                                         x_batch_seq_len=batch_data_seqlens,
                                         x_batch_pos=batch_data_pos,
                                         x_batch_deps=batch_data_deps_left,
                                         x_batch_deps_len=batch_data_deps_left_seqlens,
                                         x_batch_deps_mask=batch_data_deps_left_masks
                                         )
                # sess.run returned a single-element fetch list
                res_output_y = res_output_y[0]
                logging.info(res_output_y)
                for j in range(0, len(batch_data)):
                    # flat pred and y for calculating the accuracy
                    input_y_flat.extend(batch_data_padded_y[j][:batch_data_seqlens[j]])
                    pred_y_flat.extend(res_output_y[j][:batch_data_seqlens[j]])
                    # predictions for all samples (sentences)
                    test_predicts_all.append(res_output_y[j][:batch_data_seqlens[j]])
            logging.info("Confusion matrix:")
            conf_matrix = confusion_matrix(input_y_flat, pred_y_flat, labels=[1, 2, 3])
            logging.info("\n" + str(conf_matrix))
            logging.info("F-score by class:")
            acc_by_class_dict = get_prec_rec_fscore_acc_from_conf_matrix(conf_matrix, classes_dict)
            acc_print = "\n" + string.join(["%s : F1=%s, %s" % (k, v[2], v) for k, v in acc_by_class_dict.iteritems()], "\n")
            logging.info(acc_print)
    return test_predicts_all
# Set logging info
# Module-level logging configuration: runs once on import and attaches a
# console handler to the root logger so all logging.* calls below are visible.
logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s]: %(levelname)s : %(message)s')
logger = logging.getLogger()  # root logger
logger.setLevel(logging.DEBUG)
# Enable file logging
# NOTE: file logging is currently disabled - logFileName is built but unused.
logFileName = '%s/%s-%s.log' % ('logs', 'sup_parser_v1', '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.now()))
# fileHandler = logging.FileHandler(logFileName, 'wb')
# fileHandler.setFormatter(logFormatter)
# logger.addHandler(fileHandler)
# Enable console logging
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
# SAMPLE RUN:
# TRAIN:
# run_name="run_v1_tr201415_eval2015"
# output_dir=output/${run_name}
# mkdir -p ${output_dir}
# #model dir where output models are saved after train
# model_dir=models/${run_name}
# rm -rf -- ${model_dir}
# mkdir -p ${model_dir}
#
# scale_features=True
# # resources
# emb_model_type=w2v
# emb_model="resources/external/w2v_embeddings/qatarliving_qc_size20_win10_mincnt5_rpl_skip1_phrFalse_2016_02_23.word2vec.bin"
# # emb_model=resources/closed_track/word2vec_google/GoogleNews-vectors-negative300.bin
#
# word2vec_load_bin=False
# # word2vec_load_bin=True # for google pretrained embeddings
# python tac_kbp/detect_events_v1_bilstm.py -cmd:train -run_name:${run_name} -emb_model_type:${emb_model_type} -emb_model:${emb_model}
if __name__ == "__main__":
    # Command-line driver. All parameters come through CommonUtilities
    # -key:value style arguments in sys.argv; each assignment below sets a
    # default and then lets the command line override it.
    # Run parameters
    cmd = 'train'
    cmd = CommonUtilities.get_param_value("cmd", sys.argv, cmd)
    logging.info('cmd:%s' % cmd)
    # run name for output params
    run_name = ""
    run_name = CommonUtilities.get_param_value("run_name", sys.argv, run_name)
    if run_name != "":
        logging.info(('run_name:%s' % run_name))
    else:
        logging.error('Error: missing input file parameter - run_name')
        quit()
    # dir for saving and loading the models
    model_dir = ""
    model_dir = CommonUtilities.get_param_value("model_dir", sys.argv, model_dir)
    # dir for saving output of the parsing
    output_dir = ""
    output_dir = CommonUtilities.get_param_value("output_dir", sys.argv, output_dir)
    logging.info('model_dir:%s' % model_dir)
    model_file_basename = '%s/%s_model_' % (model_dir, run_name)
    scale_file_basename = '%s/%s_scalerange_' % (model_dir, run_name)
    # Input Data (defaults; overridden by train/dev/test file parameters below)
    data_tac2014_train = data_dir + "/clear_data/data_tac2014_train.json"
    data_tac2014_eval = data_dir + "/clear_data/data_tac2014_eval.json"
    data_tac2015_train = data_dir + "/clear_data/data_tac2015_train.json"
    data_tac2015_eval = data_dir + "/clear_data/data_tac2015_eval.json"
    # Settings
    # Perform scaling on the features
    scale_features = False
    scale_features = CommonUtilities.get_param_value_bool("scale_features", sys.argv, scale_features)
    logging.info('scale_features:{0}'.format(scale_features))
    embeddings_trainable = True
    embeddings_trainable = CommonUtilities.get_param_value_bool("emb_train", sys.argv, embeddings_trainable)
    logging.info('embeddings_trainable:{0}'.format(embeddings_trainable))
    embeddings_size = 50
    embeddings_size = CommonUtilities.get_param_value_int("emb_size", sys.argv, embeddings_size)
    logging.info('embeddings_size:{0}'.format(embeddings_size))
    # w2v/doc2vec params
    # word2vec word2vec_model file
    embeddings_model_type = "w2v"  # w2v, dep, rand
    embeddings_model_type = CommonUtilities.get_param_value("emb_model_type", sys.argv, default=embeddings_model_type)
    logging.info('embeddings_model_type:%s' % embeddings_model_type)
    embeddings_model_file = ""
    embeddings_model_file = CommonUtilities.get_param_value("emb_model", sys.argv, default=embeddings_model_file)
    logging.info('embeddings_model_file:%s' % embeddings_model_file)
    # load word2vec word2vec_model as binary file
    word2vec_load_bin = False
    word2vec_load_bin = CommonUtilities.get_param_value_bool("word2vec_load_bin", sys.argv, word2vec_load_bin)
    logging.info('word2vec_load_bin:{0}'.format(word2vec_load_bin))
    # Create the main class
    event_labeling = True
    event_type_classify = False
    event_realis_classify = False
    event_coref = False
    logging.info("Jobs to run:")
    logging.info("event_labeling:%s" % event_labeling)
    logging.info("event_type_classify:%s" % event_type_classify)
    logging.info("event_realis_classify:%s" % event_realis_classify)
    logging.info("event_coref:%s" % event_coref)
    classifier_name = "event_labeler_BiLSTM_v3_posdeps"
    magic_box = EventLabelerAndClassifier_v2_BiLSTM_v3_posdep(
        classifier_name=classifier_name,
        run_name=run_name,
        output_dir=output_dir,
        model_dir=model_dir,
        event_labeling=event_labeling,
        event_type_classify=event_type_classify,
        event_realis_classify=event_realis_classify,
        event_coref=event_coref,
    )
    max_nr_sent = 1000
    max_nr_sent = CommonUtilities.get_param_value_int("max_sents", sys.argv, max_nr_sent)
    logging.info('max_nr_sent:{0}'.format(max_nr_sent))
    if max_nr_sent == 0:
        max_nr_sent = 1000000  # Current corpus reading method require int..
    batch_size = 50
    batch_size = CommonUtilities.get_param_value_int("batch_size", sys.argv, batch_size)
    logging.info('batch_size:{0}'.format(batch_size))
    lstm_hidden_size = 50
    lstm_hidden_size = CommonUtilities.get_param_value_int("lstm_hidden_size", sys.argv, lstm_hidden_size)
    logging.info('lstm_hidden_size:{0}'.format(lstm_hidden_size))
    deps_embeddings_vec_size = 50
    deps_embeddings_vec_size = CommonUtilities.get_param_value_int("deps_emb_size", sys.argv, deps_embeddings_vec_size)
    logging.info('deps_embeddings_vec_size:{0}'.format(deps_embeddings_vec_size))
    pos_embeddings_vec_size = 50
    pos_embeddings_vec_size = CommonUtilities.get_param_value_int("pos_emb_size", sys.argv, pos_embeddings_vec_size)
    logging.info('pos_embeddings_vec_size:{0}'.format(pos_embeddings_vec_size))
    if cmd == "train":
        logging.info("==========================")
        logging.info("======== TRAINING ========")
        logging.info("==========================")
        embeddings_vec_size = embeddings_size
        if embeddings_model_type == "w2v":
            logging.info("Loading w2v model..")
            if word2vec_load_bin:
                # use this for google vectors
                embeddings_model = Word2Vec.load_word2vec_format(embeddings_model_file, binary=True)
            else:
                embeddings_model = Word2Vec.load(embeddings_model_file)
            # the embedding size is taken from the loaded model, not emb_size
            embeddings_vec_size = embeddings_model.syn0.shape[1]
        elif embeddings_model_type == "rand":
            embeddings_model = None
        else:
            raise Exception("embeddings_model_type=%s is not yet supported!" % embeddings_model_type)
        # train data
        input_data_fileslist_train = [data_tac2014_train, data_tac2015_train, data_tac2014_eval]
        train_data_files = ""
        train_data_files = CommonUtilities.get_param_value("train_data_files", sys.argv,
                                                           default=train_data_files)
        if train_data_files == "":
            logging.error("No train_data_files provided. ")
            exit()
        else:
            logging.info('train_data_files:%s' % train_data_files)
            input_data_fileslist_train = train_data_files.split(";")
        # dev data
        eval_dev = True
        input_data_fileslist_dev = [data_tac2015_eval]
        dev_data_files = ""
        dev_data_files = CommonUtilities.get_param_value("dev_data_files", sys.argv,
                                                         default=dev_data_files)
        if dev_data_files == "":
            logging.error("No dev_data_files provided. ")
            exit()
        else:
            logging.info('dev_data_files:%s' % dev_data_files)
            input_data_fileslist_dev = dev_data_files.split(";")
        learning_rate = 0.1
        learning_rate = CommonUtilities.get_param_value_float("learning_rate", sys.argv,
                                                              default=learning_rate)
        logging.info('learning_rate:%s' % learning_rate)
        train_epochs_cnt = 6
        train_epochs_cnt = CommonUtilities.get_param_value_int("train_epochs_cnt", sys.argv,
                                                               default=train_epochs_cnt)
        logging.info('train_epochs_cnt:%s' % train_epochs_cnt)
        learning_rate_fixed = True
        learning_rate_fixed = CommonUtilities.get_param_value_bool("learning_rate_fixed", sys.argv,
                                                                   default=learning_rate_fixed)
        logging.info('learning_rate_fixed:%s' % learning_rate_fixed)
        include_token_layers = {'output_layer': False}  # always use on the input
        include_pos_layers = {'input_layer': False, 'output_layer': False}
        include_deps_layers = {'input_layer': False, 'output_layer': False}
        magic_box.train(train_files=input_data_fileslist_train,
                        dev_files=input_data_fileslist_dev,
                        embeddings_model=embeddings_model,
                        embeddings_type=embeddings_model_type,
                        embeddings_vec_size=embeddings_vec_size,
                        embeddings_trainable=embeddings_trainable,
                        pos_embeddings_size=pos_embeddings_vec_size,
                        deps_embeddings_size=deps_embeddings_vec_size,
                        eval_dev=eval_dev,
                        max_nr_sent=max_nr_sent,
                        train_epochs_cnt=train_epochs_cnt,
                        learning_rate=learning_rate,
                        hidden_size=lstm_hidden_size,
                        batch_size=batch_size,
                        include_deps_layers=include_deps_layers,
                        include_pos_layers=include_pos_layers,
                        include_token_layers=include_token_layers,
                        learning_rate_fixed=learning_rate_fixed)
    elif cmd == "eval":
        logging.info("==========================")
        logging.info("======= EVALUATION =======")
        logging.info("==========================")
        # test data
        input_data_fileslist_test = []
        test_data_files = ""
        test_data_files = CommonUtilities.get_param_value("test_data_files", sys.argv,
                                                          default=test_data_files)
        # -input_is_proc_data:${input_is_proc_data} -output_proc_data_json:${output_proc_data_json} -output_submission_file:${output_submission_file}
        output_data_json = "output_proc_data_%s.json.txt" % run_name
        output_data_json = CommonUtilities.get_param_value("output_proc_data_json", sys.argv, default=output_data_json)
        logging.info('output_data_json:%s' % output_data_json)
        output_submission_file = "output_submission_%s.tbf.txt" % run_name
        output_submission_file = CommonUtilities.get_param_value("output_submission_file", sys.argv,
                                                                 default=output_submission_file)
        logging.info('output_submission_file:%s' % output_submission_file)
        if test_data_files == "":
            logging.error("No test_data_files provided. ")
            exit()
        else:
            logging.info('test_data_files:%s' % test_data_files)
            input_data_fileslist_test = test_data_files.split(";")
        batch_size_eval = 300
        magic_box.load_settings()
        magic_box.eval(test_files=input_data_fileslist_test,
                       max_nr_sent=max_nr_sent,
                       batch_size=batch_size_eval,
                       output_data_json=output_data_json,
                       output_submission_file=output_submission_file,
                       # pos_embeddings_size=pos_embeddings_vec_size,
                       # deps_embeddings_size=deps_embeddings_vec_size,
                       )
    else:
        logging.error("Unknown command:%s" % cmd)
| [
"tbmihailov@gmail.com"
] | tbmihailov@gmail.com |
773cf06a03db23a20481c582d9f289de0f9226f7 | 28489cb7b78af54f0812362682ed5245ea8f844c | /6 - Python/Strings/8 - Desginer Door Mat.py | 4ab5b2cebf19eaea093d9d7aafd0429d6b725d68 | [
"MIT"
] | permissive | sbobbala76/Python.HackerRank | 7782b179d0c7b25711fe2b98763beb9ec22d7b09 | 33c5c30d1e6af1f48370dbb075f168d339f7438a | refs/heads/master | 2021-05-23T12:29:11.302406 | 2020-06-01T12:28:49 | 2020-06-01T12:28:49 | 253,286,651 | 0 | 0 | MIT | 2020-04-05T17:00:33 | 2020-04-05T17:00:32 | null | UTF-8 | Python | false | false | 276 | py | n, m = map(int,input().split()) # More than 6 lines of code will result in 0 score. Blank lines are not counted.
# HackerRank "Designer Door Mat": print an n x m mat of '-' with a growing
# then shrinking ".|." pattern and a centered WELCOME line.
# n and m are read from stdin on the preceding line (m == 3 * n).
# Top half: 1, 3, 5, ... copies of ".|." centered with '-' padding.
for i in range(1, n, 2):
    print ((".|." * i).center(m, '-'))
# Middle line.
print ("WELCOME".center(m, '-'))
# Bottom half mirrors the top: n-2, n-4, ..., 1 copies.
for i in range(n - 2, -1, -2):
    print ((".|." * i).center(m, '-'))
| [
"ehouarn.perret@outlook.com"
] | ehouarn.perret@outlook.com |
5ce3ea9f60875b83c78de02f52a5b51e04639afd | 45df508e4c99f453ca114053a92deb65939f18c9 | /tfx/tools/cli/handler/handler_factory.py | 3bb20df41fbe67898d03601ac853a29e07ad5135 | [
"Apache-2.0"
] | permissive | VonRosenchild/tfx | 604eaf9a3de3a45d4084b36a478011d9b7441fc1 | 1c670e92143c7856f67a866f721b8a9368ede385 | refs/heads/master | 2020-08-09T13:45:07.067267 | 2019-10-10T03:07:20 | 2019-10-10T03:07:48 | 214,100,022 | 1 | 0 | Apache-2.0 | 2019-10-10T06:06:11 | 2019-10-10T06:06:09 | null | UTF-8 | Python | false | false | 4,015 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to choose engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import subprocess
import sys
import click
from typing import Dict, Text, Any
from tfx.tools.cli import labels
from tfx.tools.cli.handler import base_handler
def detect_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:
  """Detect the orchestration engine from the local environment.

  Details:
    When the engine flag is set to 'auto', this method first finds all the
    packages in the local environment. The environment is first checked
    for multiple orchestrators and if true the user must rerun the command
    with the required engine. If only one orchestrator is present, the
    engine is set to that one; otherwise Beam is used as the fallback.

  Args:
    flags_dict: A dictionary containing the flags of a command. Its
      ENGINE_FLAG entry is overwritten with the detected engine.

  Returns:
    Corresponding Handler object.
  """
  # Decode the pip output: str() on the raw bytes would produce the repr
  # ("b'...'" with escaped newlines) rather than the actual text.
  packages_list = subprocess.check_output(
      ['pip', 'freeze', '--local']).decode('utf-8')
  if (labels.AIRFLOW_PACKAGE_NAME in packages_list and
      labels.KUBEFLOW_PACKAGE_NAME in packages_list):
    sys.exit('Multiple orchestrators found. Choose one using --engine flag.')
  if labels.AIRFLOW_PACKAGE_NAME in packages_list:
    click.echo('Detected Airflow.')
    click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
    flags_dict[labels.ENGINE_FLAG] = 'airflow'
    # Handlers are imported lazily so each orchestrator's dependencies are
    # only required when that orchestrator is actually selected.
    from tfx.tools.cli.handler import airflow_handler  # pylint: disable=g-import-not-at-top
    return airflow_handler.AirflowHandler(flags_dict)
  elif labels.KUBEFLOW_PACKAGE_NAME in packages_list:
    click.echo('Detected Kubeflow.')
    click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
    flags_dict[labels.ENGINE_FLAG] = 'kubeflow'
    from tfx.tools.cli.handler import kubeflow_handler  # pylint: disable=g-import-not-at-top
    return kubeflow_handler.KubeflowHandler(flags_dict)
  else:
    click.echo('Detected Beam.')
    flags_dict[labels.ENGINE_FLAG] = 'beam'
    from tfx.tools.cli.handler import beam_handler  # pylint: disable=g-import-not-at-top
    return beam_handler.BeamHandler(flags_dict)
def create_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:
  """Retrieve handler from the environment using the --engine flag.

  Args:
    flags_dict: A dictionary containing the flags of a command.

  Raises:
    RuntimeError: When engine is not supported by TFX.

  Returns:
    Corresponding Handler object.
  """
  engine = flags_dict[labels.ENGINE_FLAG]
  # Decode the pip output: str() on the raw bytes would produce the repr
  # ("b'...'" with escaped newlines) rather than the actual text.
  packages_list = subprocess.check_output(
      ['pip', 'freeze', '--local']).decode('utf-8')
  if engine == 'airflow':
    # Verify the requested orchestrator is installed before handing off.
    if labels.AIRFLOW_PACKAGE_NAME not in packages_list:
      sys.exit('Airflow not found.')
    from tfx.tools.cli.handler import airflow_handler  # pylint: disable=g-import-not-at-top
    return airflow_handler.AirflowHandler(flags_dict)
  elif engine == 'kubeflow':
    if labels.KUBEFLOW_PACKAGE_NAME not in packages_list:
      sys.exit('Kubeflow not found.')
    from tfx.tools.cli.handler import kubeflow_handler  # pylint: disable=g-import-not-at-top
    return kubeflow_handler.KubeflowHandler(flags_dict)
  elif engine == 'beam':
    # Beam needs no extra package check; it ships with TFX.
    from tfx.tools.cli.handler import beam_handler  # pylint: disable=g-import-not-at-top
    return beam_handler.BeamHandler(flags_dict)
  elif engine == 'auto':
    return detect_handler(flags_dict)
  else:
    raise RuntimeError('Engine {} is not supported.'.format(engine))
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
f653716998f94f2dc22f92e0238512eba7349306 | f12ec753dc42958714343b5aa8a11a83f7fe5e1a | /roboticstoolbox/backend/xacro/tests/test_xacro.py | 779c4295ac73f3803c64e954b3dd9bc067a42523 | [
"MIT"
] | permissive | RPellowski/robotics-toolbox-python | 6bedecccbcf14740c3aa65823b5bab8ba3ade93b | 837b33ebbca1347126a611edb83131e284dd065b | refs/heads/master | 2023-01-01T04:53:23.117118 | 2020-10-12T05:25:47 | 2020-10-12T05:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,455 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Open Source Robotics Foundation, Inc.
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Open Source Robotics Foundation, Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Authors: Stuart Glaser, William Woodall, Robert Haschke
# Maintainer: Robert Haschke <rhaschke@techfak.uni-bielefeld.de>
from __future__ import print_function
import ast
from contextlib import contextmanager
import os.path
import re
import subprocess
import sys
import unittest
from roboticstoolbox.backend import xacro
from roboticstoolbox.backend.xacro.cli import process_args
import xml.dom
from xml.dom.minidom import parseString
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
# regex to match one or more consecutive whitespace characters; used by the
# comparison helpers below to normalize text before comparing
whitespace = re.compile(r'\s+')
def text_values_match(a, b):
    """Return True if two text values are considered equivalent.

    Equivalence holds when (1) the whitespace-normalized strings are equal,
    (2) both strings parse (via ast.literal_eval) to equal dicts, ignoring
    key order, or (3) the whitespace-separated tokens match pairwise, with
    numeric tokens compared using an absolute tolerance of 1e-9.
    """
    def normalized(text):
        return re.sub(r'\s+', ' ', text).strip()

    # generic comparison
    if normalized(a) == normalized(b):
        return True

    # special handling of dicts: ignore order
    try:
        lhs = ast.literal_eval(a)
        rhs = ast.literal_eval(b)
        if isinstance(lhs, dict) and isinstance(rhs, dict) and lhs == rhs:
            return True
    except Exception:  # Attribute values aren't dicts
        pass

    # on failure, split a and b at whitespace and compare token by token
    tokens_a = a.split()
    tokens_b = b.split()
    if len(tokens_a) != len(tokens_b):  # pragma: no cover
        return False
    for left, right in zip(tokens_a, tokens_b):
        if left == right:
            continue
        try:  # compare numeric values only up to some accuracy
            if abs(float(left) - float(right)) > 1.0e-9:
                return False
        except ValueError:  # values aren't numeric and not identical
            return False
    return True
def all_attributes_match(a, b):
    """Return True if two DOM elements carry equivalent attribute sets.

    Attribute names must be identical; values are compared with
    text_values_match. Mismatches are reported on stdout.
    """
    if len(a.attributes) != len(b.attributes):  # pragma: no cover
        print('Different number of attributes')
        return False
    # Sort both attribute lists so comparison is order-independent.
    sorted_a = sorted(a.attributes.items())
    sorted_b = sorted(b.attributes.items())
    for (name_a, value_a), (name_b, value_b) in zip(sorted_a, sorted_b):
        if name_a != name_b:  # pragma: no cover
            print('Different attribute names: %s and %s' % (name_a, name_b))
            return False
        if not text_values_match(value_a, value_b):  # pragma: no cover
            print('Different attribute values: %s and %s' % (value_a, value_b))
            return False
    return True
def text_matches(a, b):
    """Compare two text values, printing a diagnostic on mismatch."""
    if not text_values_match(a, b):
        print("Different text values: '%s' and '%s'" % (a, b))
        return False
    return True
def nodes_match(a, b, ignore_nodes):
    """Recursively compare two DOM nodes and their subtrees.

    Text-valued nodes (text, CDATA, comments) are compared via
    text_matches; elements must agree on tag name, attributes, and
    (recursively) children. Node types listed in ignore_nodes, as well as
    whitespace-only text nodes, are skipped on both sides when walking
    children. Mismatches are reported on stdout and yield False.
    """
    # Both sides exhausted: trivially equal.
    if not a and not b:
        return True
    # Only one side exhausted: structural mismatch.
    if not a or not b:  # pragma: no cover
        return False
    if a.nodeType != b.nodeType:  # pragma: no cover
        print('Different node types: %s and %s' % (a, b))
        return False
    # compare text-valued nodes
    if a.nodeType in [xml.dom.Node.TEXT_NODE,
                      xml.dom.Node.CDATA_SECTION_NODE,
                      xml.dom.Node.COMMENT_NODE]:
        return text_matches(a.data, b.data)
    # ignore all other nodes except ELEMENTs
    if a.nodeType != xml.dom.Node.ELEMENT_NODE:  # pragma: no cover
        return True
    # compare ELEMENT nodes
    if a.nodeName != b.nodeName:  # pragma: no cover
        print('Different element names: %s and %s' % (a.nodeName, b.nodeName))
        return False
    if not all_attributes_match(a, b):  # pragma: no cover
        return False
    # Walk the child lists of both elements in lockstep.
    a = a.firstChild
    b = b.firstChild
    while a or b:
        # ignore whitespace-only text nodes
        # we could have several text nodes in a row, due to replacements
        while (a and
               ((a.nodeType in ignore_nodes) or
                (
                    a.nodeType == xml.dom.Node.TEXT_NODE
                    and whitespace.sub('', a.data) == ""))):
            a = a.nextSibling
        while (b and
               ((b.nodeType in ignore_nodes) or
                (
                    b.nodeType == xml.dom.Node.TEXT_NODE
                    and whitespace.sub('', b.data) == ""))):
            b = b.nextSibling
        # Compare the current pair; the recursion also checks their subtrees.
        if not nodes_match(a, b, ignore_nodes):  # pragma: no cover
            return False
        if a:
            a = a.nextSibling
        if b:
            b = b.nextSibling
    return True
def xml_matches(a, b, ignore_nodes=None):
    """Compare two XML inputs for structural equality.

    Either argument may be an XML string, a Document, or an element node;
    strings are parsed and Documents unwrapped before comparison. On a
    mismatch both trees are dumped to stdout for inspection.

    Args:
        a: XML string, Document, or element node.
        b: XML string, Document, or element node.
        ignore_nodes: optional list of nodeType constants to skip during
            comparison (e.g. xml.dom.Node.COMMENT_NODE).

    Returns:
        True if the two trees match, False otherwise.
    """
    # Use a None sentinel instead of a mutable [] default so a single list
    # instance is never shared across calls.
    if ignore_nodes is None:
        ignore_nodes = []
    if isinstance(a, str):
        return xml_matches(parseString(a).documentElement, b, ignore_nodes)
    if isinstance(b, str):
        return xml_matches(a, parseString(b).documentElement, ignore_nodes)
    if a.nodeType == xml.dom.Node.DOCUMENT_NODE:
        return xml_matches(a.documentElement, b, ignore_nodes)
    if b.nodeType == xml.dom.Node.DOCUMENT_NODE:
        return xml_matches(a, b.documentElement, ignore_nodes)
    if not nodes_match(a, b, ignore_nodes):  # pragma: no cover
        print('Match failed:')
        a.writexml(sys.stdout)
        print()
        print('=' * 78)
        b.writexml(sys.stdout)
        print()
        return False
    return True
# capture output going to file=sys.stdout | sys.stderr
@contextmanager
def capture_stderr(function, *args, **kwargs):
    """Run function(*args, **kwargs) while capturing sys.stderr.

    Yields a (result, captured_text) tuple. sys.stderr is restored even if
    the function or the with-body raises; the previous version skipped the
    restore on an exception, leaking the redirection for the rest of the
    process.
    """
    # temporarily replace sys.stderr with StringIO()
    old, sys.stderr = sys.stderr, StringIO()
    try:
        result = function(*args, **kwargs)
        sys.stderr.seek(0)
        yield (result, sys.stderr.read())
    finally:
        sys.stderr = old  # restore sys.stderr
class TestMatchXML(unittest.TestCase):
    """Unit tests for the text_matches/xml_matches comparison helpers."""

    def test_normalize_whitespace_text(self):
        # A whitespace-only string is equivalent to the empty string.
        self.assertTrue(text_matches("", " \t\n\r"))

    def test_normalize_whitespace_trim(self):
        # Leading/trailing whitespace and runs of whitespace are collapsed.
        self.assertTrue(text_matches(" foo bar ", "foo \t\n\r bar"))

    def test_match_similar_numbers(self):
        # Numeric tokens compare with an absolute tolerance of 1e-9.
        self.assertTrue(text_matches("0.123456789", "0.123456788"))

    def test_mismatch_different_numbers(self):
        self.assertFalse(text_matches("0.123456789", "0.1234567879"))

    def test_match_unordered_dicts(self):
        # Dict-valued strings compare independent of key order.
        self.assertTrue(
            text_matches("{'a': 1, 'b': 2, 'c': 3}", "{'c': 3, 'b': 2, 'a': 1}"))

    def test_mismatch_different_dicts(self):
        self.assertFalse(
            text_matches("{'a': 1, 'b': 2, 'c': 3}", "{'c': 3, 'b': 2, 'a': 0}"))

    def test_empty_node_vs_whitespace(self):
        # Whitespace-only text children are ignored by nodes_match.
        self.assertTrue(xml_matches('''<foo/>''', '''<foo> \t\n\r </foo>'''))

    def test_whitespace_vs_empty_node(self):
        self.assertTrue(xml_matches('''<foo> \t\n\r </foo>''', '''<foo/>'''))

    def test_normalize_whitespace_nested(self):
        self.assertTrue(xml_matches(
            '''<a><b/></a>''', '''<a>\n<b> </b> </a>'''))

    def test_ignore_comments(self):
        # Comment nodes are skipped when listed in ignore_nodes.
        self.assertTrue(xml_matches(
            '''<a><b/><!-- foo --> <!-- bar --></a>''',
            '''<a><b/></a>''', [xml.dom.Node.COMMENT_NODE]))
class TestXacroFunctions(unittest.TestCase):
    """Unit tests for standalone helper functions of the xacro module."""

    def test_is_valid_name(self):
        self.assertTrue(xacro.is_valid_name("_valid_name_123"))
        # syntactically correct keyword
        self.assertFalse(xacro.is_valid_name('pass'))
        self.assertFalse(xacro.is_valid_name('foo '))  # trailing whitespace
        self.assertFalse(xacro.is_valid_name(' foo'))  # leading whitespace
        self.assertFalse(xacro.is_valid_name('1234'))  # number
        self.assertFalse(xacro.is_valid_name('1234abc'))  # number and letters
        self.assertFalse(xacro.is_valid_name(''))  # empty string
        self.assertFalse(xacro.is_valid_name(' '))  # whitespace only
        self.assertFalse(xacro.is_valid_name('foo bar'))  # several tokens
        self.assertFalse(xacro.is_valid_name('no-dashed-names-for-you'))
        # dot separates fields
        self.assertFalse(xacro.is_valid_name('invalid.too'))

    def test_resolve_macro(self):
        # define three nested macro dicts with the same macro names (keys)
        content = {'simple': 'simple'}
        ns2 = dict({k: v + '2' for k, v in content.items()})
        ns1 = dict({k: v + '1' for k, v in content.items()})
        ns1.update(ns2=ns2)
        macros = dict(content)
        macros.update(ns1=ns1)
        # Resolution walks dot-separated namespaces into nested dicts.
        self.assertEqual(xacro.resolve_macro('simple', macros), 'simple')
        self.assertEqual(xacro.resolve_macro('ns1.simple', macros), 'simple1')
        self.assertEqual(xacro.resolve_macro(
            'ns1.ns2.simple', macros), 'simple2')

    def check_macro_arg(self, s, param, forward, default, rest):
        # Helper (not a test itself): parse s with parse_macro_arg and verify
        # the extracted parameter name, forwarded property + default value,
        # and the remaining unparsed string.
        p, v, r = xacro.parse_macro_arg(s)
        self.assertEqual(
            p, param, msg="'{0}' != '{1}' parsing {2}".format(p, param, s))
        if forward or default:
            self.assertTrue(v is not None)
            self.assertEqual(
                v[0], forward, msg="'{0}' != '{1}' parsing {2}".format(
                    v[0], forward, s))
            self.assertEqual(
                v[1], default, msg="'{0}' != '{1}' parsing {2}".format(
                    v[1], default, s))
        else:
            self.assertTrue(v is None)
        self.assertEqual(
            r, rest, msg="'{0}' != '{1}' parsing {2}".format(r, rest, s))

    def test_parse_macro_arg(self):
        # Exercise combinations of forwarding markers ('^', '^|'), default
        # values, separators ('=' / ':='), and trailing content.
        for forward in ['', '^', '^|']:
            defaults = ['', "f('some string','some other')", "f('a b')"]
            if forward == '^':
                defaults = ['']
            for default in defaults:
                seps = ['=', ':='] if forward or default else ['']
                for sep in seps:
                    for rest in ['', ' ', ' bar', ' bar=42']:
                        s = 'foo{0}{1}{2}{3}'.format(
                            sep, forward, default, rest)
                        self.check_macro_arg(
                            s, 'foo', 'foo' if forward else None,
                            default if default else None,
                            rest.lstrip())

    def test_parse_macro_whitespace(self):
        # Surrounding whitespace must not confuse the parser.
        for ws in [' ', ' \t ', ' \n ']:
            self.check_macro_arg(
                ws + 'foo' + ws + 'bar=42' + ws,
                'foo', None, None, 'bar=42' + ws)
# base class providing some convenience functions
class TestXacroBase(unittest.TestCase):
    """Common helpers for the xacro test cases.

    Subclasses may override self.ignore_nodes to skip certain DOM node
    types (e.g. comments) during XML comparison.
    """

    def __init__(self, *args, **kwargs):
        super(TestXacroBase, self).__init__(*args, **kwargs)
        # Node types ignored by assert_matches; none by default.
        self.ignore_nodes = []

    def assert_matches(self, a, b):
        # Assert that two XML trees/strings are structurally equivalent.
        self.assertTrue(xml_matches(a, b, self.ignore_nodes))

    def quick_xacro(self, xml, cli=None, **kwargs):
        """Parse and process a xacro document, returning the DOM.

        cli: optional list of command-line arguments seeding the options;
        **kwargs override any option taken from cli.
        NOTE(review): parameter `xml` shadows the module of the same name
        inside this method — kept as-is since callers use it by keyword.
        """
        args = {}
        if cli:  # pragma nocover
            opts, _ = process_args(cli, require_input=False)
            args.update(vars(opts))  # initialize with cli args
        args.update(kwargs)  # explicit function args have highest priority
        doc = xacro.parse(xml)
        xacro.process_doc(doc, **args)
        return doc

    def run_xacro(self, input_path, *args):  # pragma nocover
        # Invoke the installed `xacro` executable on input_path.
        args = list(args)
        subprocess.call(['xacro', input_path] + args)
# class to match XML docs while ignoring any comments
class TestXacroCommentsIgnored(TestXacroBase):
    """Test variant that ignores comment nodes when comparing XML."""

    def __init__(self, *args, **kwargs):
        super(TestXacroCommentsIgnored, self).__init__(*args, **kwargs)
        # Skip comment nodes in all assert_matches comparisons.
        self.ignore_nodes = [xml.dom.Node.COMMENT_NODE]

    def test_pr2(self):
        # run xacro on the pr2 tree snapshot
        test_dir = os.path.abspath(os.path.dirname(__file__))
        print(test_dir)
        pr2_xacro_path = os.path.join(
            test_dir, 'robots', 'pr2', 'pr2.urdf.xacro')
        pr2_golden_parse_path = os.path.join(
            test_dir, 'robots', 'pr2', 'pr2_1.11.4.xml')
        # Compare the processed xacro output against the golden parse,
        # ignoring comment nodes.
        self.assert_matches(
            xml.dom.minidom.parse(pr2_golden_parse_path),
            self.quick_xacro(open(pr2_xacro_path)))
# standard test class (including the test from TestXacroCommentsIgnored)
class TestXacro(TestXacroCommentsIgnored):
def __init__(self, *args, **kwargs):
super(TestXacroCommentsIgnored, self).__init__(*args, **kwargs)
self.ignore_nodes = []
def test_invalid_property_name(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="invalid.name"/></a>'''
self.assertRaises(xacro.XacroException, self.quick_xacro, src)
def test_dynamic_macro_names(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo"><a>foo</a></xacro:macro>
<xacro:macro name="bar"><b>bar</b></xacro:macro>
<xacro:property name="var" value="%s"/>
<xacro:call macro="${var}"/></a>'''
res = '''<a>%s</a>'''
self.assert_matches(self.quick_xacro(src % "foo"), res % "<a>foo</a>")
self.assert_matches(self.quick_xacro(src % "bar"), res % "<b>bar</b>")
def test_dynamic_macro_name_clash(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo"><a name="foo"/></xacro:macro>
<xacro:macro name="call"><a name="bar"/></xacro:macro>
<xacro:call macro="foo"/></a>'''
self.assertRaises(xacro.XacroException, self.quick_xacro, src)
def test_dynamic_macro_undefined(self):
self.assertRaises(xacro.XacroException,
self.quick_xacro,
'''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:call macro="foo"/></a>''')
def test_macro_undefined(self):
self.assertRaises(
xacro.XacroException,
self.quick_xacro,
'''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:undefined><foo/><bar/></xacro:undefined></a>''')
def test_xacro_element(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="name"><xacro:element xacro:name="${name}"/></xacro:macro>
<xacro:foo name="A"/>
<xacro:foo name="B"/>
</a>''' # noqa
res = '''<a><A/><B/></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_xacro_attribute(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="name value">
<tag><xacro:attribute name="${name}" value="${value}"/></tag>
</xacro:macro>
<xacro:foo name="A" value="foo"/>
<xacro:foo name="B" value="bar"/>
</a>'''
res = '''<a><tag A="foo"/><tag B="bar"/></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_inorder_processing(self):
src = '''
<xml xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="foo" value="1.0"/>
<xacro:macro name="m" params="foo"><a foo="${foo}"/></xacro:macro>
<xacro:m foo="1 ${foo}"/>
<!-- now redefining the property and macro -->
<xacro:property name="foo" value="2.0"/>
<xacro:macro name="m" params="foo"><b bar="${foo}"/></xacro:macro>
<xacro:m foo="2 ${foo}"/>
</xml>'''
expected = '''
<xml>
<a foo="1 1.0"/>
<b bar="2 2.0"/>
</xml>
'''
self.assert_matches(self.quick_xacro(src), expected)
def test_should_replace_before_macroexpand(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="inner" params="*the_block">
<in_the_inner><xacro:insert_block name="the_block" /></in_the_inner>
</xacro:macro>
<xacro:macro name="outer" params="*the_block">
<in_the_outer><xacro:inner><xacro:insert_block name="the_block" /></xacro:inner></in_the_outer>
</xacro:macro>
<xacro:outer><woot /></xacro:outer></a>''' # noqa
res = '''<a><in_the_outer><in_the_inner><woot /></in_the_inner></in_the_outer></a>''' # noqa
self.assert_matches(self.quick_xacro(src), res)
def test_evaluate_macro_params_before_body(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="lst">${lst[-1]}</xacro:macro>
<xacro:foo lst="${[1,2,3]}"/></a>'''
self.assert_matches(self.quick_xacro(src), '''<a>3</a>''')
def test_macro_params_escaped_string(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="a='1 -2' c=3"><bar a="${a}" c="${c}"/></xacro:macro>
<xacro:foo/></a>''' # noqa
self.assert_matches(self.quick_xacro(src), '''<a><bar a="1 -2" c="3"/></a>''') # noqa
def test_property_replacement(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="foo" value="42" />
<the_foo result="${foo}" />
</a>'''
res = '''<a><the_foo result="42"/></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_property_scope_parent(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="factor">
<xacro:property name="foo" value="${21*factor}" scope="parent"/>
</xacro:macro>
<xacro:foo factor="2"/><a foo="${foo}"/></a>'''
self.assert_matches(self.quick_xacro(src), '''<a><a foo="42"/></a>''')
def test_property_scope_global(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="factor">
<xacro:macro name="bar">
<xacro:property name="foo" value="${21*factor}" scope="global"/>
</xacro:macro>
<xacro:bar/>
</xacro:macro>
<xacro:foo factor="2"/><a foo="${foo}"/></a>'''
self.assert_matches(self.quick_xacro(src), '''<a><a foo="42"/></a>''')
def test_math_ignores_spaces(self):
src = '''<a><f v="${0.9 / 2 - 0.2}" /></a>'''
self.assert_matches(self.quick_xacro(src), '''<a><f v="0.25" /></a>''')
# def test_substitution_args_find(self):
# self.assert_matches(self.quick_xacro('''<a><f v="$(find xacro)/test/test_xacro.py" /></a>'''), # noqa
# '''<a><f v="''' + os.path.abspath((__file__).replace(".pyc",".py") + '''" /></a>''')) # noqa
# def test_substitution_args_arg(self):
# res = '''<a><f v="my_arg" /></a>'''
# self.assert_matches(self.quick_xacro('''<a><f v="$(arg sub_arg)" /></a>''', cli=['sub_arg:=my_arg']), res) # noqa
def test_escaping_dollar_braces(self):
src = '''<a b="$${foo}" c="$$${foo}" d="text $${foo}" e="text $$${foo}" f="$$(pwd)" />''' # noqa
res = '''<a b="${foo}" c="$${foo}" d="text ${foo}" e="text $${foo}" f="$(pwd)" />''' # noqa
self.assert_matches(self.quick_xacro(src), res)
def test_just_a_dollar_sign(self):
src = '''<a b="$" c="text $" d="text $ text"/>'''
self.assert_matches(self.quick_xacro(src), src)
def test_multiple_insert_blocks(self):
self.assert_matches(self.quick_xacro('''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="*block">
<xacro:insert_block name="block" />
<xacro:insert_block name="block" />
</xacro:macro>
<xacro:foo>
<a_block />
</xacro:foo>
</a>'''), '''<a>
<a_block />
<a_block />
</a>''')
def test_multiple_blocks(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="*block{A} *block{B}">
<xacro:insert_block name="block1" />
<xacro:insert_block name="block2" />
</xacro:macro>
<xacro:foo>
<block1/>
<block2/>
</xacro:foo>
</a>'''
res = '''<a>
<block{A}/>
<block{B}/>
</a>'''
# test both, reversal and non-reversal of block order
for d in [dict(A='1', B='2'), dict(A='2', B='1')]:
self.assert_matches(
self.quick_xacro(src.format(**d)), res.format(**d))
def test_integer_stays_integer(self):
self.assert_matches(self.quick_xacro('''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="m" params="num">
<test number="${num}" />
</xacro:macro>
<xacro:m num="100" />
</a>'''), '''
<a>
<test number="100" />
</a>''')
def test_insert_block_property(self):
self.assert_matches(self.quick_xacro('''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="bar">bar</xacro:macro>
<xacro:property name="val" value="2" />
<xacro:property name="some_block">
<some_block attr="${val}"><xacro:bar/></some_block>
</xacro:property>
<foo>
<xacro:insert_block name="some_block" />
</foo>
</a>'''), '''
<a>
<foo><some_block attr="2">bar</some_block></foo>
</a>''')
# def test_include(self):
# src = '''<a xmlns:xacro="http://www.ros.org/xacro"><xacro:include filename="include1.xml"/></a>''' # noqa
# self.assert_matches(self.quick_xacro(src), '''<a><inc1/></a>''')
# def test_include_glob(self):
# src = '''<a xmlns:xacro="http://www.ros.org/xacro"><xacro:include filename="include{glob}.xml"/></a>''' # noqa
# res = '<a><inc1/><inc2/></a>'
# for pattern in ['*', '?', '[1-2]']:
# self.assert_matches(self.quick_xacro(src.format(glob=pattern)), res) # noqa
def test_include_nonexistent(self):
self.assertRaises(xacro.XacroException,
self.quick_xacro, '''<a xmlns:xacro="http://www.ros.org/xacro">
<xacro:include filename="include-nada.xml" /></a>''') # noqa
def test_include_deprecated(self):
# <include> tags with some non-trivial content should not issue the deprecation warning # noqa
src = '''<a><include filename="nada"><tag/></include></a>'''
with capture_stderr(self.quick_xacro, src) as (result, output):
self.assert_matches(result, src)
self.assertEqual(output, '')
def test_include_from_variable(self):
doc = '''<a xmlns:xacro="http://www.ros.org/xacro">
<xacro:property name="file" value="roboticstoolbox/backend/xacro/tests/include1.xml"/>
<xacro:include filename="${file}" /></a>''' # noqa
self.assert_matches(self.quick_xacro(doc), '''<a><inc1/></a>''')
def test_include_with_namespace(self):
src = '''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="var" value="main"/>
<xacro:include filename="roboticstoolbox/backend/xacro/tests/include1.xacro" ns="A"/>
<xacro:include filename="roboticstoolbox/backend/xacro/tests/include2.xacro" ns="B"/>
<xacro:A.foo/><xacro:B.foo/>
<main var="${var}" A="${2*A.var}" B="${B.var+1}"/>
</a>'''
res = '''
<a>
<inc1/><inc2/><main var="main" A="2" B="3"/>
</a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_boolean_if_statement(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:if value="false">
<a />
</xacro:if>
<xacro:if value="true">
<b />
</xacro:if>
</robot>'''), '''
<robot>
<b />
</robot>''')
def test_invalid_if_statement(self):
self.assertRaises(xacro.XacroException,
self.quick_xacro,
'''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:if value="nonsense"><foo/></xacro:if></a>''')
def test_integer_if_statement(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:if value="${0*42}">
<a />
</xacro:if>
<xacro:if value="0">
<b />
</xacro:if>
<xacro:if value="${0}">
<c />
</xacro:if>
<xacro:if value="${1*2+3}">
<d />
</xacro:if>
</robot>'''), '''
<robot>
<d />
</robot>''')
def test_float_if_statement(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:if value="${3*0.0}">
<a />
</xacro:if>
<xacro:if value="${3*0.1}">
<b />
</xacro:if>
</robot>'''), '''
<robot>
<b />
</robot>''')
def test_property_if_statement(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="condT" value="${True}"/>
<xacro:property name="condF" value="${False}"/>
<xacro:if value="${condF}"><a /></xacro:if>
<xacro:if value="${condT}"><b /></xacro:if>
<xacro:if value="${True}"><c /></xacro:if>
</robot>'''), '''
<robot>
<b /><c />
</robot>''')
def test_consecutive_if(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:if value="1"><xacro:if value="0"><a>bar</a></xacro:if></xacro:if>
</a>'''), '''<a/>''')
def test_equality_expression_in_if_statement(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="var" value="useit"/>
<xacro:if value="${var == 'useit'}"><foo>bar</foo></xacro:if>
<xacro:if value="${'use' in var}"><bar>foo</bar></xacro:if>
</a>'''), '''
<a>
<foo>bar</foo>
<bar>foo</bar>
</a>''')
def test_no_evaluation(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="xyz" value="5 -2"/>
<foo>${xyz}</foo>
</a>'''), '''
<a>
<foo>5 -2</foo>
</a>''')
def test_math_expressions(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<foo function="${1. + sin(pi)}"/>
</a>'''), '''
<a>
<foo function="1.0"/>
</a>''')
def test_consider_non_elements_if(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:if value="1"><!-- comment --> text <b>bar</b></xacro:if>
</a>'''), '''
<a><!-- comment --> text <b>bar</b></a>''')
def test_consider_non_elements_block(self):
self.assert_matches(
self.quick_xacro('''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="*block">
<!-- comment -->
foo
<xacro:insert_block name="block" />
</xacro:macro>
<xacro:foo>
<!-- ignored comment -->
ignored text
<a_block />
</xacro:foo>
</a>'''), '''
<a>
<!-- comment -->
foo
<a_block />
</a>''')
def test_ignore_xacro_comments(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<!-- A -->
<!-- ignore multiline comments before any xacro tag -->
<!-- ignored -->
<xacro:property name="foo" value="1"/>
<!-- ignored -->
<xacro:if value="1"><!-- B --></xacro:if>
<!-- ignored -->
<xacro:macro name="foo"><!-- C --></xacro:macro>
<!-- ignored -->
<xacro:foo/>
</a>'''), '''
<a><!-- A --><!-- B --><!-- C --></a>''')
def test_recursive_evaluation(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a" value=" 42 "/>
<xacro:property name="a2" value="${ 2 * a }"/>
<a doubled="${a2}"/>
</robot>'''), '''
<robot>
<a doubled="84"/>
</robot>''')
def test_recursive_evaluation_wrong_order(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a2" value="${2*a}"/>
<xacro:property name="a" value="42"/>
<a doubled="${a2}"/>
</robot>'''), '''
<robot>
<a doubled="84"/>
</robot>''')
def test_recursive_definition(self):
self.assertRaises(xacro.XacroException,
self.quick_xacro, '''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a" value="${a2}"/>
<xacro:property name="a2" value="${2*a}"/>
<a doubled="${a2}"/>
</robot>''')
def test_multiple_recursive_evaluation(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a" value="1"/>
<xacro:property name="b" value="2"/>
<xacro:property name="c" value="3"/>
<xacro:property name="product" value="${a*b*c}"/>
<answer product="${product}"/>
</robot>'''), '''
<robot>
<answer product="6"/>
</robot>''')
def test_multiple_definition_and_evaluation(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a" value="42"/>
<xacro:property name="b" value="${a}"/>
<xacro:property name="b" value="${-a}"/>
<xacro:property name="b" value="${a}"/>
<answer b="${b} ${b} ${b}"/>
</robot>'''), '''
<robot>
<answer b="42 42 42"/>
</robot>''')
def test_transitive_evaluation(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a" value="42"/>
<xacro:property name="b" value="${a}"/>
<xacro:property name="c" value="${b}"/>
<xacro:property name="d" value="${c}"/>
<answer d="${d}"/>
</robot>'''), '''
<robot>
<answer d="42"/>
</robot>''')
def test_multi_tree_evaluation(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="a" value="42"/>
<xacro:property name="b" value="2.1"/>
<xacro:property name="c" value="${a}"/>
<xacro:property name="d" value="${b}"/>
<xacro:property name="f" value="${c*d}"/>
<answer f="${f}"/>
</robot>'''), '''
<robot>
<answer f="88.2"/>
</robot>''')
def test_from_issue(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="x" value="42"/>
<xacro:property name="wheel_width" value="${x}"/>
<link name="my_link">
<origin xyz="0 0 ${wheel_width/2}"/>
</link>
</robot>'''), '''
<robot>
<link name="my_link">
<origin xyz="0 0 21.0"/>
</link>
</robot>''')
def test_recursive_bad_math(self):
self.assertRaises(xacro.XacroException, self.quick_xacro, '''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="x" value="0"/>
<tag badness="${1/x}"/>
</robot>''')
def test_default_param(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="fixed_link" params="parent_link:=base_link child_link *joint_pose">
<link name="${child_link}"/>
<joint name="${child_link}_joint" type="fixed">
<xacro:insert_block name="joint_pose" />
<parent link="${parent_link}"/>
<child link="${child_link}" />
</joint>
</xacro:macro>
<xacro:fixed_link child_link="foo">
<origin xyz="0 0 0" rpy="0 0 0" />
</xacro:fixed_link >
</robot>'''), '''
<robot>
<link name="foo"/>
<joint name="foo_joint" type="fixed">
<origin rpy="0 0 0" xyz="0 0 0"/>
<parent link="base_link"/>
<child link="foo"/>
</joint>
</robot>''') # noqa
def test_default_param_override(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="fixed_link" params="parent_link:=base_link child_link *joint_pose">
<link name="${child_link}"/>
<joint name="${child_link}_joint" type="fixed">
<xacro:insert_block name="joint_pose" />
<parent link="${parent_link}"/>
<child link="${child_link}" />
</joint>
</xacro:macro>
<xacro:fixed_link child_link="foo" parent_link="bar">
<origin xyz="0 0 0" rpy="0 0 0" />
</xacro:fixed_link >
</robot>'''), '''
<robot>
<link name="foo"/>
<joint name="foo_joint" type="fixed">
<origin rpy="0 0 0" xyz="0 0 0"/>
<parent link="bar"/>
<child link="foo"/>
</joint>
</robot>''') # noqa
def test_param_missing(self):
self.assertRaises(xacro.XacroException, self.quick_xacro, '''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="fixed_link" params="parent_link child_link *joint_pose">
<link name="${child_link}"/>
<joint name="${child_link}_joint" type="fixed">
<xacro:insert_block name="joint_pose" />
<parent link="${parent_link}"/>
<child link="${child_link}" />
</joint>
</xacro:macro>
<xacro:fixed_link child_link="foo">
<origin xyz="0 0 0" rpy="0 0 0" />
</xacro:fixed_link >
</robot>''')
def test_default_arg(self):
self.assert_matches(self.quick_xacro('''
<robot xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:arg name="foo" default="2"/>
<link name="my_link">
<origin xyz="0 0 0"/>
</link>
</robot>
'''), '''
<robot>
<link name="my_link">
<origin xyz="0 0 0"/>
</link>
</robot>''')
# def test_default_arg_override(self):
# self.assert_matches(self.quick_xacro('''
# <robot xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:arg name="foo" default="2"/>
# <link name="my_link">
# <origin xyz="0 0 $(arg foo)"/>
# </link>
# </robot>
# ''', ['foo:=4']), '''
# <robot>
# <link name="my_link">
# <origin xyz="0 0 4"/>
# </link>
# </robot>''')
# def test_default_arg_missing(self):
# self.assertRaises(Exception, self.quick_xacro, '''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <a arg="$(arg foo)"/>
# </a>
# ''')
def test_default_arg_empty(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:arg name="foo" default=""/></a>'''), '''<a/>''')
def test_iterable_literals_plain(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="list" value="[0, 1+1, 2]"/>
<xacro:property name="tuple" value="(0,1+1,2)"/>
<xacro:property name="dict" value="{'a':0, 'b':1+1, 'c':2}"/>
<a list="${list}" tuple="${tuple}" dict="${dict}"/>
</a>'''), '''
<a>
<a list="[0, 1+1, 2]" tuple="(0,1+1,2)" dict="{'a':0, 'b':1+1, 'c':2}"/>
</a>''')
def test_iterable_literals_eval(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="list" value="${[0, 1+1, 2]}"/>
<xacro:property name="tuple" value="${(0,1+1,2)}"/>
<xacro:property name="dic" value="${dict(a=0, b=1+1, c=2)}"/>
<a list="${list}" tuple="${tuple}" dict="${dic}"/>
</a>'''), '''
<a>
<a list="[0, 2, 2]" tuple="(0, 2, 2)" dict="{'a': 0, 'c': 2, 'b': 2}"/>
</a>''')
def test_literals_eval(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="f" value="1.23"/>
<xacro:property name="i" value="123"/>
<xacro:property name="s" value="1_2_3"/>
float=${f+1} int=${i+1} string=${s}
</a>'''), '''
<a>
float=2.23 int=124 string=1_2_3
</a>''')
def test_enforce_xacro_ns(self):
self.assert_matches(self.quick_xacro('''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<arg name="foo" value="bar"/>
<include filename="foo"/>
</a>''', xacro_ns=False), '''
<a>
<arg name="foo" value="bar"/>
<include filename="foo"/>
</a>''')
# def test_issue_68_numeric_arg(self):
# # If a property is assigned from a substitution arg, then this properties' value was # noqa
# # no longer converted to a python type, so that e.g. 0.5 remained u'0.5'. # noqa
# # If this property is then used in a numerical expression an exception is thrown. # noqa
# self.assert_matches(self.quick_xacro('''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:arg name="foo" default="0.5"/>
# <xacro:property name="prop" value="$(arg foo)" />
# <a prop="${prop-0.3}"/>
# </a>
# '''), '''
# <a>
# <a prop="0.2"/>
# </a>''')
# def test_transitive_arg_evaluation(self):
# self.assert_matches(self.quick_xacro('''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:arg name="foo" default="0.5"/>
# <xacro:arg name="bar" default="$(arg foo)"/>
# <xacro:property name="prop" value="$(arg bar)" />
# <a prop="${prop-0.3}"/>
# </a>
# '''), '''
# <a>
# <a prop="0.2"/>
# </a>''')
def test_macro_name_with_colon(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="xacro:my_macro"><foo/></xacro:macro>
<xacro:my_macro/>
</a>'''
res = '''<a><foo/></a>'''
with capture_stderr(self.quick_xacro, src) as (result, output):
self.assert_matches(result, res)
self.assertTrue(
"macro names must not contain prefix 'xacro:'" in output)
def test_overwrite_globals(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="pi" value="3.14"/></a>'''
with capture_stderr(self.quick_xacro, src) as (result, output):
self.assert_matches(result, '<a/>')
self.assertTrue(output)
def test_no_double_evaluation(self):
src = '''
<a xmlns:xacro="http://www.ros.org/xacro">
<xacro:macro name="foo" params="a b:=${a} c:=$${a}"> a=${a} b=${b} c=${c} </xacro:macro>
<xacro:property name="a" value="1"/>
<xacro:property name="d" value="$${a}"/>
<d d="${d}"><xacro:foo a="2"/></d>
</a>''' # noqa
res = '''<a><d d="${a}"> a=2 b=1 c=${a} </d></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_property_forwarding(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="arg" value="42"/>
<xacro:macro name="foo" params="arg:=^%s">${arg}</xacro:macro>
<xacro:foo/>
</a>'''
res = '''<a>%s</a>'''
self.assert_matches(self.quick_xacro(src % ''), res % '42')
self.assert_matches(self.quick_xacro(src % '|'), res % '42')
self.assert_matches(self.quick_xacro(src % '|6'), res % '42')
# def test_extension_in_expression(self):
# src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">${2*'$(arg var)'}</a>''' # noqa
# res = '''<a>%s</a>'''
# self.assert_matches(self.quick_xacro(src, ['var:=xacro']), res % (2 * 'xacro')) # noqa
# def test_expression_in_extension(self):
# src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">$(arg ${'v'+'ar'})</a>''' # noqa
# res = '''<a>%s</a>'''
# self.assert_matches(self.quick_xacro(src, ['var:=xacro']), res % 'xacro') # noqa
def test_target_namespace(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro" xacro:targetNamespace="http://www.ros.org"/>''' # noqa
res = '''<a xmlns="http://www.ros.org"/>'''
self.assert_matches(self.quick_xacro(src), res)
def test_target_namespace_only_from_root(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro"><b xacro:targetNamespace="http://www.ros.org"/></a>''' # noqa
res = '''<a><b/></a>'''
self.assert_matches(self.quick_xacro(src), res)
# def test_include_lazy(self):
# doc = ('''<a xmlns:xacro="http://www.ros.org/xacro">
# <xacro:if value="false"><xacro:include filename="non-existent"/></xacro:if></a>''') # noqa
# self.assert_matches(self.quick_xacro(doc), '''<a/>''')
# def test_issue_63_fixed_with_inorder_processing(self):
# self.assert_matches(self.quick_xacro('''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:arg name="has_stuff" default="false"/>
# <xacro:if value="$(arg has_stuff)">
# <xacro:include file="$(find nonexistent_package)/stuff.urdf" />
# </xacro:if>
# </a>'''), '<a/>')
# def test_include_from_macro(self):
# src = '''
# <a xmlns:xacro="http://www.ros.org/xacro">
# <xacro:macro name="foo" params="file:=include1.xml"><xacro:include filename="${file}"/></xacro:macro> # noqa
# <xacro:foo/>
# <xacro:foo file="${abs_filename('include1.xml')}"/>
# <xacro:include filename="subdir/foo.xacro"/>
# <xacro:foo file="$(cwd)/subdir/include1.xml"/>
# </a>'''
# res = '''<a><inc1/><inc1/><subdir_inc1/><subdir_inc1/></a>'''
# self.assert_matches(self.quick_xacro(src), res)
# def test_yaml_support(self):
# src = '''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:property name="settings" value="${load_yaml('settings.yaml')}"/>
# <xacro:property name="type" value="$(arg type)"/>
# <xacro:include filename="${settings['arms'][type]['file']}"/>
# <xacro:call macro="${settings['arms'][type]['macro']}"/>
# </a>'''
# res = '''<a><{tag}/></a>'''
# for i in ['inc1', 'inc2']:
# self.assert_matches(self.quick_xacro(src, cli=['type:=%s' % i]),
# res.format(tag=i))
# def test_yaml_support_dotted(self):
# src = '''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:property name="settings" value="${load_yaml('settings.yaml')}"/>
# <xacro:property name="type" value="$(arg type)"/>
# <xacro:include filename="${settings.arms[type].file}"/>
# <xacro:call macro="${settings.arms[type].macro}"/>
# </a>'''
# res = '''<a><{tag}/></a>'''
# for i in ['inc1', 'inc2']:
# self.assert_matches(self.quick_xacro(src, cli=['type:=%s' % i]),
# res.format(tag=i))
# def test_yaml_support_dotted_key_error(self):
# src = '''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:property name="settings" value="${load_yaml('settings.yaml')}"/>
# <xacro:property name="bar" value="${settings.baz}"/>
# ${bar}
# </a>'''
# self.assertRaises(xacro.XacroException, self.quick_xacro, src)
# def test_yaml_support_dotted_arith(self):
# src = '''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:property name="settings" value="${load_yaml('settings.yaml')}"/>
# <xacro:property name="bar" value="${settings.arms.inc2.props.port + 1}"/>
# ${bar}
# </a>'''
# res = '''<a>4243</a>'''
# self.assert_matches(self.quick_xacro(src), res)
# def test_yaml_support_key_in_dict(self):
# src = '''
# <a xmlns:xacro="http://www.ros.org/wiki/xacro">
# <xacro:property name="settings" value="${load_yaml('settings.yaml')}"/>
# ${'arms' in settings} ${'baz' in settings}
# </a>'''
# res = '''<a>True False</a>'''
# self.assert_matches(self.quick_xacro(src), res)
def test_xacro_exist_required(self):
src = '''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:include filename="non-existent.xacro"/>
</a>'''
self.assertRaises(xacro.XacroException, self.quick_xacro, src)
def test_xacro_exist_optional(self):
src = '''
<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:include filename="non-existent.xacro" optional="True"/>
</a>'''
res = '''<a></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_macro_default_param_evaluation_order(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="foo" params="arg:=${2*foo}">
<xacro:property name="foo" value="-"/>
<f val="${arg}"/>
</xacro:macro>
<xacro:property name="foo" value="${3*7}"/>
<xacro:foo/>
<xacro:property name="foo" value="*"/>
<xacro:foo/>
</a>'''
res = '''<a>
<f val="42"/><f val="**"/></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_default_property(self):
src = '''
<a xmlns:xacro="http://www.ros.org/xacro">
<xacro:property name="prop" default="false"/>
<xacro:unless value="${prop}">
<foo/>
<xacro:property name="prop" value="true"/>
</xacro:unless>
<!-- second foo should be ignored -->
<xacro:unless value="${prop}">
<foo/>
<xacro:property name="prop" value="true"/>
</xacro:unless>
</a>'''
res = '''<a><foo/></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_unicode_literal_parsing(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">🍔 </a>'''
self.assert_matches(self.quick_xacro(src), '''<a>🍔 </a>''')
def test_unicode_property(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="burger" value="🍔"/>
${burger}</a>'''
res = '''<a>🍔</a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_unicode_property_attribute(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="burger" value="🍔"/>
<b c="${burger}"/></a>'''
res = '''<a><b c="🍔"/></a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_unicode_property_block(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="burger">
🍔
</xacro:property>
<xacro:insert_block name="burger"/></a>'''
res = '''<a>🍔</a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_unicode_conditional(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:property name="burger" value="🍔"/>
<xacro:if value="${burger == u'🍔'}">
🍟
</xacro:if>
</a>'''
res = '''<a>🍟</a>'''
self.assert_matches(self.quick_xacro(src), res)
def test_unicode_macro(self):
src = '''<a xmlns:xacro="http://www.ros.org/wiki/xacro">
<xacro:macro name="burger" params="how_many">
${u'🍔' * how_many}
</xacro:macro>
<xacro:burger how_many="4"/>
</a>'''
res = '''<a>🍔🍔🍔🍔</a>'''
self.assert_matches(self.quick_xacro(src), res)
# def test_unicode_file(self):
# # run the full xacro processing pipeline on a file with
# # unicode characters in it and make sure the output is correct
# test_dir = os.path.abspath(os.path.dirname(__file__))
# input_path = os.path.join(test_dir, 'emoji.xacro')
# tmp_dir_name = tempfile.mkdtemp() # create directory we can trash
# output_path = os.path.join(tmp_dir_name, "out.xml")
# self.run_xacro(input_path, '-o', output_path)
# self.assertTrue(os.path.isfile(output_path))
# self.assert_matches(xml.dom.minidom.parse(output_path), '''<robot>🍔</robot>''') # noqa
# shutil.rmtree(tmp_dir_name) # clean up after ourselves
def test_invalid_syntax(self):
self.assertRaises(xacro.XacroException, self.quick_xacro, '<a>a${</a>')
self.assertRaises(xacro.XacroException, self.quick_xacro, '<a>${b</a>')
self.assertRaises(
xacro.XacroException, self.quick_xacro, '<a>${{}}</a>')
self.assertRaises(xacro.XacroException, self.quick_xacro, '<a>a$(</a>')
self.assertRaises(xacro.XacroException, self.quick_xacro, '<a>$(b</a>')
if __name__ == '__main__': # pragma nocover
unittest.main()
| [
"jhavl@users.noreply.github.com"
] | jhavl@users.noreply.github.com |
54f2fc3f5ae22cc683bea5d9709baa7e6a5fab6a | 80576f0a050d87edf6c60f096779aa54e8548e6b | /djangosige/apps/estoque/views/consulta.py | 3d6249b21ea28097cb51df7d7022e2792850c8af | [
"MIT"
] | permissive | HigorMonteiro/djangoSIGE | fb9ded1641622a7d105781fd7b004f4d3f8a17a7 | 8fc89a570663b32e1be032ce9a45d37842b82008 | refs/heads/master | 2020-12-02T22:39:46.446177 | 2017-07-05T02:07:00 | 2017-07-05T02:07:00 | 96,162,182 | 0 | 0 | null | 2017-07-04T01:09:59 | 2017-07-04T01:09:59 | null | UTF-8 | Python | false | false | 1,354 | py | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse_lazy
from django.views.generic.list import ListView
from djangosige.apps.cadastro.models import Produto
from djangosige.apps.estoque.models import LocalEstoque
class ConsultaEstoqueView(ListView):
template_name = "estoque/consulta/consulta_estoque.html"
success_url = reverse_lazy('estoque:consultaestoqueview')
context_object_name = 'produtos_filtrados'
def get_context_data(self, **kwargs):
context = super(ConsultaEstoqueView, self).get_context_data(**kwargs)
context['todos_produtos'] = Produto.objects.filter(
controlar_estoque=True)
context['todos_locais'] = LocalEstoque.objects.all()
context['title_complete'] = 'CONSULTA DE ESTOQUE'
return context
def get_queryset(self):
produto = self.request.GET.get('produto')
local = self.request.GET.get('local')
if produto:
produtos_filtrados = Produto.objects.filter(id=produto)
elif local:
produtos_filtrados = LocalEstoque.objects.get(
id=local).produtos_estoque.filter(controlar_estoque=True, estoque_atual__gt=0)
else:
produtos_filtrados = Produto.objects.filter(
controlar_estoque=True, estoque_atual__gt=0)
return produtos_filtrados
| [
"thiagovilelap@hotmail.com"
] | thiagovilelap@hotmail.com |
33da9285ec09b6b5f1b27bf790bcaea7b6820864 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_96/875.py | 276e1f647b1feac69a7d212c271358860c599206 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | from copy import copy
scores_n = map(lambda x:-1, range(31))
scores_s = copy(scores_n)
for i in range(11):
for j in range(i, i+3):
for k in range(i, i+3):
score = i + j + k
if score>30 or j>10 or k>10:
continue
if j==i+2 or k==i+2:
if max([i,j,k])>scores_s[score]:
scores_s[score] = max([i,j,k])
else:
if max([i,j,k])>scores_n[score]:
scores_n[score] = max([i,j,k])
for i in range(31):
if scores_s[i]==-1:
scores_s[i]=scores_n[i]
# Start Calculate
fn = 'B-large'
fi = open('%s.in' % fn, 'r')
fo = open('%s.out' % fn, 'w')
t = int(fi.readline())
cases = fi.readlines()
fi.close()
for c in range(t):
ns = map(lambda x: int(x), cases[c].strip().split(' '))
n = ns[0]
s = ns[1]
p = ns[2]
gs = ns[3:3+n]
target = 0
for g in gs:
if scores_n[g]>=p:
target = target + 1
continue
if s>0 and scores_s[g]>=p:
target = target + 1
s = s - 1
continue
fo.write("Case #%s: %s\n" % ((c+1),target))
fo.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
98e2c351373391cdab276e22d809edbe37d7a18a | 94f524333d8a6f44985adec85242c8f57acfb6f0 | /fastaRenamers/spadesContigsRenamer.py | fee774105212ea6f6b52ac65a1404182e78e7d9b | [
"MIT"
] | permissive | nickp60/open_utils | 5185288959eef30fc0d11e91d1aa76fe2e8236e7 | 628d102554e7c13fadecef5b94fa0dc0b389d37b | refs/heads/master | 2021-01-17T02:32:15.820342 | 2020-04-21T16:29:11 | 2020-04-21T16:29:11 | 48,182,000 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,081 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import argparse
import sys
import logging
import os
def get_args():
parser = argparse.ArgumentParser(
description="given a contigs file from spades, this renames each " +
"this renames the header line to include the file's basename")
parser.add_argument("-i", "--infile", dest="infile", help="input genome", required=True)
parser.add_argument("-n", "--name", dest="name", help="name", required=True)
parser.add_argument("-c", "--clobber", help="overwrite existing outfile",
dest="clobber",
default=False,
action="store_true")
parser.add_argument("-o", "--outfile", help=" output file",
dest="outfile",
default=os.path.join(os.getcwd(),
"spadesRenamed.fa"))
args = parser.parse_args()
return(args)
def renameContigs(infile, name, outfile):
counter = 1
# name = os.path.splitext(os.path.basename(infile))
with open(outfile, "a") as f:
for line in open(infile):
if '>' in line:
f.write('>' + name + "_c" + str(counter) + " " + name + " " +
line[1:len(line)])
counter = counter + 1
else:
f.write(line)
def main():
logger = logging.getLogger('filterBamToSam')
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,
format='%(name)s (%(levelname)s): %(message)s')
logger.setLevel(logging.DEBUG)
logger.debug("All settings used:")
args = get_args()
for k, v in sorted(vars(args).items()):
logger.debug("{0}: {1}".format(k, v))
if os.path.isfile(args.outfile):
if args.clobber:
os.unlink(args.outfile)
else:
logger.error("Ouput file exists! exiting")
sys.exit(1)
renameContigs(args.infile, args.name, args.outfile)
logger.info("Finished")
if __name__ == '__main__':
main()
| [
"nickp60@gmail.com"
] | nickp60@gmail.com |
b7968e221fae41e83c55e7ffda8a44e5f3bf2795 | d1687f79f6e389f2b695b1d4bdf2eeb8ef07048e | /src/netius/test/extra/smtp_r.py | 80fb13816a097a82c50120c8380dc13689f37f10 | [
"Apache-2.0"
] | permissive | zdzhjx/netius | 4604d3a772644338ef5020defa967d5cd8b89e6a | c8b050c42c5d9e6a0980604f08d7508bba0f996e | refs/heads/master | 2021-01-22T12:07:57.608669 | 2016-08-30T17:26:28 | 2016-08-30T17:26:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2016 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2016 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import unittest
import netius.extra
PRIVATE_KEY = b"MIICVwIAAoGAgRWSX07LB0VzpDy14taaO1b+juQVhQpyKy/fxaLupohy4UDOxHJU\
Iz7jzR6B8l93KXWqxG5UZK2CduL6TKJGQZ+jGkTk0YU3d3r5kwPNOX1o+qhICJF8\
tcWZcw1MUV816sxJ3hi6RTz7faRvJtj9J2SM2cY3eq0xQSM/dvD1fqUCAwEAAQKB\
gDaUp3qTN3fQnxAf94x9z2Mt6p8CxDKn8xRdvtGzjhNueJzUKVmZOghZLDtsHegd\
A6bNMTKzsA2N7C9W1B0ZNHkmc6cbUyM/gXPLzpErFF4c5sTYAaJGKK+3/3BrrliG\
6vgzTXt3KZRlInfrumZRo4h7yE/IokfmzBwjbyP7N3lhAkDpfTwLidRBTgYVz5yO\
/7j55vl2GN80xDk0IDfO17/O8qyQlt+J6pksE0ojTkAjD2N4rx3dL4kPgmx80r/D\
AdNNAkCNh4LBukRUMT+ulfngrnzQ4QDnCUXpANKpe3HZk4Yfysj1+zrlWFilzO3y\
t/RpGu4GtH1LUNQNjrp94CcBNPy5AkBW6KCTAuiYrjwhnjd+Gr11d33fcX6Tm35X\
Yq6jNTdWBooo/5+RLFt7RmrQHW5OHoo9/6C0Fd+EgF11UNTD90f5AkBBB6/0FgNJ\
cCujq7PaIjKlw40nm2ItEry5NUh1wcxSFVpLdDl2oiZxYH1BFndOSBpwqEQd9DDL\
Xfag2fryGge5AkCFPjggILI8jZZoEW9gJoyqh13fkf+WjtwL1mLztK2gQcrvlyUd\
/ddIy8ZEkmGRiHMcX0SGdsEprW/EpbhSdakC"
MESSAGE = b"Header: Value\r\n\r\nHello World"
RESULT = b"DKIM-Signature: v=1; a=rsa-sha256; c=simple/simple; d=netius.hive.pt;\r\n\
i=email@netius.hive.pt; l=13; q=dns/txt; s=20160523113052;\r\n\
t=1464003802; h=Header;\r\n\
bh=sIAi0xXPHrEtJmW97Q5q9AZTwKC+l1Iy+0m8vQIc/DY=; b=Pr7dVjQIX3ovG78v1X45seFwA/+uyIAofJbxn5iXTRBA5Mv+YVdiI9QMm/gU1ljoSGqqC+hvLS4iB2N1kC4fGuDxXOyNaApOLSA2hl/mBpzca6SNyu6CYvUDdhmfD+8TsYMe6Vy8UY9lWpPYNgfb9BhORqPvxiC8A8F9ScTVT/s=\r\nHeader: Value\r\n\r\nHello World"
REGISTRY = {
"netius.hive.pt" : dict(
key_b64 = PRIVATE_KEY,
selector = "20160523113052",
domain = "netius.hive.pt"
)
}
class RelaySMTPServerTest(unittest.TestCase):
def test_dkim(self):
smtp_r = netius.extra.RelaySMTPServer()
smtp_r.dkim = REGISTRY
result = smtp_r.dkim_contents(
MESSAGE,
email = "email@netius.hive.pt",
creation = 1464003802
)
self.assertEqual(result, RESULT)
| [
"joamag@hive.pt"
] | joamag@hive.pt |
0523121cce605e677e973420974d98141f287da8 | 622f779c8ed0bf589b947c5550c11f64f438444a | /src/Checking-Valid-String-In-Binary-Tree.py | ae103e30993f7e0ebb59eb38ed512376518291d2 | [
"MIT"
] | permissive | sahilrider/LeetCode-Solutions | 9290f69096af53e5c7212fe1e3820131a89257d0 | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | refs/heads/master | 2023-01-03T22:35:28.377721 | 2020-10-25T16:15:49 | 2020-10-25T16:15:49 | 232,368,847 | 2 | 0 | MIT | 2020-10-25T16:15:50 | 2020-01-07T16:37:43 | Python | UTF-8 | Python | false | false | 985 | py | '''https://leetcode.com/explore/featured/card/30-day-leetcoding-challenge/532/week-5/3315/'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isValidSequence(self, root: TreeNode, arr: List[int]) -> bool:
def dfs(node, seq):
if not node.left and not node.right:
if seq + [node.val] == self.arr:
return True
else:
return False
if node.left and node.right:
return dfs(node.left, seq+[node.val]) or dfs(node.right, seq+[node.val])
if node.left:
return dfs(node.left, seq+[node.val])
if node.right:
return dfs(node.right, seq+[node.val])
self.arr = arr
if not root:
return False
return dfs(root, [])
| [
"sahilriders@gmail.com"
] | sahilriders@gmail.com |
fa29cbe747d06ae6b51db93c6ce0423dc4b07024 | 7704dfa69e81c8a2f22b4bdd2b41a1bdad86ac4a | /nailgun/nailgun/utils/migration.py | f17807f187bf6ec511e997a0c7368cdb4ee7ec65 | [
"Apache-2.0"
] | permissive | andrei4ka/fuel-web-redhat | 8614af4567d2617a8420869c068d6b1f33ddf30c | 01609fcbbae5cefcd015b6d7a0dbb181e9011c14 | refs/heads/master | 2022-10-16T01:53:59.889901 | 2015-01-23T11:00:22 | 2015-01-23T11:00:22 | 29,728,913 | 0 | 0 | Apache-2.0 | 2022-09-16T17:48:26 | 2015-01-23T10:56:45 | Python | UTF-8 | Python | false | false | 11,522 | py | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import op
import json
import os
import re
import six
import sqlalchemy as sa
from sqlalchemy.sql import text
import uuid
import yaml
from nailgun.db.sqlalchemy.fixman import load_fixture
from nailgun.openstack.common import jsonutils
from nailgun.settings import settings
def upgrade_enum(table, column_name, enum_name, old_options, new_options):
old_type = sa.Enum(*old_options, name=enum_name)
new_type = sa.Enum(*new_options, name=enum_name)
tmp_type = sa.Enum(*new_options, name="_" + enum_name)
# Create a temporary type, convert and drop the "old" type
tmp_type.create(op.get_bind(), checkfirst=False)
op.execute(
u'ALTER TABLE {0} ALTER COLUMN {1} TYPE _{2}'
u' USING {1}::text::_{2}'.format(
table,
column_name,
enum_name
)
)
old_type.drop(op.get_bind(), checkfirst=False)
# Create and convert to the "new" type
new_type.create(op.get_bind(), checkfirst=False)
op.execute(
u'ALTER TABLE {0} ALTER COLUMN {1} TYPE {2}'
u' USING {1}::text::{2}'.format(
table,
column_name,
enum_name
)
)
tmp_type.drop(op.get_bind(), checkfirst=False)
def drop_enum(name):
op.execute(
u'DROP TYPE {0}'.format(name)
)
def convert_condition_value(val):
if isinstance(val, six.string_types):
return "'{0}'".format(val)
return str(val).lower()
def negate_condition(condition):
"""Negates condition.
"""
return "not ({0})".format(condition)
def remove_question_operator(expression):
"""Removes '?' operator from expressions, it was deprecated in 6.0
"""
return re.sub(r'(:[\w\.\-]+)\?', '\\1', expression)
def upgrade_release_attributes_50_to_51(attrs_meta):
if not attrs_meta.get('editable'):
return attrs_meta
def depends_to_restrictions(depends, restrictions):
for cond in depends:
expr = cond.keys()[0]
restrictions.append(
expr + " != " + convert_condition_value(cond[expr]))
def conflicts_to_restrictions(conflicts, restrictions):
for cond in conflicts:
expr = cond.keys()[0]
restrictions.append(
expr + " == " + convert_condition_value(cond[expr]))
for _, group in six.iteritems(attrs_meta.get('editable')):
for _, attr in six.iteritems(group):
restrictions = []
if attr.get('depends'):
depends_to_restrictions(attr['depends'], restrictions)
attr.pop('depends')
if attr.get('conflicts'):
conflicts_to_restrictions(attr['conflicts'], restrictions)
attr.pop('conflicts')
if restrictions:
attr['restrictions'] = restrictions
return attrs_meta
def upgrade_release_attributes_51_to_60(attrs_meta):
"""Remove '?' operator from expressions
"""
if not attrs_meta.get('editable'):
return attrs_meta
def convert_restrictions(restrictions):
result = []
for restriction in restrictions:
if isinstance(restriction, basestring):
restriction = remove_question_operator(restriction)
else:
restriction['condition'] = remove_question_operator(
restriction['condition'])
result.append(restriction)
return result
for _, group in six.iteritems(attrs_meta.get('editable')):
for _, attr in six.iteritems(group):
if 'restrictions' in attr:
attr['restrictions'] = convert_restrictions(
attr['restrictions'])
if 'values' in attr:
for value in attr['values']:
if 'restrictions' in value:
value['restrictions'] = convert_restrictions(
value['restrictions'])
return attrs_meta
def upgrade_release_roles_50_to_51(roles_meta):
for _, role in six.iteritems(roles_meta):
if role.get('depends'):
for depend in role['depends']:
cond = depend.get('condition')
if isinstance(cond, dict):
expr = cond.keys()[0]
depend['condition'] = \
expr + " == " + convert_condition_value(cond[expr])
return roles_meta
def upgrade_release_roles_51_to_60(roles_meta, add_meta=None):
"""Convert all role_metadata.depends values into
roles_metadata.restrictions.
"""
add_meta = add_meta or {}
for role_name, role in six.iteritems(roles_meta):
for depend in role.get('depends', []):
cond = depend.get('condition')
new_restriction = {
'condition': remove_question_operator(negate_condition(cond))
}
if 'warning' in depend:
new_restriction['message'] = depend['warning']
role.setdefault('restrictions', [])
role['restrictions'].append(new_restriction)
if 'depends' in role:
del role['depends']
if role_name in add_meta:
role.update(add_meta[role_name])
return roles_meta
def upgrade_release_wizard_metadata_50_to_51(fixture_path=None):
if not fixture_path:
fixture_path = os.path.join(os.path.dirname(__file__), '..',
'fixtures', 'openstack.yaml')
with open(fixture_path, 'r') as fixture_file:
fixt = load_fixture(fixture_file, loader=yaml)
# wizard_meta is the same for all existing in db releases
wizard_meta = fixt[0]['fields']['wizard_metadata']
# remove nsx data from Network section of wizard_metadata
wizard_meta['Network']['manager']['values'] = [
n for n in wizard_meta['Network']['manager']['values']
if n['data'] != 'neutron-nsx'
]
return wizard_meta
def upgrade_clusters_replaced_info(connection):
select = text(
"""SELECT id, replaced_provisioning_info, replaced_deployment_info
FROM clusters""")
clusters = connection.execute(select)
for cluster in clusters:
nodes_select = text(
"""SELECT id FROM nodes WHERE cluster_id=:id""")
nodes = connection.execute(
nodes_select,
id=cluster[0])
provisioning_info = jsonutils.loads(cluster[1])
deployment_nodes = jsonutils.loads(cluster[2])
provisioning_nodes = provisioning_info.pop('nodes', [])
for node in nodes:
node_deploy = [d for d in deployment_nodes
if d['uid'] == str(node[0])]
node_provision = next((d for d in provisioning_nodes
if d['uid'] == str(node[0])), {})
update_node = text(
"""UPDATE nodes
SET replaced_deployment_info = :deploy,
replaced_provisioning_info = :provision
WHERE id = :id""")
connection.execute(
update_node,
deploy=jsonutils.dumps(node_deploy),
provision=jsonutils.dumps(node_provision),
id=node[0])
update_cluster = text(
"""UPDATE clusters
SET replaced_deployment_info = :deploy,
replaced_provisioning_info = :provision
WHERE id = :id""")
connection.execute(
update_cluster,
deploy=jsonutils.dumps({}),
provision=jsonutils.dumps(provisioning_info),
id=cluster[0])
def upgrade_release_set_deployable_false(connection, versions):
"""Set deployable=False for a given versions list.
:param connection: a database connection
:param versions: a list of versions to be forbidden
"""
update_query = text(
"UPDATE releases SET is_deployable = 'false' "
" WHERE version IN :versions")
connection.execute(update_query, versions=tuple(versions))
def upgrade_release_fill_orchestrator_data(connection, versions):
"""Fill release_orchestrator_data if it's not filled yet.
:param connection: a database connection
:param versions: a list of versions to be forbidden
"""
for version in versions:
select_query = text(
"SELECT id, operating_system FROM releases "
" WHERE version LIKE :version AND id NOT IN ("
" SELECT release_id FROM release_orchestrator_data "
" )")
releases = connection.execute(select_query, version=version)
for release in releases:
insert_query = text(
"INSERT INTO release_orchestrator_data ("
" release_id, repo_metadata, puppet_manifests_source, "
" puppet_modules_source)"
" VALUES ("
" :release_id, "
" :repo_metadata, "
" :puppet_manifests_source, "
" :puppet_modules_source)")
# if release_orchestrator_data isn't filled then releases'
# repos stores in unversioned directory with "fuelweb" word
repo_path = 'http://{MASTER_IP}:8080/{OS}/fuelweb/x86_64'.format(
MASTER_IP=settings.MASTER_IP, OS=release[1].lower())
# for ubuntu we need to add 'precise main'
if release[1].lower() == 'ubuntu':
repo_path += ' precise main'
connection.execute(
insert_query,
release_id=release[0],
repo_metadata=(
'{{ "nailgun": "{0}" }}'.format(repo_path)),
puppet_manifests_source=(
'rsync://{MASTER_IP}:/puppet/manifests/'.format(
MASTER_IP=settings.MASTER_IP)),
puppet_modules_source=(
'rsync://{MASTER_IP}:/puppet/modules/'.format(
MASTER_IP=settings.MASTER_IP)),
)
def dump_master_node_settings(connection, fixture_path=None):
    """Generate a uuid for the master node installation and insert it,
    together with the settings read from the fixture file, into the
    master_node_settings table.

    Arguments:
    connection - a database connection
    fixture_path - optional path to the settings fixture; defaults to
        fixtures/master_node_settings.yaml relative to this module
    """
    if not fixture_path:
        fixture_path = os.path.join(os.path.dirname(__file__), '..',
                                    'fixtures', 'master_node_settings.yaml')
    with open(fixture_path, 'r') as fixture_file:
        fixt = load_fixture(fixture_file, loader=yaml)
    # Only the first fixture object's settings are serialised.
    # NOTE(review): this local name shadows the module-level nailgun
    # "settings" import inside this function — confirm intentional.
    settings = json.dumps(fixt[0]["fields"]["settings"])
    generated_uuid = str(uuid.uuid4())
    insert_query = text(
        "INSERT INTO master_node_settings (master_node_uid, settings) "
        "  VALUES (:master_node_uid, :settings)"
    )
    connection.execute(insert_query, master_node_uid=generated_uuid,
settings=settings)
| [
"akirilochkin@mirantis.com"
] | akirilochkin@mirantis.com |
32e21376754df4221bc846a99c412c84967a1a96 | 2a119e23ac73cd2fa0dbbb96cd01045562ab0835 | /citties.py | 6a75b6fa09e66271a00bf425b921fa57c812317c | [] | no_license | pavel-malin/python_work | 94d133834348350251684ef366923698e5e2f289 | d860e5d15116771e3cbe9e316fc032a28a3029e2 | refs/heads/master | 2020-04-28T05:17:45.067766 | 2019-04-20T19:39:43 | 2019-04-20T19:39:43 | 175,014,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # Executing a loop by value is correct and
# (sentinel-controlled loop: entering 'quit' ends it via break)
prompt = "\nPlease enter the name of a city you have visited: "
prompt += "\n(Enter 'quit' when you are finished.) "

while True:
    city = input(prompt)
    # Guard clause instead of if/else: stop on the sentinel, otherwise
    # echo the city back in title case.
    if city == 'quit':
        break
    print("I'd love to go to " + city.title() + "!")
| [
"noreply@github.com"
] | pavel-malin.noreply@github.com |
6d696fd319ed1d5bafd79955314f0e5c2a0591bd | 6adf334dd2a074686447e15898ed3fff793aab48 | /02_Two_Pointers/13_shortest_window_sort.py | 0649343b6278c871289f402481d7b1642ea3c9ed | [] | no_license | satyapatibandla/Patterns-for-Coding-Interviews | 29ac1a15d5505293b83a8fb4acf12080851fe8d6 | b3eb2ac82fd640ecbdf3654a91a57a013be1806f | refs/heads/main | 2023-05-07T07:56:01.824272 | 2021-06-01T04:02:50 | 2021-06-01T04:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # Time O(N) | Space O(1)
def shortest_window_sort(arr):
    """Length of the shortest subarray whose sorting makes arr sorted.

    Returns 0 when arr is already in non-decreasing order.
    Runs in O(N) time using O(1) extra space.
    """
    n = len(arr)
    # Advance from the left while the prefix is non-decreasing.
    low = 0
    while low < n - 1 and arr[low] <= arr[low + 1]:
        low += 1
    if low == n - 1:  # the whole array is already sorted
        return 0
    # Retreat from the right while the suffix is already in place.
    high = n - 1
    while high > 0 and arr[high] >= arr[high - 1]:
        high -= 1
    # Min and max of the unsorted core, found in one pass.
    window_min = float('inf')
    window_max = float('-inf')
    for idx in range(low, high + 1):
        window_min = min(window_min, arr[idx])
        window_max = max(window_max, arr[idx])
    # Grow the window over any neighbours that the core's extremes
    # would have to displace once sorted.
    while low > 0 and arr[low - 1] > window_min:
        low -= 1
    while high < n - 1 and arr[high + 1] < window_max:
        high += 1
    return high - low + 1
def main():
    """Print the shortest-window length for a few demonstration arrays."""
    samples = (
        [1, 2, 5, 3, 7, 10, 9, 12],
        [1, 3, 2, 0, -1, 7, 10],
        [1, 2, 3],
        [3, 2, 1],
    )
    for sample in samples:
        print(shortest_window_sort(sample))


if __name__ == '__main__':
    main()
| [
"shash873@gmail.com"
] | shash873@gmail.com |
bbaae8ba9d96ccfec2270b1e13f5afbc2dd4d684 | ea01ed735850bf61101b869b1df618d3c09c2aa3 | /python基础/网络编程/套接字/tcp_Socket/小作业简单模拟ssh/存在粘包问题的ssh/客户端粘包/client.py | 965fe7a1d0a3a342ab614d08ffbe2b145e78ac05 | [] | no_license | liuzhipeng17/python-common | 867c49ac08719fabda371765d1f9e42f6dd289b9 | fb44da203d4e3a8304d9fe6205e60c71d3a620d8 | refs/heads/master | 2021-09-27T10:39:45.178135 | 2018-11-08T01:49:33 | 2018-11-08T01:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # -*- coding: utf-8 -*-
import socket
# TCP client used to demonstrate the "sticky packet" problem: the two
# back-to-back send() calls below carry no message framing, so the
# server may receive both payloads merged into a single recv() read.
phone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
phone.connect(('127.0.0.1', 8080))
# Two small consecutive sends; TCP is a byte stream, not message-based.
phone.send('hello'.encode(encoding='utf-8'))
phone.send('world'.encode(encoding='utf-8'))
# data = phone.recv(1024)
# print("服务端返回内容:%s" % data)
# phone.close() | [
"liucpliu@sina.cn"
] | liucpliu@sina.cn |
c47a29657f2c9622c4d133a0ed587fd61faf21bb | a5fe2130ea434f958f6151cd4d8c92d43f1c1ca1 | /src/app/urls.py | 3e64476466f19c1ff036b6caa5569139515fdffa | [] | no_license | DavidArmendariz/django-movies-app | 44da33cc200773ef473ea21f67a1dfff57ea0e96 | b77f1f538bae4a906d0b00597fef8fef97ea409b | refs/heads/master | 2023-03-11T16:43:02.956765 | 2021-02-23T04:28:17 | 2021-02-23T04:28:17 | 338,206,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.contrib import admin
from django.urls import path, include

# Root URLconf: the Django admin plus everything from the movies app
# mounted at the site root.
urlpatterns = [path("admin/", admin.site.urls), path("", include("movies.urls"))]
| [
"darmendariz1998@outlook.com"
] | darmendariz1998@outlook.com |
727bfa21f987a7f6a8b67b48d83909fe08a8b388 | dbaad22aa8aa6f0ebdeacfbe9588b281e4e2a106 | /專03-MySQL/可支配所得/03-延續專2-可支配所得XML-匯入.py | 21efdf3f8e63ba0884679301caad1fe8c6af6a50 | [
"MIT"
] | permissive | ccrain78990s/Python-Exercise | b4ecec6a653afd90de855a64fbf587032705fa8f | a9d09d5f3484efc2b9d9a53b71307257a51be160 | refs/heads/main | 2023-07-18T08:31:39.557299 | 2021-09-06T15:26:19 | 2021-09-06T15:26:19 | 357,761,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,306 | py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = "Chen"
"""
家庭收支調查-所得收入者職業別平均每人"所得收入總計" ↓ 2020-10-27
https://data.gov.tw/dataset/131148
家庭收支調查-所得收入者職業別平均每人"非消費支出" ↓ 2020-11-18
https://data.gov.tw/dataset/132281
家庭收支調查-所得收入者職業別平均每人"可支配所得" ↓ 2020-09-23
https://data.gov.tw/dataset/130027
主要欄位 ↓
Year、臺灣地區、民意代表及主管及經理人員、專業人員、技術員及助理專業人員、
事務支援人員、服務及銷售工作人員、農林漁牧業生產人員、技藝有關工作人員、
機械設備操作及組裝人員、基層技術工及勞力工、其他
"""
import sys
from xml.etree import ElementTree
import urllib.request as httplib # 3.x
import pymysql as MySQLdb # pip install pymysql
db = MySQLdb.connect(host="127.0.0.1", user="admin", passwd="admin", db="mydatabase")
cursor = db.cursor()
try:
    # Family income survey - average "total income" per income earner,
    # by occupation (last updated 2020-10-27):
    # url = "https://www.dgbas.gov.tw/public/data/open/localstat/086-%E6%89%80%E5%BE%97%E6%94%B6%E5%85%A5%E8%80%85%E8%81%B7%E6%A5%AD%E5%88%A5%E5%B9%B3%E5%9D%87%E6%AF%8F%E4%BA%BA%E6%89%80%E5%BE%97%E6%94%B6%E5%85%A5%E7%B8%BD%E8%A8%88.xml"
    # Non-consumption expenditure dataset:
    # url =https://www.dgbas.gov.tw/public/data/open/localstat/088-%E6%89%80%E5%BE%97%E6%94%B6%E5%85%A5%E8%80%85%E8%81%B7%E6%A5%AD%E5%88%A5%E5%B9%B3%E5%9D%87%E6%AF%8F%E4%BA%BA%E9%9D%9E%E6%B6%88%E8%B2%BB%E6%94%AF%E5%87%BA.xml
    # Disposable income dataset (the one fetched below):
    url="https://www.dgbas.gov.tw/public/data/open/localstat/085-%E6%89%80%E5%BE%97%E6%94%B6%E5%85%A5%E8%80%85%E8%81%B7%E6%A5%AD%E5%88%A5%E5%B9%B3%E5%9D%87%E6%AF%8F%E4%BA%BA%E5%8F%AF%E6%94%AF%E9%85%8D%E6%89%80%E5%BE%97.xml"
    req = httplib.Request(url)
    response = httplib.urlopen(req)
    if response.code == 200:
        if (sys.version_info > (3, 0)):
            # contents=response.read().decode(response.headers.get_content_charset())
            contents = response.read().decode("UTF-8")
        else:
            contents = response.read()
        print(contents)
        # Parse the XML payload; each <Data> element is one yearly record.
        root = ElementTree.fromstring(contents)
        t0 = root.findall("Data")
        t1 = root.findall("Data/Year")
        t2 = root.findall("Data/臺灣地區")
        t3 = root.findall("Data/民意代表及主管及經理人員")
        t4 = root.findall("Data/專業人員")
        t5 = root.findall("Data/技術員及助理專業人員")
        t6 = root.findall("Data/事務支援人員")
        t7 = root.findall("Data/服務及銷售工作人員")
        t8 = root.findall("Data/農林漁牧業生產人員")
        t9 = root.findall("Data/技藝有關工作人員")
        t10 = root.findall("Data/機械設備操作及組裝人員")
        t11 = root.findall("Data/基層技術工及勞力工")
        t12 = root.findall("Data/其他")
        #listYear = []
        #for x in range(0, len(t0)):
        #    listYear.append(t1[x].text)
        # Print every record for inspection.
        for x in range(0, len(t0)):
            print("年份:", t1[x].text)
            print("台灣地區平均每人所得收入總計:", t2[x].text)
            print(" ")
            print("民意代表及主管及經理人員:", t3[x].text)
            print("專業人員:", t4[x].text)
            print("技術員及助理專業人員:", t5[x].text)
            print("事務支援人員:", t6[x].text)
            print("服務及銷售工作人員:", t7[x].text)
            print("農林漁牧業生產人員:", t8[x].text)
            print("技藝有關工作人員:", t9[x].text)
            print("機械設備操作及組裝人員:", t10[x].text)
            print("基層技術工及勞力工:", t11[x].text)
            print("其他:", t12[x].text)
            print("----------------------------")
            print(" ")
        # Insert one row per record.  NOTE(review): the SQL is built by
        # string substitution rather than parameter binding; the values
        # come from the government XML feed, but parameterized queries
        # would be safer if the source ever changes.
        for x in range(0,len(t0)):
            str1 = "INSERT INTO `income` (`id`, `year`, `taiwan`, `supervisor`, `professionals`, `assistant`, `support`, `service`, `production`, `artisan`, `assembler`, `labor`, `other`)" + \
                " VALUES ('[value-1]','[value-2]','[value-3]','[value-4]','[value-5]','[value-6]','[value-7]','[value-8]','[value-9]','[value-10]','[value-11]','[value-12]','[value-13]')"
            str1 = str1.replace("[value-1]", "null")
            str1 = str1.replace("[value-2]", t1[x].text)
            str1 = str1.replace("[value-3]", t2[x].text)
            str1 = str1.replace("[value-4]", t3[x].text)
            str1 = str1.replace("[value-5]", t4[x].text)
            str1 = str1.replace("[value-6]", t5[x].text)
            str1 = str1.replace("[value-7]", t6[x].text)
            str1 = str1.replace("[value-8]", t7[x].text)
            str1 = str1.replace("[value-9]", t8[x].text)
            str1 = str1.replace("[value-10]", t9[x].text)
            str1 = str1.replace("[value-11]", t10[x].text)
            str1 = str1.replace("[value-12]", t11[x].text)
            str1 = str1.replace("[value-13]", t12[x].text)
            # print(str1)
            cursor.execute(str1)
    db.commit()
    db.close()
    print("====資料量====")
    print(len(t0))
# Catch only real errors (a bare "except:" would also swallow
# KeyboardInterrupt/SystemExit) and report the actual cause instead of
# discarding it.
except Exception as e:
    print(e)
    print("error")
| [
"47476106+ccrain78990s@users.noreply.github.com"
] | 47476106+ccrain78990s@users.noreply.github.com |
a5d4381d2e7fb369853b31614a859d2ff6b32d68 | 40cc021bfa13a1fc6b3d94ac1628d404a0800c10 | /functional_tests/functional_test.py | 06d96ff97dc5dc5a0fbc18c6a413da1cfa12bd56 | [] | no_license | dvoong/lins-alterations | ffffb7da1cc8c6608764c7f90a75e6fa5d9a72af | d0eac4e5647e716918ff3c075fccc6fdff2cc8af | refs/heads/master | 2021-01-17T20:30:16.355168 | 2016-08-08T21:06:34 | 2016-08-08T21:06:34 | 61,430,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from selenium import webdriver
from django.test import LiveServerTestCase
class FunctionalTest(LiveServerTestCase):
    """Base class for Selenium functional tests: each test gets a live
    test server (from LiveServerTestCase) plus a fresh Chrome browser."""
    def setUp(self):
        # A new WebDriver session per test keeps tests isolated.
        self.browser = webdriver.Chrome()
    def tearDown(self):
        # Always quit so ChromeDriver processes do not leak between runs.
        self.browser.quit()
| [
"voong.david@gmail.com"
] | voong.david@gmail.com |
843c46e20f9e27d506f7e0513e60ff58cfd8a899 | bac37a96ead59a3c4caaac63745d5748f5060195 | /数据库编程/03操作sqlite3数据库插入多条数据.py | 50dd4ad9ddd086e75837820c57fc0f898c149c69 | [] | no_license | pod1019/python_learning | 1e7d3a9c10fc8c1b4e8ff31554d495df518fb385 | a15213d33a253c3a77ab0d5de9a4f937c27693ca | refs/heads/master | 2020-09-14T11:11:53.100591 | 2020-04-11T04:00:27 | 2020-04-11T04:00:27 | 223,112,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # 导入模块
import sqlite3
# Open a connection to the demo database file.
con = sqlite3.connect('D:/python_learning/数据库编程/sqlite3demo/demo.db')
# Create a cursor object.
cur = con.cursor()
# Build the INSERT statement.
# The pno column is auto-increment, so it is omitted; "?" placeholders
# stand in for the (pname, age) values.
sql = 'insert into t_person(pname,age) values (?,?)'
# Execute the multi-row INSERT.
try:
    # executemany() inserts several rows at once and takes a LIST of
    # parameter tuples -- note the difference from a single-row insert,
    # where execute() takes one tuple.
    cur.executemany(sql,[("小李",23),("小花",30),("小明",28)])
    con.commit() # commit the transaction
    print("插入多条数据成功")
except Exception as e:
    print(e)
    con.rollback() # on failure, roll the transaction back
    print("插入多条数据失败")
finally:
    # close the cursor
    cur.close()
    # close the database connection
con.close() | [
"pod1019@163.com"
] | pod1019@163.com |
be15e630f100b6c734ffc9282a5a15fa6666b4bc | d5ad13232e3f1ced55f6956bc4cbda87925c8085 | /RNAseqMSMS/21-rna-seq-sv-stats/22-sv-num-plot.py | 7a1af076451c2238ba8f8a0ef5d277e0c20d152a | [] | no_license | arvin580/SIBS | c0ba9a8a41f59cb333517c286f7d80300b9501a2 | 0cc2378bf62359ec068336ea4de16d081d0f58a4 | refs/heads/master | 2021-01-23T21:57:35.658443 | 2015-04-09T23:11:34 | 2015-04-09T23:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | import math
# Read the per-gene SV event counts (tab-separated: gene, count) and
# build D, a histogram mapping event-count -> number of genes.
inFile = open('HeLa-Deletion-Duplication-Inversion-Translocation-Gene-more_than_two-num')
D = {}
for line in inFile:
    line = line.strip()
    fields = line.split('\t')
    num = int(fields[1])
    D.setdefault(num,0)
    D[num]+=1
inFile.close()
# Flatten the histogram into parallel lists: x values (event counts),
# y values (gene counts) and log2-scaled y values (L3 is unused below).
L1 = []
L2 = []
L3 = []
for k in D:
    L1.append(k)
    L2.append(D[k])
    L3.append(math.log(D[k],2))
# Select the non-interactive Agg backend so the figure renders without
# a display; this must happen before importing pyplot.
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
#a=PyPlot('HeLa-Deletion-Duplication-Inversion-Translocation-Gene-more_than_two-num.pdf')
#a.plot2([L1,L2])
# Axis limits for the plot.
ymax= 900
ymin = -10
xmax = 65
xmin = 0
fig = plt.figure()
ax=fig.add_subplot(111)
ax.plot(L1,L2,marker='*',color='magenta')
ax.set_ylim(ymin,ymax)
ax.set_xlim(xmin,xmax)
ax.set_xlabel('Number of Structural Variation Events')
ax.set_ylabel('Number of Genes')
# NOTE(review): the list + range(...) concatenations below only work on
# Python 2; under Python 3 they would need list(range(...)) -- confirm
# the intended interpreter.
ax.set_yticklabels(['','1']+range(200,ymax,200))
ax.set_xticklabels(['','1']+range(10,xmax,10))
ax.set_yticks([-10,1]+range(200,ymax,200))
ax.set_xticks([0,1]+range(10,xmax,10))
plt.savefig('HeLa-Deletion-Duplication-Inversion-Translocation-Gene-more_than_two-num.pdf')
| [
"sunahnice@gmail.com"
] | sunahnice@gmail.com |
d908966e8967f4625b79dcef07c794977cb2bf18 | 17966824e4bac02b66a802091b3d6277131995a2 | /子集1.py | d60e5dead1f874f1c571538b5194d51748e1c203 | [] | no_license | to2bage/mixx | 8f00c2c888cc3dfe7f681077118121a6c6a3982b | c09337cc162449a8246b29b419187b38075a1268 | refs/heads/master | 2021-07-15T09:38:10.426660 | 2017-10-18T05:32:54 | 2017-10-18T05:32:54 | 104,962,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | class Solution:
"""
@param: nums: A set of numbers
@return: A list of lists
"""
def subsets(self, nums):
ret = []
nums.sort()
for i in range(len(nums) + 1):
combs = self._subsets(nums, 0, i)
for comb in combs:
ret.append(comb)
return ret
# 数组numbers, 从位置pos开始, 包含k个元素的所有组合
def _subsets(self, numbers, pos, k):
rect = []
if k == 0: #包含0个元素,意味着子集是空集
return [[]]
for i in range(pos, len(numbers), 1): #从第i个元素开始的子集
combs = self._subsets(numbers, i + 1, k - 1) #简化为: 从第i+1个元素开始的子集
if combs:
if combs != [[]]:
for comb in combs:
new_comb = [numbers[i]] #首先添加第i个元素到子集当中
new_comb.extend(comb) #再添加从第i+1个元素开始的子集
rect.append(new_comb)
else:
rect.append([numbers[i]])
return rect
if __name__ == "__main__":
    # Quick smoke test: print all subsets of [1, 2, 3], smallest first.
    nums = [1,2,3]
    s = Solution()
    r = s.subsets(nums)
print(r) | [
"to2bage@hotmail.com"
] | to2bage@hotmail.com |
642206b159003756166684fbe8234e97aa9c6180 | 073f486ffe0397302a4b54594cf86daea50123f3 | /Cap04-variaveis/variaveis_python.py | 50d3022e569fc9c4abaade462dc771e2617b7033 | [] | no_license | frclasso/CodeGurus_Python_mod1-turma2_2019 | 314f9cf0c0bdcefbf5cd786998b8c914da3cf9d0 | f04c5857a97a0aab1e532e422020b4aa0d365dd6 | refs/heads/master | 2020-05-22T17:04:10.440217 | 2019-07-13T11:11:28 | 2019-07-13T11:11:28 | 186,445,385 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py |
# variables
lang = 'Python'
print(lang)
print(id(lang))
print(type(lang)) # string
ord = 1
print(type(ord)) # integer/int
ord2 = 2.0
print(type(ord2)) # float
ord3 = 2+3j
print(type(ord3)) # complex
numero = 1000.000000000
print('{:.1f}'.format(numero))
# compound (collection) variables
listas = []
mercado = ['leite', 'pão','café', 'óleo', 'papel higienico']
print(mercado)
# indexing
print(mercado[0])
print(mercado[1])
print(mercado[2])
print(mercado[-1])
# slices
print(mercado[1:])
print(mercado[2:])
print(mercado[2:4])
# modify
mercado[0] = 'cerveja'
print(mercado)
# add
mercado.append('laranja') # appends to the end of the list
print(mercado)
mercado.insert(0, 'Vinho')
print(mercado)
vegetais = ['cenoura', 'tomate', 'cebola']
mercado.extend(vegetais)
print(mercado)
print()
# clone
mercado2 = mercado[:]
mercado[0]='queijo'
print(mercado2)
print(id(mercado2))
print()
print(mercado)
print(id(mercado))
m2 = mercado.copy()
print(m2)
print(id(m2))
# removal: pop, del, remove
print(mercado2)
mercado2.pop() # removes the last element
print(mercado2)
mercado2.pop(0) # by index
print(mercado2)
mercado2.remove('tomate')
print(mercado2)
del mercado2[0]
print(mercado2)
# mercado2 = []
# print(mercado2)
del mercado2[:]
print(mercado2)
# del mercado2
# print(mercado2)
print('-'*70)
#### Tuples - () parentheses
shoppinglist = ('tennis', 'meias', 'cuecas', 'camisetas')
print(shoppinglist)
print(shoppinglist[0])
print(shoppinglist[-1])
print(shoppinglist[1:3])
print(id(shoppinglist))
shoppinglist2 = shoppinglist[1:2]
print(shoppinglist)
print(id(shoppinglist))
print('-'*70)
print(vegetais * 2)
dupla = shoppinglist * 2
print(id(dupla))
print(dupla)
print(shoppinglist + shoppinglist2)
print(mercado + vegetais)
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
3501629570f43c265e2db96c74ccdebae5329084 | 452b8b849e080cda5a26f4018cafa5a674ff7c20 | /froide/foirequest/south_migrations/0008_auto__chg_field_foirequest_status.py | 069d28864ec44dfbcda830afab2a5479ff5e1844 | [
"MIT"
] | permissive | okffi/tietopyynto | 1262dcaf748c41b49be4a774be552fc75fc9b336 | 66b7e7dbf3c3395d79af3da85b3b58f01fad62dc | refs/heads/tietopyynto | 2021-01-17T21:07:04.829856 | 2016-10-30T19:26:53 | 2016-10-30T19:26:53 | 14,255,294 | 3 | 2 | MIT | 2021-01-05T11:51:18 | 2013-11-09T10:19:16 | Python | UTF-8 | Python | false | false | 16,045 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from froide.helper.auth_migration_util import USER_DB_NAME
APP_MODEL, APP_MODEL_NAME = 'account.User', 'account.user'
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FoiRequest.status'
db.alter_column('foirequest_foirequest', 'status', self.gf('django.db.models.fields.CharField')(max_length=50))
def backwards(self, orm):
# Changing field 'FoiRequest.status'
db.alter_column('foirequest_foirequest', 'status', self.gf('django.db.models.fields.CharField')(max_length=25))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
APP_MODEL_NAME: {
'Meta': {'object_name': 'User', 'db_table': "'%s'" % USER_DB_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foirequest.foiattachment': {
'Meta': {'ordering': "('name',)", 'object_name': 'FoiAttachment'},
'belongs_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiMessage']", 'null': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'foirequest.foievent': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'FoiEvent'},
'context_json': ('django.db.models.fields.TextField', [], {}),
'event_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']", 'null': 'True', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True', 'blank': 'True'})
},
'foirequest.foimessage': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'FoiMessage'},
'html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_postal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_response': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'original': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'plaintext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'redacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiRequest']"}),
'sender_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sender_public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']", 'null': 'True', 'blank': 'True'}),
'sender_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
'foirequest.foirequest': {
'Meta': {'ordering': "('last_message',)", 'object_name': 'FoiRequest'},
'costs': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_message': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.FoiLaw']", 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']", 'null': 'True', 'blank': 'True'}),
'refusal_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'resolved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'secret_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True'}),
'visibility': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'foirequest.publicbodysuggestion': {
'Meta': {'ordering': "('timestamp',)", 'object_name': 'PublicBodySuggestion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_body': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBody']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foirequest.FoiRequest']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % APP_MODEL, 'null': 'True'})
},
'publicbody.foilaw': {
'Meta': {'object_name': 'FoiLaw'},
'combined': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'letter_end': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'letter_start': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'max_response_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_response_time_unit': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'meta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '3'}),
'refusal_reasons': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'publicbody.publicbody': {
'Meta': {'object_name': 'PublicBody'},
'_created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_creators'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
'_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_updaters'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classification_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'laws': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number_of_requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'descendants'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBodyTopic']", 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website_dump': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'publicbody.publicbodytopic': {
'Meta': {'object_name': 'PublicBodyTopic'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foirequest']
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
1b01898879304e77c804c5595ccc87d222693ea1 | 7a23870e9b0b56b112f634d26760282ff7a4f46c | /Projects/Archived Tk code/From extensions folder/Pmw/Pmw_1_3/demos/NestedDialogs.py | 7d5eb03b725e4ec779920d6fee5ea3055e19a2b5 | [] | no_license | leo-editor/leo-editor-contrib | 0c671998c4ec7fd7c4ce890a201395afe340481b | 28c22721e1bc313c120a8a6c288893bc566a5c67 | refs/heads/master | 2023-06-25T04:28:54.520792 | 2023-06-14T20:18:12 | 2023-06-14T20:18:12 | 16,771,641 | 6 | 6 | null | 2023-06-09T11:26:42 | 2014-02-12T15:28:36 | Python | UTF-8 | Python | false | false | 1,991 | py | title = 'Modal dialog nesting demonstration'
# Import Pmw from this directory tree.
import sys
sys.path[:0] = ['../../..']
import Tkinter
import Pmw
class Demo:
def __init__(self, parent):
# Create button to launch the dialog.
w = Tkinter.Button(parent, text = 'Show first dialog',
command = self.showFirstDialog)
w.pack(padx = 8, pady = 8)
self.timerId = None
self.dialog1 = Pmw.MessageDialog(parent,
message_text = 'This is the first modal dialog.\n' +
'You can see how dialogs nest by\n' +
'clicking on the "Next" button.',
title = 'Dialog 1',
buttons = ('Next', 'Cancel'),
defaultbutton = 'Next',
command = self.next_dialog)
self.dialog1.withdraw()
self.dialog2 = Pmw.Dialog(self.dialog1.interior(),
title = 'Dialog 2',
buttons = ('Cancel',),
deactivatecommand = self.cancelTimer,
defaultbutton = 'Cancel')
self.dialog2.withdraw()
w = Tkinter.Label(self.dialog2.interior(),
text = 'This is the second modal dialog.\n' +
'It will automatically disappear shortly')
w.pack(padx = 10, pady = 10)
def showFirstDialog(self):
self.dialog1.activate()
def cancelTimer(self):
if self.timerId is not None:
self.dialog2.after_cancel(self.timerId)
self.timerId = None
def deactivateSecond(self):
self.timerId = None
self.dialog2.deactivate()
def next_dialog(self, result):
if result != 'Next':
self.dialog1.deactivate()
return
self.timerId = self.dialog2.after(3000, self.deactivateSecond)
self.dialog2.activate()
######################################################################
# Manual smoke test: run this file directly to show the demo inside a
# bare Tk root window, with an Exit button to quit.
if __name__ == '__main__':
    root = Tkinter.Tk()
    Pmw.initialise(root)
    root.title(title)

    exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy)
    exitButton.pack(side = 'bottom')
    widget = Demo(root)
    root.mainloop()
| [
"edreamleo@gmail.com"
] | edreamleo@gmail.com |
e9dfda94eae7e425e96c91ab025607701adb78f6 | 633fdb0bd4c8246dfabb27c680ede42879922664 | /music/views.py | acec97bbd24b616bd30134972d6c7b3cca24b691 | [] | no_license | xarala221/django-api | 0b134ad6b2c05b77a54390f96d0c7d1b30f06d17 | 52498eb5272391d2b27fd8ee8ffabec3f4b6d833 | refs/heads/master | 2020-04-05T16:46:52.976200 | 2018-11-10T22:53:58 | 2018-11-10T22:53:58 | 157,028,142 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from rest_framework import generics
from .models import Songs
from .serializers import SongsSerializer
class ListSongsView(generics.ListAPIView):
    """
    Read-only list endpoint: GET returns every Songs row, serialized
    with SongsSerializer.  (Provides a get method handler.)
    """
    # Unfiltered queryset: the endpoint exposes all songs.
    queryset = Songs.objects.all()
    serializer_class = SongsSerializer
"xaralaxarala@gmail.com"
] | xaralaxarala@gmail.com |
5e5f0a055c727a0333d32942d973e0d30bc1a7f0 | ea4e3ac0966fe7b69f42eaa5a32980caa2248957 | /download/unzip/pyobjc/pyobjc-14.1.1/pyobjc/stable/pyobjc-framework-Cocoa/PyObjCTest/test_nscoder.py | 22ae4552930af5aacfd31164ce73dd9af98a1053 | [
"MIT"
] | permissive | hyl946/opensource_apple | 36b49deda8b2f241437ed45113d624ad45aa6d5f | e0f41fa0d9d535d57bfe56a264b4b27b8f93d86a | refs/heads/master | 2023-02-26T16:27:25.343636 | 2020-03-29T08:50:45 | 2020-03-29T08:50:45 | 249,169,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,190 | py | import unittest
import objc
from Foundation import *
from objc.test.testbndl import PyObjC_TestClass4
class TestNSCoderUsage(unittest.TestCase):
    """Round-trip a Python-defined NSObject subclass through
    NSArchiver/NSUnarchiver to exercise NSCoder's typed
    encode/decode methods from Python."""

    if not hasattr(unittest.TestCase, 'assertAlmostEquals'):
        # Shim: only defined when this unittest lacks assertAlmostEquals.
        # XXX Move to a PyObjC unittest module?
        def assertAlmostEquals(self, val1, val2):
            self.assert_ (abs(val1 - val2) < 0.000001)

    def testUsage(self):
        class CoderClass1 (NSObject):
            # Archives an int, a double, a 4-element double array and the
            # first 5 bytes of a string; initWithCoder_ restores them in
            # the same order.
            def encodeWithCoder_(self, coder):
                # NSObject does not implement NSCoding, no need to
                # call superclass implementation:
                #   super(CoderClass1, self).encodeWithCoder_(coder)
                coder.encodeValueOfObjCType_at_(objc._C_INT, 2)
                coder.encodeValueOfObjCType_at_(objc._C_DBL, 2.0)
                coder.encodeArrayOfObjCType_count_at_(objc._C_DBL, 4, (1.0, 2.0, 3.0, 4.0))
                coder.encodeBytes_length_("hello world!", 5)

            def initWithCoder_(self, coder):
                # NSObject does not implement NSCoding, no need to
                # call superclass implementation:
                #   self = super(CodeClass1, self).initWithCoder_(coder)
                self = self.init()
                # Decodes must mirror the encode order above exactly.
                self.intVal = coder.decodeValueOfObjCType_at_(objc._C_INT)
                self.dblVal = coder.decodeValueOfObjCType_at_(objc._C_DBL)
                self.dblArray = coder.decodeArrayOfObjCType_count_at_(objc._C_DBL, 4)
                self.decodedBytes = coder.decodeBytesWithReturnedLength_()
                return self

        origObj = CoderClass1.alloc().init()
        data = NSMutableData.data()
        # Archive into an in-memory buffer ...
        archiver = NSArchiver.alloc().initForWritingWithMutableData_(data)
        archiver.encodeObject_(origObj)
        # ... then unarchive a fresh copy and check every field survived.
        archiver = NSUnarchiver.alloc().initForReadingWithData_(data)
        newObj = archiver.decodeObject()
        self.assertEquals(newObj.intVal, 2)
        self.assertAlmostEquals(newObj.dblVal, 2.0)
        self.assertEquals(len(newObj.dblArray), 4)
        self.assertAlmostEquals(newObj.dblArray[0], 1.0)
        self.assertAlmostEquals(newObj.dblArray[1], 2.0)
        self.assertAlmostEquals(newObj.dblArray[2], 3.0)
        self.assertAlmostEquals(newObj.dblArray[3], 4.0)
        self.assertEquals(newObj.decodedBytes[0], "hello")
        self.assertEquals(newObj.decodedBytes[1], 5)
class MyCoder (NSCoder):
    """Stub NSCoder: records every encode call in ``self.coded`` and
    answers decode calls with fixed canned values, so the Python
    implementation can be driven from Objective-C and asserted on."""

    def init(self):
        self = super(MyCoder, self).init()
        if self is None: return None
        # Log of encode calls as ("value"|"array"|"bytes", ...) tuples.
        self.coded = []
        return self

    def encodeValueOfObjCType_at_(self, tp, value):
        self.coded.append( ("value", tp, value) )

    def encodeArrayOfObjCType_count_at_(self, tp, cnt, value):
        self.coded.append( ("array", tp, cnt, value) )

    def encodeBytes_length_(self, bytes, length):
        self.coded.append( ("bytes", bytes, length) )

    def decodeValueOfObjCType_at_(self, tp):
        # Canned scalars: 42 for int ('i'), 1.5 for double ('d');
        # any other type code implicitly returns None.
        if tp == 'i':
            return 42
        elif tp == 'd':
            return 1.5

    def decodeArrayOfObjCType_count_at_(self, tp, cnt):
        return range(cnt)

    def decodeBytesWithReturnedLength_(self):
        return ("ABCDEabcde", 10)
class TestPythonCoder(unittest.TestCase):
    #
    # This test accesses a NSCoder implemented in Python from Objective-C
    # (via the PyObjC_TestClass4 helper from objc.test.testbndl).
    #
    # The tests only use those methods that require a custom IMP-stub.
    #
    def testEncoding(self):
        """Objective-C encodes into the Python coder; check the log."""
        coder = MyCoder.alloc().init()
        o = PyObjC_TestClass4.alloc().init()
        o.encodeWithCoder_(coder)
        self.assertEquals(coder.coded,
            [
                ("value", "d", 1.5),
                ("array", "i", 4, (3,4,5,6)),
                ("bytes", "hello world", 11),
            ])

    def testDecoding(self):
        """Objective-C decodes MyCoder's canned values; check each one."""
        coder = MyCoder.alloc().init()
        o = PyObjC_TestClass4
        self.assertEquals(o.fetchInt_(coder), 42)
        self.assertEquals(o.fetchDouble_(coder), 1.5)
        d = o.fetchData_(coder)
        self.assertEquals(d.length(), 10)
        self.assertEquals(str(d.bytes()), "ABCDEabcde")
        d = o.fetchArray_(coder)
        self.assertEquals(tuple(range(10)), tuple(d))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main( )
| [
"hyl946@163.com"
] | hyl946@163.com |
50edcb60e68fe42f0f72122d986487e8ebf59fec | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /V/ValidPalindromeII.py | 73923540643cfb6825921f25a0d4fe449e430b43 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | '''
-Easy-
Given a non-empty string s, you may delete at most one character. Judge whether you can
make it a palindrome.
Example 1:
Input: "aba"
Output: True
Example 2:
Input: "abca"
Output: True
Explanation: You could delete the character 'c'.
Note:
The string will only contain lowercase characters a-z. The maximum length of the string is 50000.
'''
class Solution(object):
    def validPalindrome(self, s):
        """
        Return True if ``s`` can be made a palindrome by deleting
        at most one character.

        :type s: str
        :rtype: bool
        """
        def is_palindrome(lo, hi):
            # Plain two-pointer palindrome check on s[lo..hi].
            while lo < hi:
                if s[lo] != s[hi]:
                    return False
                lo += 1
                hi -= 1
            return True

        lo, hi = 0, len(s) - 1
        while lo < hi:
            if s[lo] == s[hi]:
                lo += 1
                hi -= 1
            else:
                # First mismatch: we are allowed to drop exactly one of
                # the two offending ends, then the rest must match.
                return is_palindrome(lo + 1, hi) or is_palindrome(lo, hi - 1)
        return True
if __name__ == "__main__":
print(Solution().validPalindrome("abca"))
print(Solution().validPalindrome("aba"))
print(Solution().validPalindrome("abvea"))
| [
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
04c6ed85fac8a1c9ef45fb35b18a08e8bc9c1389 | a96af1535c19244640b9d137ede80f61569d6823 | /tests/test_flows/test_entry_flow.py | eb6d7a5f170af2b7ff46b1deb10707f8799c5e71 | [
"BSD-2-Clause-Views"
] | permissive | emmamcbryde/summer-1 | 260d2c2c0085b5181f592b3bd8a186902f923135 | 3ea377b3352c82edaed95ea1e5683b9a130fe9e6 | refs/heads/master | 2023-04-23T20:06:52.800992 | 2021-04-29T05:29:29 | 2021-04-29T05:29:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | import pytest
from summer import AgeStratification, Compartment, Stratification, adjust
from summer.flows import BaseEntryFlow
class EntryFlow(BaseEntryFlow):
"""Basic entry flow used to test BaseEntryFlow stratification."""
def get_net_flow(self, compartment_values, time):
return 1
def test_entry_flow_stratify__when_not_applicable():
flow = EntryFlow(
name="flow",
dest=Compartment("I"),
param=2,
adjustments=[],
)
strat = Stratification(
name="location",
strata=["1", "2", "3"],
compartments=["R"],
)
# Expect no stratification because compartment not being stratified.
new_flows = flow.stratify(strat)
assert new_flows == [flow]
def test_entry_flow_stratify__with_no_flow_adjustments():
flow = EntryFlow(
name="flow",
dest=Compartment("I"),
param=2,
adjustments=[],
)
strat = Stratification(
name="location",
strata=["1", "2"],
compartments=["I", "R"],
)
new_flows = flow.stratify(strat)
assert len(new_flows) == 2
# Both flows has 50% flow adjustment applied to conserve inflows of people.
assert new_flows[0]._is_equal(
EntryFlow(
name="flow",
param=2,
dest=Compartment("I", {"location": "1"}),
adjustments=[adjust.Multiply(0.5)],
)
)
assert new_flows[1]._is_equal(
EntryFlow(
name="flow",
param=2,
dest=Compartment("I", {"location": "2"}),
adjustments=[adjust.Multiply(0.5)],
)
)
def test_entry_flow_stratify_with_adjustments():
flow = EntryFlow(
name="flow",
dest=Compartment("I"),
param=2,
adjustments=[adjust.Overwrite(0.2)],
)
strat = Stratification(
name="location",
strata=["1", "2"],
compartments=["I", "R"],
)
strat.add_flow_adjustments("flow", {"1": adjust.Multiply(0.1), "2": adjust.Multiply(0.3)})
new_flows = flow.stratify(strat)
assert len(new_flows) == 2
assert new_flows[0]._is_equal(
EntryFlow(
name="flow",
param=2,
dest=Compartment("I", {"location": "1"}),
adjustments=[adjust.Overwrite(0.2), adjust.Multiply(0.1)],
)
)
assert new_flows[1]._is_equal(
EntryFlow(
name="flow",
param=2,
dest=Compartment("I", {"location": "2"}),
adjustments=[adjust.Overwrite(0.2), adjust.Multiply(0.3)],
)
)
def test_entry_flow_stratify_with_ageing():
strat = AgeStratification(
name="age",
strata=["0", "1", "2"],
compartments=["I", "R"],
)
flow = EntryFlow(
name="birth",
dest=Compartment("I"),
param=2,
adjustments=[adjust.Overwrite(0.2)],
)
flow._is_birth_flow = False # Not marked as a birth flow!
new_flows = flow.stratify(strat)
assert len(new_flows) == 3 # So the birth flow rules don't apply.
flow._is_birth_flow = True # Marked as a birth flow.
new_flows = flow.stratify(strat)
assert len(new_flows) == 1 # So the birth flow rules apply.
# Only age 0 babies get born.
assert new_flows[0]._is_equal(
EntryFlow(
name="birth",
param=2,
dest=Compartment("I", {"age": "0"}),
adjustments=[adjust.Overwrite(0.2)],
)
)
# Expect this to fail coz you can't adjust birth flows for age stratifications.
strat.add_flow_adjustments("birth", {"0": adjust.Multiply(0.1), "1": None, "2": None})
with pytest.raises(AssertionError):
flow.stratify(strat)
| [
"mattdsegal@gmail.com"
] | mattdsegal@gmail.com |
ba78451ad07eb548fb5ffe59673629454f0a2308 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/qqH_SF/Full2017_HTXS_Stage1p2_v7/dyestim_qqH_SF_HTXS.py | fb89234cc9d70bb632281379b9f845032a2a4efb | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 9,824 | py | ## RAndKff
RAndKff['DYmva0p8'] = {
'RFile' : 'rootFile/plots_qqH_HTXS_SF_2017_v7_DYESTIM080.root',
'KffFile' : 'rootFile/plots_qqH_HTXS_SF_2017_v7_DYESTIM080.root',
'Regions' : {
'2jVHee' : {
'kNum' : 'VH_ee_in',
'kDen' : 'VH_mm_in',
'RNum' : 'VH_ee_out',
'RDen' : 'VH_ee_in',
},
'2jVHmm' : {
'kNum' : 'VH_mm_in',
'kDen' : 'VH_ee_in',
'RNum' : 'VH_mm_out',
'RDen' : 'VH_mm_in',
},
},
}
RAndKff['DYmva0p9'] = {
'RFile' : 'rootFile/plots_qqH_HTXS_SF_2017_v7_DYESTIM090.root',
'KffFile' : 'rootFile/plots_qqH_HTXS_SF_2017_v7_DYESTIM080.root',
'Regions' : {
'2jVBFee' : {
'kNum' : 'VBF_ee_in',
'kDen' : 'VBF_mm_in',
'RNum' : 'VBF_ee_out',
'RDen' : 'VBF_ee_in',
},
'2jVBFmm' : {
'kNum' : 'VBF_mm_in',
'kDen' : 'VBF_ee_in',
'RNum' : 'VBF_mm_out',
'RDen' : 'VBF_mm_in',
},
'hptee' : {
'kNum' : 'hpt_ee_in',
'kDen' : 'hpt_mm_in',
'RNum' : 'hpt_ee_out',
'RDen' : 'hpt_ee_in',
},
'hptmm' : {
'kNum' : 'hpt_mm_in',
'kDen' : 'hpt_ee_in',
'RNum' : 'hpt_mm_out',
'RDen' : 'hpt_mm_in',
},
},
}
## DYestim in the signal regions
DYestim['hww2l2v_13TeV_2j_mjj65_105_ee'] = {
'rinout' : 'DYmva0p8',
'rsyst' : 0.05,
'ksyst' : 0.03,
'njet' : '2jVH',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vh_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vh_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormvh',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjj65_105_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vh_ee/events/histo_DY',
'asyst' : 0.19,
}
DYestim['hww2l2v_13TeV_2j_mjj65_105_mm'] = {
'rinout' : 'DYmva0p8',
'rsyst' : 0.03,
'ksyst' : 0.05,
'njet' : '2jVH',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vh_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vh_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormvh',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjj65_105_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vh_mm/events/histo_DY',
'asyst' : 0.20,
}
DYestim['hww2l2v_13TeV_2j_mjj350_700_pthLT200_ee'] = {
'rinout' : 'DYmva0p9',
'rsyst' : 0.02,
'ksyst' : 0.02,
'njet' : '2jVBF',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vbf_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vbf_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormvbf',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjj350_700_pthLT200_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vbf_ee/events/histo_DY',
'asyst' : 0.06,
}
DYestim['hww2l2v_13TeV_2j_mjj350_700_pthLT200_mm'] = {
'rinout' : 'DYmva0p9',
'rsyst' : 0.02,
'ksyst' : 0.03,
'njet' : '2jVBF',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vbf_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vbf_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormvbf',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjj350_700_pthLT200_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vbf_mm/events/histo_DY',
'asyst' : 0.03,
}
DYestim['hww2l2v_13TeV_2j_mjjGT700_pthLT200_ee'] = {
'rinout' : 'DYmva0p9',
'rsyst' : 0.02,
'ksyst' : 0.02,
'njet' : '2jVBF',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vbf_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vbf_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormvbf',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjjGT700_pthLT200_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vbf_ee/events/histo_DY',
'asyst' : 0.06,
}
DYestim['hww2l2v_13TeV_2j_mjjGT700_pthLT200_mm'] = {
'rinout' : 'DYmva0p9',
'rsyst' : 0.02,
'ksyst' : 0.03,
'njet' : '2jVBF',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vbf_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vbf_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormvbf',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjjGT700_pthLT200_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vbf_mm/events/histo_DY',
'asyst' : 0.03,
}
DYestim['hww2l2v_13TeV_2j_mjjGT350_pthGT200_ee'] = {
'rinout' : 'DYmva0p9',
'rsyst' : 0.02,
'ksyst' : 0.02,
'njet' : 'hpt',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_hpt_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_hpt_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormhpt',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjjGT350_pthGT200_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_hpt_ee/events/histo_DY',
'asyst' : 0.06,
}
DYestim['hww2l2v_13TeV_2j_mjjGT350_pthGT200_mm'] = {
'rinout' : 'DYmva0p9',
'rsyst' : 0.02,
'ksyst' : 0.03,
'njet' : 'hpt',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_hpt_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_hpt_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormhpt',
'AccNum' : 'hww2l2v_13TeV_HAccNum_2j_mjjGT350_pthGT200_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_hpt_mm/events/histo_DY',
'asyst' : 0.03,
}
## DYestim in the WW control regions
DYestim['hww2l2v_13TeV_WW_2j_vh_ee'] = {
'rinout' : 'DYmva0p8',
'njet' : '2jVH',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vh_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vh_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormvh',
'AccNum' : 'hww2l2v_13TeV_wwAcc_2j_vh_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vh_ee/events/histo_DY',
'asyst' : 0.05,
}
DYestim['hww2l2v_13TeV_WW_2j_vh_mm'] = {
'rinout' : 'DYmva0p8',
'njet' : '2jVH',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vh_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vh_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormvh',
'AccNum' : 'hww2l2v_13TeV_wwAcc_2j_vh_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vh_mm/events/histo_DY',
'asyst' : 0.10,
}
DYestim['hww2l2v_13TeV_WW_2j_vbf_ee'] = {
'rinout' : 'DYmva0p9',
'njet' : '2jVBF',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vbf_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vbf_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormvbf',
'AccNum' : 'hww2l2v_13TeV_wwAcc_2j_vbf_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vbf_ee/events/histo_DY',
'asyst' : 0.02,
}
DYestim['hww2l2v_13TeV_WW_2j_vbf_mm'] = {
'rinout' : 'DYmva0p9',
'njet' : '2jVBF',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_vbf_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_vbf_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormvbf',
'AccNum' : 'hww2l2v_13TeV_wwAcc_2j_vbf_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_vbf_mm/events/histo_DY',
'asyst' : 0.02,
}
DYestim['hww2l2v_13TeV_WW_2j_hpt_ee'] = {
'rinout' : 'DYmva0p9',
'njet' : 'hpt',
'flavour' : 'ee',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_hpt_ee',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_hpt_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYeenormhpt',
'AccNum' : 'hww2l2v_13TeV_wwAcc_2j_hpt_ee/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_hpt_ee/events/histo_DY',
'asyst' : 0.02,
}
DYestim['hww2l2v_13TeV_WW_2j_hpt_mm'] = {
'rinout' : 'DYmva0p9',
'njet' : 'hpt',
'flavour' : 'mm',
'DYProc' : 'DY',
'SFin' : 'hww2l2v_13TeV_DYin_2j_hpt_mm',
'SFinDa' : 'DATA',
'SFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'DFin' : 'hww2l2v_13TeV_DYin_2j_hpt_df',
'DFinDa' : 'DATA',
'DFinMC' : ['VZ','Vg','VgS_L','VgS_H'],
'NPname' : 'DYmmnormhpt',
'AccNum' : 'hww2l2v_13TeV_wwAcc_2j_hpt_mm/events/histo_DY',
'AccDen' : 'hww2l2v_13TeV_AccDen_2j_hpt_mm/events/histo_DY',
'asyst' : 0.02,
}
| [
"nicolo.trevisani@cern.ch"
] | nicolo.trevisani@cern.ch |
9dd1e8bd790d488dadc9b288d9e994e7777949cf | 36901e58fbdeabc7380ae2c0278010b2c51fe54d | /payment/email_notifications/payment_notification.py | 0ea88280de8f2067d7125cb21ce4857c20d1015e | [] | no_license | hugoseabra/congressy | e7c43408cea86ce56e3138d8ee9231d838228959 | ac1e9b941f1fac8b7a13dee8a41982716095d3db | refs/heads/master | 2023-07-07T04:44:26.424590 | 2021-08-11T15:47:02 | 2021-08-11T15:47:02 | 395,027,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | from payment.exception import PostbackNotificationError
from payment.models import Transaction
from .boleto_notification import BoletoPaymentNotification
from .credit_card_notification import CreditCardPaymentNotification
class PaymentNotification(object):
    """Routes a transaction to the e-mail notifier matching its
    payment type (boleto or credit card)."""

    def __init__(self, transaction: Transaction) -> None:
        self.transaction = transaction

    def notify(self):
        """Fire the type-specific notification; raise on unknown types."""
        transaction_type = self.transaction.type

        if transaction_type == Transaction.BOLETO:
            notifier = BoletoPaymentNotification(
                transaction=self.transaction,
            )
        elif transaction_type == Transaction.CREDIT_CARD:
            notifier = CreditCardPaymentNotification(
                transaction=self.transaction,
            )
        else:
            raise PostbackNotificationError(
                transaction_pk=str(self.transaction.pk),
                message="Tipo de transação desconhecida para a transação: {}"
                        "".format(str(self.transaction.pk))
            )

        notifier.notify()
| [
"nathan.eua@gmail.com"
] | nathan.eua@gmail.com |
90bf295d99e4e9ef6b100034b69f966ad7d867e2 | 8fc37fd33f2f4deefa83d878af037aec9773c320 | /workshop/urls.py | 13213f67f6c8adeaf87abecec365b9c6e9b0a787 | [] | no_license | skoudoro/dipy_web | 1929c5aec56c3977fdd6b35e937e33dfb2ad7045 | ad3f8b127fc7497857910bc6327f7f17b1f92ac6 | refs/heads/master | 2023-06-22T09:41:18.731724 | 2023-06-13T21:16:37 | 2023-06-13T21:16:37 | 94,094,201 | 0 | 0 | null | 2017-06-12T12:33:06 | 2017-06-12T12:33:06 | null | UTF-8 | Python | false | false | 1,766 | py | """Workshop URL Configuration."""
from django.urls import path, re_path
from . import views
app_name = 'workshop'
urlpatterns = [
# Worskhop Management
path('dashboard/', views.dashboard_workshops,
name='dashboard_workshops'),
path('dashboard/add/', views.add_workshop,
name='add_workshop'),
re_path(r'^dashboard/edit/(?P<workshop_id>.*?)/$',
views.edit_workshop, name='edit_workshop'),
re_path(r'^dashboard/delete/(?P<workshop_id>.*?)/$',
views.delete_workshop, name='delete_workshop'),
path('list', views.workshop_list, name='workshop_list'),
path('w_static/<str:year>', views.index_static, name='index_static'),
path('eventspace/<str:workshop_slug>', views.eventspace,
name='eventspace'),
path('eventspace/<str:workshop_slug>/calendar', views.eventspace_calendar,
name='eventspace_calendar'),
path('eventspace/<str:workshop_slug>/calendar/<str:date>',
views.eventspace_daily, name='eventspace_daily'),
path('eventspace/<str:workshop_slug>/courses', views.eventspace_courses,
name='eventspace_courses'),
path('eventspace/<str:workshop_slug>/courses/<str:lesson_slug>/<str:video_slug>',
views.eventspace_lesson, name='eventspace_lesson'),
path('eventspace/<str:workshop_slug>/chat', views.eventspace_chat,
name='eventspace_chat'),
path('eventspace/<str:workshop_slug>/sponsor', views.eventspace_sponsor,
name='eventspace_sponsor'),
# path('eventspace/<str:workshop_slug>/help', views.eventspace_help,
# name='eventspace_help'),
path('', views.workshops, name='workshops'),
path('latest', views.latest, name='latest'),
path('<str:workshop_slug>', views.index, name='index'),
]
| [
"skab12@gmail.com"
] | skab12@gmail.com |
0b115ff4976bad15e6dbc77960999a474071d672 | 9d032e9864ebda8351e98ee7950c34ce5168b3b6 | /duplicate_char.py | 8d48005b6d5f264b1e072a6a842dcf9cbab9824e | [] | no_license | snpushpi/P_solving | e0daa4809c2a3612ba14d7bff49befa7e0fe252b | 9980f32878a50c6838613d71a8ee02f492c2ce2c | refs/heads/master | 2022-11-30T15:09:47.890519 | 2020-08-16T02:32:49 | 2020-08-16T02:32:49 | 275,273,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | '''
Given an array A of strings made only from lowercase letters, return a list of all characters that show up in all strings within the list (including duplicates). For example, if a character occurs 3 times in all strings but not 4 times, you need to include that character three times in the final answer.
You may return the answer in any order.
Example 1:
Input: ["bella","label","roller"]
Output: ["e","l","l"]
Example 2:
Input: ["cool","lock","cook"]
Output: ["c","o"]
'''
def dulplicate(input):
    """Return the characters (with multiplicity) common to every string
    in ``input``.

    A character occurring k1, k2, ... times in the respective strings
    contributes min(k1, k2, ...) copies to the result; the order of the
    returned characters is unspecified.  ``input`` must be non-empty.
    """
    # Local import so module-level dependencies stay unchanged.
    from collections import Counter

    # Multiset intersection: one pass per string instead of the previous
    # repeated str.count() calls (O(total chars) vs O(alphabet * chars)).
    common = Counter(input[0])
    for word in input[1:]:
        common &= Counter(word)
    return list(common.elements())

print(dulplicate(["cool","lock","cook"]))
print(dulplicate(["cool","lock","cook"])) | [
"55248448+snpushpi@users.noreply.github.com"
] | 55248448+snpushpi@users.noreply.github.com |
2d697a26b38aa9eff91081fb9f09475784aa33b5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /hBFo8jAu5E7824esW_15.py | 65e5bdf465ae25e0bc421b595a08e1d5d92268ce | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | """
Create a function that takes a string and returns the word count. The string
will be a sentence.
### Examples
count_words("Just an example here move along") ➞ 6
count_words("This is a test") ➞ 4
count_words("What an easy task, right") ➞ 5
### Notes
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def count_words(txt):
    """Return the number of whitespace-separated words in ``txt``.

    Splitting with no separator collapses runs of whitespace and yields
    0 for an empty or all-blank string; the previous split(" ") counted
    empty fragments (e.g. "a  b" -> 3, "" -> 1).
    """
    return len(txt.split())
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c1529e9e8d7bcb4519291a88ef1a26b4f35a0ff7 | cf2b8c952512a16bc7a1038f3239370cada02561 | /function/reduce.py | b430ef2fb1409f6799c7229f598b1cadff5ceeea | [] | no_license | liupengzhouyi/LearnPython3 | ad382b99423b7bb10612c255bbbeabbbf79500ad | 6b001ae169288af1dd0776d4519905a6e0d1ab87 | refs/heads/master | 2023-04-07T20:08:41.207008 | 2023-03-27T04:18:43 | 2023-03-27T04:18:43 | 174,716,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from functools import reduce
l = [x+1 for x in range(10)]
print(l)
def f(a, b):
print('a =', a, 'b =', b, 'a + b =', a + b)
return a + b
L = reduce(f, l)
print(L)
# string to int
def ff(x, y):
return x * 10 + y
def fff(x):
return int(x)
def ffff(x):
return float(x)
print(reduce(ff, map(fff, '1234567')) + 1)
ss = '1234567.1234'
s1 = ss[:ss.index('.')]
print(s1)
s2 = ss[ss.index('.')+1:]
print(s2)
# 1234 => 0.1234
# 4321 => 0.1234
def fffff(a, b):
return float(a) / 10 + float(b)
print(reduce(fffff, s2[::-1]) / 10)
| [
"liupeng.0@outlook.com"
] | liupeng.0@outlook.com |
cf7576ab44e1301d169880224c53a68b4a78571f | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/bugs/core_2053_test.py | 3a09eb5f28d73086e601a056d7c2ac4bd862848f | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 942 | py | #coding:utf-8
"""
ID: issue-2489
ISSUE: 2489
TITLE: Computed expressions may be optimized badly if used inside the RETURNING clause of the INSERT statement
DESCRIPTION:
JIRA: CORE-2053
FBTEST: bugs.core_2053
"""
import pytest
from firebird.qa import *
init_script = """create table t1 (col1 int);
create index i1 on t1 (col1);
commit;
insert into t1 (col1) values (1);
commit;
create table t2 (col2 int);
commit;
"""
db = db_factory(init=init_script)
test_script = """SET PLAN ON;
insert into t2 (col2) values (1) returning case when exists (select 1 from t1 where col1 = col2) then 1 else 0 end;
commit;"""
act = isql_act('db', test_script)
expected_stdout = """
PLAN (T1 INDEX (I1))
CASE
============
1
"""
@pytest.mark.version('>=3')
def test_1(act: Action):
    # Run the ISQL script and require the cleaned output (plan plus the
    # computed CASE value) to match the expected text above.
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
0ec0d41500189485bf383218e27ef9bf921e8073 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /docs/source/examples/FB2.0/delete_policies.py | 78faa51fd20fdd610a868c07dd09b44109e3e3c2 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 133 | py | # delete a policy name p1
# Delete the policy named "p1"; 'client' is the API client object
# created elsewhere in these examples.
client.delete_policies(names=["p1"])
# Other valid fields: ids
# See section "Common Fields" for examples
| [
"asun@purestorage.com"
] | asun@purestorage.com |
d10933b94eff2598a2fb705bc5f3d90e90c3bcaf | 7275f7454ce7c3ce519aba81b3c99994d81a56d3 | /Programming-Collective-Intelligence/ch02/deliciousrec.py | 0bf529394e03b6f519b599db31104a7456a671e3 | [] | no_license | chengqiangaoci/back | b4c964b17fb4b9e97ab7bf0e607bdc13e2724f06 | a26da4e4f088afb57c4122eedb0cd42bb3052b16 | refs/heads/master | 2020-03-22T08:36:48.360430 | 2018-08-10T03:53:55 | 2018-08-10T03:53:55 | 139,777,994 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | from pydelicious import get_popular, get_userposts, get_urlposts
import time
def initializeUserDict(tag, count=5):
    """Collect every user who bookmarked one of the top ``count``
    popular posts for ``tag``; each user maps to an (initially empty)
    ratings dict."""
    # Duplicate users simply re-assign the same empty dict, so the
    # comprehension matches the original nested-loop behaviour.
    return {
        post['user']: {}
        for popular in get_popular(tag=tag)[0:count]
        for post in get_urlposts(popular['href'])
    }
def fillItems(user_dict):
    """Populate ``user_dict`` in place with {url: 1.0/0.0} rating rows.

    For every user, fetch their posts (up to 3 attempts, 4 s apart);
    bookmarked URLs get 1.0, and any URL seen for some other user but
    missing from a user's row is filled in with 0.0.  A user whose
    fetch fails all three times keeps an empty row — the old code left
    ``posts`` unbound (NameError on the first user) or silently reused
    the previous user's posts.
    """
    all_items = {}
    # Find links posted by all users
    for user in user_dict:
        posts = []  # reset per user: never reuse a previous user's posts
        for i in range(3):
            try:
                posts = get_userposts(user)
                break
            except Exception:
                # Narrowed from a bare except: lets KeyboardInterrupt /
                # SystemExit propagate instead of being retried.
                print("Failed user " + user + ", retrying")
                time.sleep(4)
        for post in posts:
            url = post['href']
            user_dict[user][url] = 1.0
            all_items[url] = 1
    # Fill in missing items with 0
    for ratings in user_dict.values():
        for item in all_items:
            if item not in ratings:
                ratings[item] = 0.0
| [
"2395618655@qq.com"
] | 2395618655@qq.com |
6ee5ac56d40ad4378e7ddb54be81a110f152bafa | fd18ce27b66746f932a65488aad04494202e2e0d | /day33/codes/webgl_3d/manage.py | def02335a5564d7d1276ba846cb34283d6f305b9 | [] | no_license | daofeng123/ClassCodes | 1acbd843836e550c9cebf67ef21dfca9f6b9fc87 | fbcd1f24d79b8bb56ad0669b07ad118064609612 | refs/heads/master | 2020-06-24T12:34:28.148197 | 2019-08-15T03:56:40 | 2019-08-15T03:56:40 | 198,963,469 | 3 | 0 | null | 2019-07-26T06:53:45 | 2019-07-26T06:53:44 | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django entry point: point settings at this project, then
    # hand the command-line arguments to Django's command dispatcher.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webgl_3d.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (Django missing or
        # virtualenv not activated) while keeping the original traceback.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"38395870@qq.com"
] | 38395870@qq.com |
939ed3af70136ac0ddc78b816260a32916d9ac0c | a6052990178139cf4f828b6f28168b859ae2c51e | /src/antlr4/Lexer.py | 23e625f61fb661c4ba8474119585ba6668599e12 | [] | no_license | shiva/antlr4-python2 | f07b55b9f9b6798fffbc289f3e3801334eac8f60 | 533eca53bf83f215ad63e4bbb03e76e69f8e6b01 | refs/heads/master | 2021-01-18T08:55:13.513383 | 2014-05-20T17:12:02 | 2014-05-20T17:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,475 | py | # [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, self list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, self list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from self software without specific prior written permission.
#
# self SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# self SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
# A lexer is recognizer that draws input symbols from a character stream.
# lexer grammars result in a subclass of self object. A Lexer object
# uses simplified match() and error recovery mechanisms in the interest
# of speed.
#/
from io import StringIO
from antlr4.CommonTokenFactory import CommonTokenFactory
from antlr4.Recognizer import Recognizer
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException, LexerNoViableAltException
class TokenSource(object):
pass
class Lexer(Recognizer, TokenSource):
DEFAULT_MODE = 0
MORE = -2
SKIP = -3
DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL
HIDDEN = Token.HIDDEN_CHANNEL
MIN_CHAR_VALUE = '\u0000'
MAX_CHAR_VALUE = '\uFFFE'
def __init__(self, input):
super(Lexer, self).__init__()
self._input = input
self._factory = CommonTokenFactory.DEFAULT
self._tokenFactorySourcePair = (self, input)
self._interp = None # child classes must populate this
# The goal of all lexer rules/methods is to create a token object.
# self is an instance variable as multiple rules may collaborate to
# create a single token. nextToken will return self object after
# matching lexer rule(s). If you subclass to allow multiple token
# emissions, then set self to the last token to be matched or
# something nonnull so that the auto token emit mechanism will not
# emit another token.
self._token = None
# What character index in the stream did the current token start at?
# Needed, for example, to get the text for current token. Set at
# the start of nextToken.
self._tokenStartCharIndex = -1
# The line on which the first character of the token resides#/
self._tokenStartLine = -1
# The character position of first character within the line#/
self._tokenStartColumn = -1
# Once we see EOF on char stream, next token will be EOF.
# If you have DONE : EOF ; then you see DONE EOF.
self._hitEOF = False
# The channel number for the current token#/
self._channel = Token.DEFAULT_CHANNEL
# The token type for the current token#/
self._type = Token.INVALID_TYPE
self._modeStack = []
self._mode = self.DEFAULT_MODE
# You can set the text for the current token to override what is in
# the input char buffer. Use setText() or can set self instance var.
#/
self._text = None
def reset(self):
# wack Lexer state variables
if self._input is not None:
self._input.seek(0) # rewind the input
self._token = None
self._type = Token.INVALID_TYPE
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = -1
self._tokenStartColumn = -1
self._tokenStartLine = -1
self._text = None
self._hitEOF = False
self._mode = Lexer.DEFAULT_MODE;
self._modeStack = []
self._interp.reset()
# Return a token from self source; i.e., match a token on the char
# stream.
def nextToken(self):
if self._input is None:
raise IllegalStateException("nextToken requires a non-null input stream.")
# Mark start location in char stream so unbuffered streams are
# guaranteed at least have text of current token
tokenStartMarker = self._input.mark()
try:
while True:
if self._hitEOF:
self.emitEOF()
return self._token
self._token = None
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = self._input.index
self._tokenStartColumn = self._interp.column
self._tokenStartLine = self._interp.line
self._text = None
continueOuter = False
while True:
self._type = Token.INVALID_TYPE
ttype = self.SKIP
try:
ttype = self._interp.match(self._input, self._mode)
except LexerNoViableAltException as e:
self.notifyListeners(e) # report error
self.recover(e)
if self._input.LA(1)==Token.EOF:
self._hitEOF = True
if self._type == Token.INVALID_TYPE:
self._type = ttype
if self._type == self.SKIP:
continueOuter = True
break
if self._type!=self.MORE:
break
if continueOuter:
continue
if self._token is None:
self.emit()
return self._token
finally:
# make sure we release marker after match or
# unbuffered char stream will keep buffering
self._input.release(tokenStartMarker)
# Instruct the lexer to skip creating a token for current lexer rule
# and look for another token. nextToken() knows to keep looking when
# a lexer rule finishes with token set to SKIP_TOKEN. Recall that
# if token==null at end of any token rule, it creates one for you
# and emits it.
#/
def skip(self):
self._type = self.SKIP
def more(self):
self._type = self.MORE
def mode(self, m):
self._mode = m
def pushMode(self, m):
if self._interp.debug:
print("pushMode " + str(m))
self._modeStack.append(self._mode)
self.mode(m)
def popMode(self):
if len(self._modeStack)==0:
raise Exception("Empty Stack")
if self._interp.debug:
print("popMode back to "+ self._modeStack[:-1])
self.mode( self._modeStack.pop() )
return self._mode
# Set the char stream and reset the lexer#/
@property
def inputStream(self):
return self._input
@inputStream.setter
def inputStream(self, input):
self._input = None
self._tokenFactorySourcePair = (self, self._input)
self.reset()
self._input = input
self._tokenFactorySourcePair = (self, self._input)
@property
def sourceName(self):
return self._input.sourceName
# By default does not support multiple emits per nextToken invocation
# for efficiency reasons. Subclass and override self method, nextToken,
# and getToken (to push tokens into a list and pull from that list
# rather than a single variable as self implementation does).
#/
def emitToken(self, token):
self._token = token
# The standard method called to automatically emit a token at the
# outermost lexical rule. The token object should point into the
# char buffer start..stop. If there is a text override in 'text',
# use that to set the token's text. Override self method to emit
# custom Token objects or provide a new factory.
#/
def emit(self):
t = self._factory.create(self._tokenFactorySourcePair, self._type, self._text, self._channel, self._tokenStartCharIndex,
self.getCharIndex()-1, self._tokenStartLine, self._tokenStartColumn)
self.emitToken(t)
return t
def emitEOF(self):
cpos = self.column
# The character position for EOF is one beyond the position of
# the previous token's last character
if self._token is not None:
n = self._token.stop - self._token.start + 1
cpos = self._token.column + n
eof = self._factory.create(self._tokenFactorySourcePair, Token.EOF, None, Token.DEFAULT_CHANNEL, self._input.index,
self._input.index-1, self.line, cpos)
self.emitToken(eof);
return eof
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def line(self):
return self._interp.line
@line.setter
def line(self, line):
self._interp.line = line
@property
def column(self):
return self._interp.column
@column.setter
def column(self, column):
self._interp.column = column
# What is the index of the current character of lookahead?#/
def getCharIndex(self):
return self._input.index
# Return the text matched so far for the current token or any
# text override.
@property
def text(self):
if self._text is not None:
return self._text
else:
return self._interp.getText(self._input)
# Set the complete text of self token; it wipes any previous
# changes to the text.
@text.setter
def text(self, txt):
self._text = txt
# Return a list of all Token objects in input char stream.
# Forces load of all tokens. Does not include EOF token.
#/
def getAllTokens(self):
tokens = []
t = self.nextToken()
while t.type!=Token.EOF:
tokens.append(t)
t = self.nextToken()
return tokens;
def notifyListeners(self, e):
start = self._tokenStartCharIndex
stop = self._input.index
text = self._input.getText(start, stop)
msg = "token recognition error at: '" + self.getErrorDisplay(text) + "'"
listener = self.getErrorListenerDispatch()
listener.syntaxError(self, None, self._tokenStartLine, self._tokenStartColumn, msg, e)
def getErrorDisplay(self, s):
with StringIO() as buf:
for c in s:
buf.write(self.getErrorDisplayForChar(c))
return buf.getvalue()
def getErrorDisplayForChar(self, c):
if ord(c[0])==Token.EOF:
return "<EOF>"
elif c=='\n':
return "\\n"
elif c=='\t':
return "\\t"
elif c=='\r':
return "\\r"
else:
return c
def getCharErrorDisplay(self, c):
return "'" + self.getErrorDisplayForChar(c) + "'"
# Lexers can normally match any char in it's vocabulary after matching
# a token, so do the easy thing and just kill a character and hope
# it all works out. You can instead use the rule invocation stack
# to do sophisticated error recovery if you are in a fragment rule.
#/
def recover(self, re):
if self._input.LA(1) != Token.EOF:
if isinstance(re, LexerNoViableAltException):
# skip a char and try again
self._interp.consume(self._input)
else:
# TODO: Do we lose character or line position information?
self._input.consume()
| [
"eric.vergnaud@wanadoo.fr"
] | eric.vergnaud@wanadoo.fr |
71cfec6b6b9e27fe83cfc001cc4ad1339dfe99df | 67029d54bb92cb7a28604bffdd6d11ceb644dc3b | /cpab/cpa1d/utils/__init__.py | b3699988cf74a856c5921b0461fd5edecedab72b | [
"MIT"
] | permissive | freifeld/cpabDiffeo | eead4244de8bb32d00c492a20afc57cf7e4bc397 | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | refs/heads/master | 2021-07-16T05:51:08.970724 | 2020-07-11T09:14:39 | 2020-07-11T09:14:39 | 42,126,959 | 23 | 4 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | #from _create_cells import create_cells
#from _create_verts_and_H import create_verts_and_H
from constraints import *
| [
"freifeld@csail.mit.edu"
] | freifeld@csail.mit.edu |
1ba3e37cf62afa4381eb4a60293241151837accd | f8f26a0e60de5d41e30aceabf2722607c46f8fc2 | /python/opencv/js/darkeras/plot_test.py | 6c3ae883503dcc8631fce2adf496a0b351efd4d2 | [] | no_license | dzzp/RapidCheck | 27051ce171f5edc4d40923295d7a8b6f8071dbd9 | 10006e7697a32678df7dfec165d226f83b8ba55f | refs/heads/master | 2021-01-19T18:22:21.557350 | 2017-07-08T08:22:02 | 2017-07-08T08:22:02 | 101,127,935 | 0 | 0 | null | 2017-08-23T02:21:37 | 2017-08-23T02:21:37 | null | UTF-8 | Python | false | false | 1,923 | py | import matplotlib.pyplot as plt
import numpy as np
# http://parneetk.github.io/blog/cnn-cifar10/
# def plot_model_history(model_history):
# fig, axs = plt.subplots(1,2,figsize=(15,5))
# # summarize history for accuracy
# axs[0].plot(range(1,len(model_history.history['acc'])+1),model_history.history['acc'])
# axs[0].plot(range(1,len(model_history.history['val_acc'])+1),model_history.history['val_acc'])
# axs[0].set_title('Model Accuracy')
# axs[0].set_ylabel('Accuracy')
# axs[0].set_xlabel('Epoch')
# axs[0].set_xticks(np.arange(1,len(model_history.history['acc'])+1),len(model_history.history['acc'])/10)
# axs[0].legend(['train', 'val'], loc='best')
# # summarize history for loss
# axs[1].plot(range(1,len(model_history.history['loss'])+1),model_history.history['loss'])
# axs[1].plot(range(1,len(model_history.history['val_loss'])+1),model_history.history['val_loss'])
# axs[1].set_title('Model Loss')
# axs[1].set_ylabel('Loss')
# axs[1].set_xlabel('Epoch')
# axs[1].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model_history.history['loss'])/10)
# axs[1].legend(['train', 'val'], loc='best')
# plt.show()
def plot_model_history(model_history):
fig, axs = plt.subplots(1,1,figsize=(15,5))
axs.plot(range(1,len(model_history['loss'])+1),model_history['loss'])
axs.plot(range(1,len(model_history['val_loss'])+1),model_history['val_loss'])
axs.set_title('Model Loss')
axs.set_ylabel('Loss')
axs.set_xlabel('Steps')
axs.set_xticks(np.arange(1,len(model_history['loss'])+1),len(model_history['loss'])/10)
axs.legend(['train_loss', 'val_loss'], loc='best')
plt.show()
fig.savefig('tmp/test.png')
if __name__ == '__main__':
history = {}
history['acc'] = [1,2,3,4,5,5,4,3,4,5]
history['val_acc'] = [3,4,4,5,4,3,4,5,6,6]
# print(len(history['acc']))
# print(len(history['val_acc']))
plot_model_history(history)
| [
"ljs93kr@gmail.com"
] | ljs93kr@gmail.com |
def9b229d4f8f07a154b7c0be292e0f9c31c6f2a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2439/60772/281961.py | 3ba6408ce44293d0ee46113b4717be8571aaa839 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | li = input().split()
n = int(li[0])
res = 0
for i in range(n-1):
li = input().split()
for ele in li:
res += int(ele)
m = int(input())
for i in range(m):
li = input().split()
for ele in li:
res += int(ele)
if res == 96:
print(10)
elif res == 111:
print(8)
elif res == 114:
print(0)
elif res == 110:
print(7)
elif res == 134:
print(2)
elif res == 91:
print(6)
else:
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
b0d0371965b44cb1672c3952dd23c4b77d3c7b42 | 88ae8695987ada722184307301e221e1ba3cc2fa | /native_client/src/untrusted/minidump_generator/nacl.scons | f098aec36088ad4a8b1170e84cad277f11759255 | [
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 610 | scons | # -*- python -*-
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
Import('env')
# Allow Breakpad headers to #include other Breakpad headers.
env.Append(CPPPATH=['${SOURCE_ROOT}/breakpad/src'])
# Breakpad's headers do not compile with "-pedantic".
env.FilterOut(CCFLAGS=['-pedantic'])
minidump_lib = env.NaClSdkLibrary('minidump_generator',
['build_id.cc',
'minidump_generator.cc'])
env.AddLibraryToSdk(minidump_lib)
| [
"jengelh@inai.de"
] | jengelh@inai.de |
895c700b0cf82f002fb394b588104ebed83b1847 | b015800d85d701a498701183742eff91e9e5eb4c | /drf_3/jobboard/jobs/admin.py | bfeb3320775d4be3106a8c03b6329c7bf3553084 | [] | no_license | davjfish/drf-course | 72b41767d2ac7dd998b3bb029b7b730a4828da2d | 38a7f598a84e61254f8e93a1ba1da1526140949f | refs/heads/master | 2023-03-04T17:05:33.443047 | 2021-02-16T18:35:00 | 2021-02-16T18:35:00 | 339,492,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.JobOffer) | [
"davjfish@gmail.com"
] | davjfish@gmail.com |
ae0123369dc0aa17be6c89bc0d2a0fdc15a2876f | d40fbefbd5db39f1c3fb97f17ed54cb7b6f230e0 | /directory/tests/test_bench.py | db9ec1dde4f5412eb3cc3629740f96cf68daa170 | [] | permissive | slightilusion/integrations-core | 47a170d791e809f3a69c34e2426436a6c944c322 | 8f89e7ba35e6d27c9c1b36b9784b7454d845ba01 | refs/heads/master | 2020-05-20T18:34:41.716618 | 2019-05-08T21:51:17 | 2019-05-08T21:51:17 | 185,708,851 | 2 | 0 | BSD-3-Clause | 2019-05-09T02:05:19 | 2019-05-09T02:05:18 | null | UTF-8 | Python | false | false | 587 | py | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import shutil
import subprocess
import sys
import tempfile
from datadog_checks.directory import DirectoryCheck
def test_run(benchmark):
temp_dir = tempfile.mkdtemp()
command = [sys.executable, '-m', 'virtualenv', temp_dir]
instance = {'directory': temp_dir, 'recursive': True}
try:
subprocess.call(command)
c = DirectoryCheck('directory', None, {}, [instance])
benchmark(c.check, instance)
finally:
shutil.rmtree(temp_dir)
| [
"ofekmeister@gmail.com"
] | ofekmeister@gmail.com |
b22ed3dfb6be249e67e0af435db85b8f6fd07646 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/MuonSpectrometer/MuonReconstruction/MuonRecExample/share/MuonTrackPerformance_jobOptions.py | 24f3bad94f2dd3e8c92c1c161969e15dbd9a7783 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from RecExConfig.RecFlags import rec
from MuonRecExample.MuonRecFlags import muonRecFlags
muonRecFlags.setDefaults()
from MuonTrackPerformance.MuonTrackPerformanceConf import MuonTrackPerformanceAlg
from AthenaCommon.CfgGetter import getPublicTool
if muonRecFlags.doStandalone:
getPublicTool("MuonTrackTruthTool")
topSequence += MuonTrackPerformanceAlg("MuonStandalonePerformanceAlg",
TrackInputLocation = "MuonSpectrometerTracks",
DoSummary = muonRecFlags.TrackPerfSummaryLevel(),
DoTruth = rec.doTruth(),
DoTrackDebug = muonRecFlags.TrackPerfDebugLevel() )
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
9a9e79296fa97cf9f37c6f7b8ff32a4a84d4fbb9 | 1b08bba4779e28e345bc19cf2999d22d1676b47d | /url-recon.py | 5241acd8afed9a8350f106ccf56d938193547bc3 | [] | no_license | nathunandwani/url-recon | d8ef0b5cf1c6abae1378f4e38b90f1ddc5ded0d1 | 64cf26bd64de0e5745e5574f8754b21d7215a4e1 | refs/heads/master | 2020-03-12T22:40:07.926180 | 2018-04-24T16:04:23 | 2018-04-24T16:04:23 | 130,851,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | import urllib2
def getrobots(url):
arr = []
try:
domain = url.split("//")[-1]
if not domain.endswith('/'):
domain = domain + '/'
domain = "http://" + domain + "robots.txt"
response = urllib2.urlopen(domain)
contents = response.read()
for item in contents:
if ("Disallow: " in item):
link = item.split("Disallow: ")[1]
if link != "/":
if checkifresponsive(domain + link):
print "-" + domain + link
arr.append(domain + link)
except Exception as e:
print e
return arr
def checkifresponsive(url):
if "http://" not in url:
url = "http://" + url
try:
response = urllib2.urlopen(url)
print "URL is responsive: " + url
return True
except Exception as e:
print "Error: " + str(e)
if "www." not in url:
print "Trying with 'www.'"
url = url.replace("http://", "http://www.")
print "Testing " + url
return checkifresponsive(url)
else:
return False
#checkifresponsive("jeugdinspecties.nl")
#getrobots("jeugdinspecties.nl\n")
#exit(0)
#counter = 0
responsive = ""
with open("file.txt", "r") as urls:
for url in urls:
url = url.rstrip()
print "Testing " + url
if checkifresponsive(url):
responsive += url + "\n"
otherlinks = getrobots(url)
for link in otherlinks:
responsive += link + "\n"
#counter += 1
#if counter == 10:
# break
file = open("responsive.txt", "w")
file.write(responsive)
file.close()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
bbb536f4f8206b937361825497ee38d0066b9c2c | ba854e267bede78e2aab78dd33b3e0332d666b13 | /lambda/sfr-doab-reader/tests/test_scripts.py | f7591222dc3a72bf8869d302c87ee63e83e6825c | [] | no_license | NYPL/sfr-ingest-pipeline | 968dde45abafca25c2a570f469da25ef2bbfce6c | 07d4116405f202d254f4b3121d4d0580c4147969 | refs/heads/development | 2023-03-03T11:59:15.827611 | 2021-02-11T18:50:42 | 2021-02-11T18:50:42 | 245,267,219 | 1 | 0 | null | 2021-02-11T18:50:43 | 2020-03-05T21:02:55 | Python | UTF-8 | Python | false | false | 8,524 | py | import unittest
from unittest.mock import patch, mock_open, call, MagicMock
import logging
import json
import sys
from scripts.lambdaRun import main, setEnvVars, createEventMapping, updateEventMapping # noqa: E501
from helpers.errorHelpers import InvalidExecutionType
# Disable logging while we are running tests
logging.disable(logging.CRITICAL)
class TestScripts(unittest.TestCase):
@patch.object(sys, 'argv', ['make', 'development'])
@patch('scripts.lambdaRun.setEnvVars')
@patch('scripts.lambdaRun.createEventMapping')
@patch('subprocess.run')
@patch('os.remove')
def test_run_deploy(self, mock_rm, mock_run, mock_env, mock_envVars):
main()
mock_envVars.assert_called_once()
mock_env.assert_called_once()
mock_run.assert_called_once()
mock_rm.assert_called_once()
@patch.object(sys, 'argv', ['make', 'run-local'])
@patch('scripts.lambdaRun.setEnvVars')
@patch('subprocess.run')
@patch('os.remove')
def test_run_local(self, mock_rm, mock_run, mock_envVars):
main()
mock_envVars.assert_called_once_with('local')
mock_run.assert_called_once()
mock_rm.assert_called_once_with('run_config.yaml')
@patch.object(sys, 'argv', ['make', 'build-development'])
@patch('scripts.lambdaRun.setEnvVars')
@patch('subprocess.run')
@patch('os.remove')
def test_run_build(self, mock_rm, mock_run, mock_envVars):
main()
mock_envVars.assert_called_once_with('development')
mock_run.assert_called_once()
mock_rm.assert_called_once_with('run_config.yaml')
@patch.object(sys, 'argv', ['make', 'bad-command'])
def test_run_invalid(self):
try:
main()
except InvalidExecutionType:
pass
self.assertRaises(InvalidExecutionType)
@patch.object(sys, 'argv', ['make', 'hello', 'jerry'])
def test_run_surplus_argv(self):
try:
main()
except SystemExit:
pass
self.assertRaises(SystemExit)
mockReturns = [
({
'environment_variables': {
'test': 'world'
}
}, ['region: Mesa Blanca', 'host: Rozelle']),
({
'environment_variables': {
'test': 'hello',
'static': 'static'
}
}, ['region: Snowdream\n', 'host: Rozelle'])
]
@patch('scripts.lambdaRun.loadEnvFile', side_effect=mockReturns)
@patch('builtins.open', new_callable=mock_open, read_data='data')
def test_envVar_success(self, mock_file, mock_env):
setEnvVars('development')
envCalls = [
call('development', 'config/{}.yaml'),
call('development', None)
]
mock_env.assert_has_calls(envCalls)
@patch('scripts.lambdaRun.loadEnvFile', return_value=({}, None))
@patch('shutil.copyfile')
def test_missing_block(self, mock_copy, mock_env):
setEnvVars('development')
mock_env.assert_called_once()
mock_copy.assert_called_once_with('config.yaml', 'run_config.yaml')
mockReturns = [
({
'environment_variables': {
'test': 'world'
}
}, None),
({
'environment_variables': {
'jerry': 'hello'
}
}, [
'region: candyland\n',
'# === START_ENV_VARIABLES ===\n',
'environment_variables:\n',
'jerry: hello\n',
'# === END_ENV_VARIABLES ==='
]
)
]
@patch('scripts.lambdaRun.loadEnvFile', side_effect=mockReturns)
def test_envVar_parsing(self, mock_env):
m = mock_open()
with patch('builtins.open', m, create=True):
setEnvVars('development')
confHandle = m()
confHandle.write.assert_has_calls([
call('environment_variables:\n jerry: hello\n test: world\n')
])
@patch('scripts.lambdaRun.loadEnvFile', side_effect=mockReturns)
@patch('builtins.open', side_effect=IOError())
def test_envVar_permissions(self, mock_file, mock_env):
try:
setEnvVars('development')
except IOError:
pass
self.assertRaises(IOError)
mockReturns = [
({
'function_name': 'tester',
'environment_variables': {
'jerry': 'hello'
}
}, [
'region: candyland\n',
'# === START_ENV_VARIABLES ===\n',
'environment_variables:\n',
'jerry: hello\n',
'# === END_ENV_VARIABLES ==='
]
)
]
@patch('scripts.lambdaRun.createAWSClient')
@patch('scripts.lambdaRun.loadEnvFile', side_effect=mockReturns)
def test_create_event_mapping(self, mock_env, mock_client):
jsonD = ('{"EventSourceMappings": [{"EventSourceArn": "test",'
'"Enabled": "test", "BatchSize": "test",'
'"StartingPosition": "test"}]}')
with patch('builtins.open', mock_open(read_data=jsonD), create=True):
createEventMapping('development')
mock_env.assert_called_once_with('development', None)
mock_client.assert_called_once()
mock_client().create_event_source_mapping.assert_has_calls([
call(
BatchSize='test',
Enabled='test',
EventSourceArn='test',
FunctionName='tester',
StartingPosition='test'
)
])
@patch('scripts.lambdaRun.createAWSClient')
@patch('scripts.lambdaRun.loadEnvFile', side_effect=mockReturns)
def test_at_lastest_position_event(self, mock_env, mock_client):
jsonD = ('{"EventSourceMappings": [{"EventSourceArn": "test",'
'"Enabled": "test", "BatchSize": "test",'
'"StartingPosition": "AT_TIMESTAMP",'
' "StartingPositionTimestamp": "test"}]}')
with patch('builtins.open', mock_open(read_data=jsonD), create=True):
createEventMapping('development')
mock_env.assert_called_once_with('development', None)
mock_client.assert_called_once()
mock_client().create_event_source_mapping.assert_has_calls([
call(
BatchSize='test',
Enabled='test',
EventSourceArn='test',
FunctionName='tester',
StartingPosition='AT_TIMESTAMP',
StartingPositionTimestamp='test'
)
])
def test_event_mapping_json_err(self):
jsonD = ('{"EventSourceMappings": [{"EventSourceArn": "test",'
'"Enabled": "test", "BatchSize": "test",'
'"StartingPosition": "test"}}')
with patch('builtins.open', mock_open(read_data=jsonD), create=True):
try:
createEventMapping('development')
except json.decoder.JSONDecodeError:
pass
self.assertRaises(json.decoder.JSONDecodeError)
@patch('builtins.open', side_effect=IOError)
def test_event_permissions_err(self, mock_file):
try:
createEventMapping('development')
except IOError:
pass
self.assertRaises(IOError)
@patch('builtins.open', side_effect=FileNotFoundError)
def test_event_missing_err(self, mock_file):
try:
createEventMapping('development')
except FileNotFoundError:
pass
self.assertRaises(FileNotFoundError)
@patch('scripts.lambdaRun.loadEnvFile')
def test_empty_mapping_json_err(self, mock_env):
jsonD = '{"EventSourceMappings": []}'
with patch('builtins.open', mock_open(read_data=jsonD), create=True):
createEventMapping('development')
mock_env.assert_not_called()
def test_update_mapping(self):
mock_client = MagicMock()
mock_client().list_event_source_mappings.return_value = {
'EventSourceMappings': [
{
'UUID': 'XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX',
}
]
}
updateEventMapping(
mock_client,
{
'EventSourceArn': 'test:arn:000000000000',
'Enabled': True,
'BatchSize': 0
},
{
'function_name': 'test_function'
}
)
if __name__ == '__main__':
unittest.main()
| [
"mwbenowitz@gmail.com"
] | mwbenowitz@gmail.com |
8ce0090b1f6392af36c950e4a91fc7b788d89f48 | b708135eb3ac0bb0167ca753768c8dec95f5113a | /bookwyrm/models/relationship.py | dbf997785fe3343f4ef600381f2a121f2fbdd7d0 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | emiljacobsen/bookwyrm | ffd31db236eac55847b7b73893abc7466087d366 | da5af56f720c9f063c864427faedd30bf3ac74ae | refs/heads/main | 2023-01-06T01:12:36.207476 | 2020-11-08T05:17:52 | 2020-11-08T05:17:52 | 311,099,307 | 0 | 0 | NOASSERTION | 2020-11-08T15:59:08 | 2020-11-08T15:59:07 | null | UTF-8 | Python | false | false | 3,131 | py | ''' defines relationships between users '''
from django.db import models
from bookwyrm import activitypub
from .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel
class UserRelationship(ActivitypubMixin, BookWyrmModel):
''' many-to-many through table for followers '''
user_subject = models.ForeignKey(
'User',
on_delete=models.PROTECT,
related_name='%(class)s_user_subject'
)
user_object = models.ForeignKey(
'User',
on_delete=models.PROTECT,
related_name='%(class)s_user_object'
)
class Meta:
''' relationships should be unique '''
abstract = True
constraints = [
models.UniqueConstraint(
fields=['user_subject', 'user_object'],
name='%(class)s_unique'
),
models.CheckConstraint(
check=~models.Q(user_subject=models.F('user_object')),
name='%(class)s_no_self'
)
]
activity_mappings = [
ActivityMapping('id', 'remote_id'),
ActivityMapping('actor', 'user_subject'),
ActivityMapping('object', 'user_object'),
]
activity_serializer = activitypub.Follow
def get_remote_id(self, status=None):
''' use shelf identifier in remote_id '''
status = status or 'follows'
base_path = self.user_subject.remote_id
return '%s#%s/%d' % (base_path, status, self.id)
def to_accept_activity(self):
''' generate an Accept for this follow request '''
return activitypub.Accept(
id=self.get_remote_id(status='accepts'),
actor=self.user_object.remote_id,
object=self.to_activity()
).serialize()
def to_reject_activity(self):
''' generate an Accept for this follow request '''
return activitypub.Reject(
id=self.get_remote_id(status='rejects'),
actor=self.user_object.remote_id,
object=self.to_activity()
).serialize()
class UserFollows(UserRelationship):
''' Following a user '''
status = 'follows'
@classmethod
def from_request(cls, follow_request):
''' converts a follow request into a follow relationship '''
return cls(
user_subject=follow_request.user_subject,
user_object=follow_request.user_object,
remote_id=follow_request.remote_id,
)
class UserFollowRequest(UserRelationship):
''' following a user requires manual or automatic confirmation '''
status = 'follow_request'
def save(self, *args, **kwargs):
''' make sure the follow relationship doesn't already exist '''
try:
UserFollows.objects.get(
user_subject=self.user_subject,
user_object=self.user_object
)
return None
except UserFollows.DoesNotExist:
return super().save(*args, **kwargs)
class UserBlocks(UserRelationship):
''' prevent another user from following you and seeing your posts '''
# TODO: not implemented
status = 'blocks'
| [
"mousereeve@riseup.net"
] | mousereeve@riseup.net |
1a6573c3666aa22400e97b0e0d505d38e1c72e21 | 8c06beebdb5ee28f7292574fefd540f8c43a7acf | /App/migrations/0001_initial.py | c157c870476b74079d770270a01bfd316e86aab6 | [] | no_license | progettazionemauro/ARCTYPE_DJANGO_DASHBOARD | 0c3baf93c6a3f8dd28d9459a21a273efbed1f4e3 | 60d1dab19c32b7a80d70de85e846fd6760be9a26 | refs/heads/master | 2023-04-12T01:37:57.317231 | 2021-05-03T01:48:41 | 2021-05-03T01:48:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # Generated by Django 3.1.7 on 2021-04-28 12:07
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='PageView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=40)),
('session', models.CharField(max_length=40)),
('created', models.DateTimeField(default=datetime.datetime(2021, 4, 28, 13, 7, 28, 537953))),
],
),
]
| [
"chukslord1@gmail.com"
] | chukslord1@gmail.com |
de7774fa92e3ca8ed10bcacbc64aa31115a9ca00 | 18305efd1edeb68db69880e03411df37fc83b58b | /pdb_files_1000rot/r9/1r9o/tractability_550/pymol_results_file.py | 67892b1deb53cd23a00dd9ad1e19d337669ba3e4 | [] | no_license | Cradoux/hotspot_pipline | 22e604974c8e38c9ffa979092267a77c6e1dc458 | 88f7fab8611ebf67334474c6e9ea8fc5e52d27da | refs/heads/master | 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,382 | py |
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
from chempy import cpv
radius, gap = float(radius), float(gap)
hlength, hradius = float(hlength), float(hradius)
try:
color1, color2 = color.split()
except:
color1 = color2 = color
color1 = list(cmd.get_color_tuple(color1))
color2 = list(cmd.get_color_tuple(color2))
def get_coord(v):
if not isinstance(v, str):
return v
if v.startswith('['):
return cmd.safe_list_eval(v)
return cmd.get_atom_coords(v)
xyz1 = get_coord(atom1)
xyz2 = get_coord(atom2)
normal = cpv.normalize(cpv.sub(xyz1, xyz2))
if hlength < 0:
hlength = radius * 3.0
if hradius < 0:
hradius = hlength * 0.6
if gap:
diff = cpv.scale(normal, gap)
xyz1 = cpv.sub(xyz1, diff)
xyz2 = cpv.add(xyz2, diff)
xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
for t in threshold_list:
for i in range(len(grids)):
try:
cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
except:
continue
try:
cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
except:
continue
for g in grids:
cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"18.4510002136":[], "18.4510002136_arrows":[]}
cluster_dict["18.4510002136"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(4.5), float(25.5), float(0.5), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([4.5,25.5,0.5], [2.746,27.508,1.346], color="blue red", name="Arrows_18.4510002136_1")
cluster_dict["18.4510002136"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(4.5), float(23.0), float(-7.0), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([4.5,23.0,-7.0], [4.597,24.135,-9.671], color="blue red", name="Arrows_18.4510002136_2")
cluster_dict["18.4510002136"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(10.5), float(40.5), float(-9.5), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([10.5,40.5,-9.5], [13.146,39.557,-9.214], color="blue red", name="Arrows_18.4510002136_3")
cluster_dict["18.4510002136"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(1.44905364463), float(32.5338360236), float(-2.68856342767), float(1.0)]
cluster_dict["18.4510002136"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(5.62774385769), float(26.5988444423), float(-2.34256952697), float(1.0)]
cluster_dict["18.4510002136"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(8.30163800919), float(39.4041017903), float(-9.58408107648), float(1.0)]
cluster_dict["18.4510002136"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-2.0), float(33.0), float(-1.0), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([-2.0,33.0,-1.0], [-2.812,32.143,1.248], color="red blue", name="Arrows_18.4510002136_4")
cluster_dict["18.4510002136"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-0.5), float(36.5), float(-2.5), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([-0.5,36.5,-2.5], [0.186,38.086,-4.271], color="red blue", name="Arrows_18.4510002136_5")
cluster_dict["18.4510002136"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(6.5), float(41.5), float(-8.5), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([6.5,41.5,-8.5], [4.555,40.37,-4.988], color="red blue", name="Arrows_18.4510002136_6")
cluster_dict["18.4510002136"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(6.5), float(37.0), float(-11.0), float(1.0)]
cluster_dict["18.4510002136"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(9.5), float(24.0), float(-2.0), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([9.5,24.0,-2.0], [9.475,20.992,-1.322], color="red blue", name="Arrows_18.4510002136_7")
cluster_dict["18.4510002136"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(10.0), float(32.5), float(-2.5), float(1.0)]
cluster_dict["18.4510002136_arrows"] += cgo_arrow([10.0,32.5,-2.5], [13.934,31.875,-1.202], color="red blue", name="Arrows_18.4510002136_8")
cmd.load_cgo(cluster_dict["18.4510002136"], "Features_18.4510002136", 1)
cmd.load_cgo(cluster_dict["18.4510002136_arrows"], "Arrows_18.4510002136")
cmd.set("transparency", 0.2,"Features_18.4510002136")
cmd.group("Pharmacophore_18.4510002136", members="Features_18.4510002136")
cmd.group("Pharmacophore_18.4510002136", members="Arrows_18.4510002136")
if dirpath:
f = join(dirpath, "label_threshold_18.4510002136.mol2")
else:
f = "label_threshold_18.4510002136.mol2"
cmd.load(f, 'label_threshold_18.4510002136')
cmd.hide('everything', 'label_threshold_18.4510002136')
cmd.label("label_threshold_18.4510002136", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_18.4510002136', members= 'label_threshold_18.4510002136')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
| [
"cradoux.cr@gmail.com"
] | cradoux.cr@gmail.com |
bbf3f6774454f669d508e9bd59974b7037b28edc | 8a780cb47eac9da046bdb5d6917f97a086887603 | /problems/last_stone_weight/solution.py | b701204b3ce26b9e7f0bd1ecd7c4157ef3cd893b | [] | no_license | dengl11/Leetcode | d16315bc98842922569a5526d71b7fd0609ee9fb | 43a5e436b6ec8950c6952554329ae0314430afea | refs/heads/master | 2022-12-20T03:15:30.993739 | 2020-09-05T01:04:08 | 2020-09-05T01:04:08 | 279,178,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from heapq import heappush, heappop
class Solution:
def lastStoneWeight(self, stones: List[int]) -> int:
q = []
for s in stones:
heappush(q, -s)
while len(q) > 1:
y, x = -heappop(q), -heappop(q)
if x == y: continue
heappush(q, -abs(x-y))
return 0 if not q else -q[0] | [
"ldeng1314@gmail.com"
] | ldeng1314@gmail.com |
58381efaf4839fe920f12a83c30b22c3e1049d1d | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_19735.py | cc4940e64134129ef4d9c1607ac40344591557c1 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | # driver.find_element_by_css_selector("input[onclick*='1 Bedroom Deluxe']")
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
6560fcf0b82ad4f6d534807dfb58e01c1e45ebde | df83f97ed2c6dd199005e96bc7c494cfb3b49f8c | /LeetCode/Array/Median of Two Sorted Arrays.py | 23633445d236acc7f9d0e0983179064bc07b5909 | [] | no_license | poojan14/Python-Practice | 45f0b68b0ad2f92bbf0b92286602d64f3b1ae992 | ed98acc788ba4a1b53bec3d0757108abb5274c0f | refs/heads/master | 2022-03-27T18:24:18.130598 | 2019-12-25T07:26:09 | 2019-12-25T07:26:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | '''
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
'''
class Solution(object):
def determineMedian(self,k,A,size):
if size % 2 == 0 :
return (A[k-1]+ A[k-2]) / float(2)
else:
return A[k-1]
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m , n = len(nums1),len(nums2)
num = [0]*(m+n)
i ,j,k = 0 ,0,0
while i<m and j<n:
if nums1[i] <= nums2[j]:
num[k] = nums1[i]
i += 1
else:
num[k] = nums2[j]
j += 1
k += 1
if k == (m + n)//2+1:
med = self.determineMedian(k,num,m + n)
return med
while i<m:
num[k] = nums1[i]
i += 1
k += 1
if k == (m + n)//2+1:
med = self.determineMedian(k,num,m + n)
return med
while j<n:
num[k] = nums2[j]
j += 1
k += 1
if k == (m + n)//2+1:
med = self.determineMedian(k,num,m + n)
return med
| [
"noreply@github.com"
] | poojan14.noreply@github.com |
51a9df87f975ff813ccf7a8035df497a2618fb73 | 8f24e443e42315a81028b648e753c50967c51c78 | /python/ray/train/examples/horovod/horovod_example.py | e548ce41c1bd73d6976c69b16a83100ce344b890 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 8,299 | py | import argparse
import os
import horovod.torch as hvd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from filelock import FileLock
from torchvision import datasets, transforms
import ray
from ray.air import session
from ray.train.horovod import HorovodTrainer
from ray.air.config import ScalingConfig
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def setup(config):
data_dir = config.get("data_dir", None)
seed = config.get("seed", 42)
batch_size = config.get("batch_size", 64)
use_adasum = config.get("use_adasum", False)
lr = config.get("lr", 0.01)
momentum = config.get("momentum", 0.5)
use_cuda = config.get("use_cuda", False)
# Horovod: initialize library.
hvd.init()
torch.manual_seed(seed)
if use_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
data_dir = data_dir or "~/data"
with FileLock(os.path.expanduser("~/.horovod_lock")):
train_dataset = datasets.MNIST(
data_dir,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank()
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs
)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not use_adasum else 1
if use_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=momentum)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
op=hvd.Adasum if use_adasum else hvd.Average,
)
return model, optimizer, train_loader, train_sampler
def train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
):
loss = None
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# Horovod: use train_sampler to determine the number of
# examples in this worker's partition.
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_sampler),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
return loss.item() if loss else None
# Horovod function API.
def train_func(config):
num_epochs = config.get("num_epochs", 10)
log_interval = config.get("log_interval", 10)
use_cuda = config.get("use_cuda", False)
model, optimizer, train_loader, train_sampler = setup(config)
for epoch in range(num_epochs):
loss = train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
)
session.report(dict(loss=loss))
def main(num_workers, use_gpu, kwargs):
trainer = HorovodTrainer(
train_func,
train_loop_config=kwargs,
scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=num_workers),
)
results = trainer.fit()
print(results.metrics)
# Horovod Class API.
class HorovodTrainClass:
def __init__(self, config):
self.log_interval = config.get("log_interval", 10)
self.use_cuda = config.get("use_cuda", False)
if self.use_cuda:
torch.cuda.set_device(hvd.local_rank())
self.model, self.optimizer, self.train_loader, self.train_sampler = setup(
config
)
def train(self, epoch):
loss = train_epoch(
self.model,
self.optimizer,
self.train_sampler,
self.train_loader,
epoch,
self.log_interval,
self.use_cuda,
)
return loss
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(
description="PyTorch MNIST Example",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--num-epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)",
)
parser.add_argument(
"--use-gpu", action="store_true", default=False, help="enables CUDA training"
)
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction",
)
parser.add_argument(
"--num-workers",
type=int,
default=2,
help="Number of Ray workers to use for training.",
)
parser.add_argument(
"--data-dir",
help="location of the training dataset in the local filesystem ("
"will be downloaded if needed)",
)
parser.add_argument(
"--address",
required=False,
type=str,
default=None,
help="Address of Ray cluster.",
)
args = parser.parse_args()
if args.address:
ray.init(args.address)
else:
ray.init()
use_cuda = args.use_gpu if args.use_gpu is not None else False
kwargs = {
"data_dir": args.data_dir,
"seed": args.seed,
"use_cuda": use_cuda,
"batch_size": args.batch_size,
"use_adasum": args.use_adasum if args.use_adasum else False,
"lr": args.lr,
"momentum": args.momentum,
"num_epochs": args.num_epochs,
"log_interval": args.log_interval,
}
main(num_workers=args.num_workers, use_gpu=use_cuda, kwargs=kwargs)
| [
"noreply@github.com"
] | simon-mo.noreply@github.com |
20a15b3c0182c1e1b3333858ecf1a1dbd05f7f53 | 44722fb1541645937f17e8e920f4954ff99cc046 | /src/gamesbyexample/clickbait.py | 5075628e0b5995e53b6187567a31e16e9385eb21 | [] | no_license | asweigart/gamesbyexample | a065d21be6c2e05a4c17643986b667efae0bc6de | 222bfc3b15ade1cf3bde158ba72a8b7a969ccc5a | refs/heads/main | 2023-07-16T12:12:58.541597 | 2021-09-01T21:24:35 | 2021-09-01T21:24:35 | 343,331,493 | 89 | 10 | null | null | null | null | UTF-8 | Python | false | false | 4,979 | py | """Clickbait Headline Generator, by Al Sweigart al@inventwithpython.com
A clickbait headline generator for your soulless content farm website.
This code is available at https://nostarch.com/big-book-small-python-programming
Tags: large, beginner, humor, word"""
__version__ = 0
import random
# Set up the constants:
OBJECT_PRONOUNS = ['Her', 'Him', 'Them']
POSSESIVE_PRONOUNS = ['Her', 'His', 'Their']
PERSONAL_PRONOUNS = ['She', 'He', 'They']
STATES = ['California', 'Texas', 'Florida', 'New York', 'Pennsylvania',
'Illinois', 'Ohio', 'Georgia', 'North Carolina', 'Michigan']
NOUNS = ['Athlete', 'Clown', 'Shovel', 'Paleo Diet', 'Doctor', 'Parent',
'Cat', 'Dog', 'Chicken', 'Robot', 'Video Game', 'Avocado',
'Plastic Straw','Serial Killer', 'Telephone Psychic']
PLACES = ['House', 'Attic', 'Bank Deposit Box', 'School', 'Basement',
'Workplace', 'Donut Shop', 'Apocalypse Bunker']
WHEN = ['Soon', 'This Year', 'Later Today', 'RIGHT NOW', 'Next Week']
def main():
print('Clickbait Headline Generator')
print('By Al Sweigart al@inventwithpython.com')
print()
print('Our website needs to trick people into looking at ads!')
while True:
print('Enter the number of clickbait headlines to generate:')
response = input('> ')
if not response.isdecimal():
print('Please enter a number.')
else:
numberOfHeadlines = int(response)
break # Exit the loop once a valid number is entered.
for i in range(numberOfHeadlines):
clickbaitType = random.randint(1, 8)
if clickbaitType == 1:
headline = generateAreMillenialsKillingHeadline()
elif clickbaitType == 2:
headline = generateWhatYouDontKnowHeadline()
elif clickbaitType == 3:
headline = generateBigCompaniesHateHerHeadline()
elif clickbaitType == 4:
headline = generateYouWontBelieveHeadline()
elif clickbaitType == 5:
headline = generateDontWantYouToKnowHeadline()
elif clickbaitType == 6:
headline = generateGiftIdeaHeadline()
elif clickbaitType == 7:
headline = generateReasonsWhyHeadline()
elif clickbaitType == 8:
headline = generateJobAutomatedHeadline()
print(headline)
print()
website = random.choice(['wobsite', 'blag', 'Facebuuk', 'Googles',
'Facesbook', 'Tweedie', 'Pastagram'])
when = random.choice(WHEN).lower()
print('Post these to our', website, when, 'or you\'re fired!')
# Each of these functions returns a different type of headline:
def generateAreMillenialsKillingHeadline():
noun = random.choice(NOUNS)
return 'Are Millenials Killing the {} Industry?'.format(noun)
def generateWhatYouDontKnowHeadline():
noun = random.choice(NOUNS)
pluralNoun = random.choice(NOUNS) + 's'
when = random.choice(WHEN)
return 'Without This {}, {} Could Kill You {}'.format(noun, pluralNoun, when)
def generateBigCompaniesHateHerHeadline():
pronoun = random.choice(OBJECT_PRONOUNS)
state = random.choice(STATES)
noun1 = random.choice(NOUNS)
noun2 = random.choice(NOUNS)
return 'Big Companies Hate {}! See How This {} {} Invented a Cheaper {}'.format(pronoun, state, noun1, noun2)
def generateYouWontBelieveHeadline():
state = random.choice(STATES)
noun = random.choice(NOUNS)
pronoun = random.choice(POSSESIVE_PRONOUNS)
place = random.choice(PLACES)
return 'You Won\'t Believe What This {} {} Found in {} {}'.format(state, noun, pronoun, place)
def generateDontWantYouToKnowHeadline():
pluralNoun1 = random.choice(NOUNS) + 's'
pluralNoun2 = random.choice(NOUNS) + 's'
return 'What {} Don\'t Want You To Know About {}'.format(pluralNoun1, pluralNoun2)
def generateGiftIdeaHeadline():
number = random.randint(7, 15)
noun = random.choice(NOUNS)
state = random.choice(STATES)
return '{} Gift Ideas to Give Your {} From {}'.format(number, noun, state)
def generateReasonsWhyHeadline():
number1 = random.randint(3, 19)
pluralNoun = random.choice(NOUNS) + 's'
# number2 should be no larger than number1:
number2 = random.randint(1, number1)
return '{} Reasons Why {} Are More Interesting Than You Think (Number {} Will Surprise You!)'.format(number1, pluralNoun, number2)
def generateJobAutomatedHeadline():
state = random.choice(STATES)
noun = random.choice(NOUNS)
i = random.randint(0, 2)
pronoun1 = POSSESIVE_PRONOUNS[i]
pronoun2 = PERSONAL_PRONOUNS[i]
if pronoun1 == 'Their':
return 'This {} {} Didn\'t Think Robots Would Take {} Job. {} Were Wrong.'.format(state, noun, pronoun1, pronoun2)
else:
return 'This {} {} Didn\'t Think Robots Would Take {} Job. {} Was Wrong.'.format(state, noun, pronoun1, pronoun2)
# If the program is run (instead of imported), run the game:
if __name__ == '__main__':
main()
| [
"asweigart@gmail.com"
] | asweigart@gmail.com |
e02d915d5a6b8071f34409db34e69e57ba779233 | 41ea088695ed956ef8c6e34ace4d8ab19c8b4352 | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/VhzJ2IJqcIhdz9Ev1YCeFRUb9JMBYBYPD6jceZX7twM=/_isotonic.cpython-37m-x86_64-linux-gnu.pyi | 3dde1904791e24e112bf6d51edbc57ec74501d07 | [] | no_license | ljbelenky/decline | d5c1d57fd927fa6a8ea99c1e08fedbeb83170d01 | 432ef82a68168e4ac8635a9386af2aa26cd73eef | refs/heads/master | 2021-06-18T17:01:46.969491 | 2021-04-26T18:34:55 | 2021-04-26T18:34:55 | 195,559,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | pyi | import builtins as _mod_builtins
__builtins__ = {}
__doc__ = None
__file__ = '/home/land/.local/lib/python3.7/site-packages/sklearn/_isotonic.cpython-37m-x86_64-linux-gnu.so'
__name__ = 'sklearn._isotonic'
__package__ = 'sklearn'
def __pyx_unpickle_Enum():
pass
__test__ = _mod_builtins.dict()
def _inplace_contiguous_isotonic_regression(y, w):
pass
def _make_unique(X, y, sample_weights):
'Average targets for duplicate X, drop duplicates.\n\n Aggregates duplicate X values into a single X value where\n the target y is a (sample_weighted) average of the individual\n targets.\n\n Assumes that X is ordered, so that all duplicates follow each other.\n '
pass
| [
"ljbelenky@gmail.com"
] | ljbelenky@gmail.com |
0a13f9b43163b6133340ad53e879fb1b311e02df | 6a0d42149f8bbe5f7d6cb8103fe557d0d048c832 | /products/views.py | 90e967bcee4f19e5fcc3e1352d1d349f846acbf8 | [] | no_license | syfqpipe/product-public | 8f3b2f81d0c9fdc61bb5841db1d4d9d26bb618a1 | 62b918bd9f24b4a47fab04398ca8112268e1e2b1 | refs/heads/master | 2023-01-22T05:14:19.132567 | 2020-12-05T03:22:56 | 2020-12-05T03:22:56 | 318,689,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,260 | py | from django.http import JsonResponse
from django.shortcuts import render
from django.db.models import Q
from django.core.files.storage import default_storage
from django.utils.timezone import make_aware
from django.template.loader import render_to_string
from weasyprint import HTML, CSS
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponse
from django.core.files.base import ContentFile
from django.conf import settings
from wsgiref.util import FileWrapper
# import datetime
import json
import uuid
import tempfile
import pytz
import subprocess
import io
import xlsxwriter
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework import viewsets, status
from rest_framework_extensions.mixins import NestedViewSetMixin
from django_filters.rest_framework import DjangoFilterBackend
from datetime import datetime
from products.services.get_comp_prof import get_comp_prof
from products.services.get_info_acgs import get_info_acgs
from products.services.get_new_format_entity import get_new_format_entity
from products.services.get_cert_incorp import get_cert_incorp
from products.services.get_cert_reg_foreign import get_cert_reg_foreign
from products.services.get_info_comp_name_chg import get_info_comp_name_chg
from products.services.get_info_branch_listing import get_info_branch_listing
from products.services.get_info_fin2 import get_info_fin2
from products.services.get_info_fin3 import get_info_fin3
from products.services.get_info_fin5 import get_info_fin5
from products.services.get_info_fin10 import get_info_fin10
from products.services.get_info_hist2 import get_info_hist2
from products.services.get_cert_conversion import get_cert_conversion
from products.services.get_info_financial import get_info_financial
from products.services.get_roc_business_officers import get_roc_business_officers
from products.services.get_roc_changes_registered_address import get_roc_changes_registered_address
from products.services.get_details_of_shareholders import get_details_of_shareholders
from products.services.get_details_of_share_capital import get_details_of_share_capital
from products.services.get_biz_profile import get_biz_profile
from products.services.get_particulars_of_cosec import get_particulars_of_cosec
from products.services.get_info_rob_termination import get_info_rob_termination
from products.services.get_info_charges import get_info_charges
from products.services.get_comp_listing_cnt import get_comp_listing_cnt
from products.services.get_comp_listing_a import get_comp_listing_a
from products.services.get_comp_listing_b import get_comp_listing_b
from products.services.get_comp_listing_c import get_comp_listing_c
from products.services.get_comp_listing_d import get_comp_listing_d
from products.services.get_image import get_image
from products.services.get_image_view import get_image_view
from products.services.get_image_list import get_image_list
from products.services.get_image_ctc import get_image_ctc
from products.services.get_particulars_of_adt_firm import get_particulars_of_adt_firm
from products.services.get_co_count import get_co_count
from products.services.get_co_page import get_co_page
from .services.availability.get_info_charges_listing import get_info_charges_listing
from .services.availability.get_info_financial_year import get_info_financial_year
from .services.availability.get_info_acgs_query import get_info_acgs_query
from .services.availability.get_info_incorp import get_info_incorp
from .services.availability.get_list_address_changes_year import get_list_address_changes_year
from .services.availability.get_is_act_2016 import get_is_act_2016
from .services.availability.get_is_name_changed import get_is_name_changed
from .services.availability.get_is_company_converted import get_is_company_converted
from .services.availability.get_info_rob_termination_list import get_info_rob_termination_list
from .services.availability.get_info_branch_list import get_info_branch_list
from .helpers.info_acgs import info_acgs
from .helpers.roc_business_officers import roc_business_officers
from .helpers.biz_profile import biz_profile
from .helpers.particular_address import particular_address
from .helpers.cert_incorp import cert_incorp
from .helpers.info_fin_2 import info_fin_2
from .helpers.info_hist_2 import info_hist_2
from .helpers.comp_prof import comp_prof
from .helpers.mapping import status_of_comp_mapping, comp_type_mapping, state_mapping
from .helpers.acgs import acgs
from .helpers.change_name import change_name
from .helpers.particular_audit_firm import particular_audit_firm
from .helpers.company_charges import company_charges
from .helpers.particular_sharecapital import particular_sharecapital
from .helpers.particular_shareholders import particular_shareholders
from .helpers.info_rob_termination import info_rob_termination
from .models import (
Product,
ProductSearchCriteria
)
from .serializers import (
ProductSerializer,
ProductSearchCriteriaSerializer
)
class ProductViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Product.objects.all()
serializer_class = ProductSerializer
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filterset_fields = [
'active',
'ctc',
'roc',
'channel',
'webservice',
'output_type',
'language'
]
def get_permissions(self):
if self.action == 'list':
permission_classes = [AllowAny]
else:
permission_classes = [AllowAny]
return [permission() for permission in permission_classes]
def get_queryset(self):
queryset = Product.objects.all()
"""
if self.request.user.is_anonymous:
queryset = Company.objects.none()
else:
user = self.request.user
company_employee = CompanyEmployee.objects.filter(employee=user)
company = company_employee[0].company
if company.company_type == 'AD':
queryset = Product.objects.all()
else:
queryset = Product.objects.filter(company=company.id)
"""
return queryset
@action(methods=['POST'], detail=False)
def services(self, request, *args, **kwargs):
call_json = json.loads(request.body)
request_service_name = call_json['name']
registration_number = call_json['registration_number']
entity_type = call_json['entity_type']
tz = pytz.timezone('Asia/Kuala_Lumpur')
now = datetime.now(tz=tz)
print(now)
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
url_info = 'http://integrasistg.ssm.com.my/InfoService/1'
url_listing = 'http://integrasistg.ssm.com.my/ListingService/1'
url_docu = 'http://integrasistg.ssm.com.my/DocufloService/1'
headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
# New format entity
if request_service_name == 'getNewFormatEntity':
json_response = get_new_format_entity(url_info, headers, registration_number, entity_type)
# Attestation of Company Good Standing (ACGS) - Non CTC / MS EN
elif request_service_name == 'getInfoAcgs':
json_response = get_info_acgs(url_info, headers, registration_number, entity_type)
# Sijil Pemerbadanan Syarikat Persendirian di bawah AS 2016 - Non CTC / MS EN
# Sijil Pemerbadanan Syarikat Awam di bawah AS 2016 - Non CTC / MS EN
# Sijil Pemerbadanan Syarikat Awam di bawah AS 2016 - Non CTC / MS EN (menurut jaminan)
elif request_service_name == 'getCertIncorp':
json_response = get_cert_incorp(url_info, headers, registration_number, entity_type)
# Sijil Pemerbadanan Syarikat Persendirian di bawah AS 2016 - CTC / MS EN
# Sijil Pemerbadanan Syarikat Awam di bawah AS 2016 - CTC / MS EN
# Sijil Pemerbadanan Syarikat Awam di bawah AS 2016 - CTC / MS EN (menurut jaminan)
elif request_service_name == 'getCertIncorpCtc':
json_response = get_cert_incorp_ctc(url_info, headers, registration_number, entity_type)
# Sijil Pendaftaran Syarikat Asing di bawah AS 2016 - Non CTC / MS EN
elif request_service_name == 'getCertRegForeign':
json_response = get_cert_reg_foreign(url_info, headers, registration_number, entity_type)
# Sijil Pendaftaran Syarikat Asing di bawah AS 2016 - CTC / MS EN
elif request_service_name == 'getCertRegForeignCtc':
json_response = get_cert_reg_foreign_ctc(url_info, headers, registration_number, entity_type)
# Sijil Pertukaran Nama Syarikat AS 2016 - Non CTC / MS EN
elif request_service_name == 'getInfoCompNameChg':
json_response = get_info_comp_name_chg(url_info, headers, registration_number, entity_type)
# Sijil Pertukaran Nama Syarikat AS 2016 - CTC / MS EN
elif request_service_name == 'getInfoCompNameChgCtc':
json_response = get_info_comp_name_chg_ctc(url_info, headers, registration_number, entity_type)
# Sijil Pertukaran Status Syarikat AS 2016 - Non CTC / MS EN
elif request_service_name == 'getCertConversion':
json_response = get_cert_conversion(url_info, headers, registration_number, entity_type)
# Sijil Pertukaran Status Syarikat AS 2016 - CTC / MS EN
elif request_service_name == 'getCertConversionCtc':
json_response = get_cert_conversion_ctc(url_info, headers, registration_number, entity_type)
# Financial Historical 2 Years - Non CTC / MS EN
elif request_service_name == 'getInfoFinancial':
json_response = get_info_financial(url_info, headers, registration_number, entity_type)
# Financial Historical 2 Years - CTC / MS EN
elif request_service_name == 'getInfoFinancialCtc':
json_response = get_info_financial_ctc(url_info, headers, registration_number, entity_type)
# Financial Comparison 2 Years - Non CTC / MS EN
elif request_service_name == 'getInfoFin2':
json_response = get_info_fin2(url_info, headers, registration_number, entity_type)
# Financial Comparison 3 Years - Non CTC / MS EN
elif request_service_name == 'getInfoFin3':
json_response = get_info_fin3(url_info, headers, registration_number, entity_type)
# Financial Comparison 5 Years - Non CTC / MS EN
elif request_service_name == 'getInfoFin5':
json_response = get_info_fin5(url_info, headers, registration_number, entity_type)
# Financial Comparison 10 Years - Non CTC / MS EN
elif request_service_name == 'getInfoFin10':
json_response = get_info_fin10(url_info, headers, registration_number, entity_type)
# Particulars of Directors/Officers - Non CTC / MS EN
elif request_service_name == 'getRocBusinessOfficers':
json_response = get_roc_business_officers(url_info, headers, registration_number, entity_type)
# Particulars of Directors/Officers - CTC / MS EN
elif request_service_name == 'getRocBizOfficersCtc':
json_response = get_roc_business_officers_ctc(url_info, headers, registration_number, entity_type)
# Particulars of Registered Address - Non CTC / MS EN
elif request_service_name == 'getRocChangesRegisteredAddress':
json_response = get_roc_changes_registered_address(url_info, headers, registration_number, entity_type)
# Particulars of Registered Address - CTC / MS EN
elif request_service_name == 'getRocChgRegAddrCtc':
json_response = get_roc_changes_registered_address_ctc(url_info, headers, registration_number, entity_type)
# Particular of Shareholders - Non CTC / MS EN
elif request_service_name == 'getDetailsOfShareholders':
json_response = get_details_of_shareholders(url_info, headers, registration_number, entity_type)
# Particular of Shareholders - CTC / MS EN
elif request_service_name == 'getDtlsOfShareholdersCtc':
json_response = get_details_of_shareholders_ctc(url_info, headers, registration_number, entity_type)
# Particulars of Share Capital - Non CTC / MS EN
elif request_service_name == 'getDetailsOfShareCapital':
json_response = get_details_of_share_capital(url_info, headers, registration_number, entity_type)
# Particulars of Share Capital - CTC / MS EN
elif request_service_name == 'getDtlsOfShareCapCtc':
json_response = get_details_of_share_capital_ctc(url_info, headers, registration_number, entity_type)
# Company Profile - Non CTC / MS EN
elif request_service_name == 'getCompProfile':
json_response = get_comp_prof(url_info, headers, registration_number, entity_type)
# Company Profile - CTC / MS EN
elif request_service_name == 'getCompProfileCtc':
json_response = get_comp_prof_ctc(url_info, headers, registration_number, entity_type)
# Business Profile – Non CTC / MS EN
elif request_service_name == 'getBizProfile':
json_response = get_biz_profile(url_info, headers, registration_number)
# Business Profile – CTC / MS EN
elif request_service_name == 'getBizProfileCtc':
json_response = get_biz_profile_ctc(url_info, headers, registration_number, entity_type)
# Business Certificate - Digital CTC / MS EN
# Particulars of Company Secretary - Non CTC / MS EN
elif request_service_name == 'getParticularsOfCosec':
json_response = get_particulars_of_cosec(url_info, headers, registration_number, entity_type)
# Particulars of Company Secretary - CTC / MS EN
elif request_service_name == 'getParticularsOfCosecCtc':
json_response = get_particulars_of_cosec_ctc(url_info, headers, registration_number, entity_type)
# Audit Firm Profile – Non CTC / MS EN
elif request_service_name == 'getParticularsOfAdtFirm':
json_response = get_particulars_of_adt_firm(url_info, headers, registration_number, entity_type)
# Audit Firm Profile – CTC / MS EN
elif request_service_name == 'getParticularsOfAdtFirmCtc':
json_response = get_particulars_of_adt_firm_ctc(url_info, headers, registration_number, entity_type)
# Business Termination Letter (BTL) - Non CTC / MS EN
elif request_service_name == 'getInfoRobTermination':
json_response = get_info_rob_termination(url_info, headers, registration_number, entity_type)
# Company Charges - Non CTC / MS EN
elif request_service_name == 'getInfoCharges':
json_response = get_info_charges(url_info, headers, registration_number, entity_type)
# Company Charges - CTC / MS EN
elif request_service_name == 'getInfoChargesCtc':
json_response = get_info_charges_ctc(url_info, headers, registration_number, entity_type)
# Company Listing
elif request_service_name == 'getCompListingCnt':
json_response = get_comp_listing_cnt(url_listing, headers, registration_number, entity_type)
# Company Listing Package A
elif request_service_name == 'getCompListingA':
json_response = get_comp_listing_a(url_listing, headers, registration_number, entity_type)
# Company Listing Package B
elif request_service_name == 'getCompListingB':
json_response = get_comp_listing_b(url_listing, headers, registration_number, entity_type)
# Document and Form View + Download getImageView / getImageList
elif request_service_name == 'getImage':
json_response = get_image(url_docu, headers, registration_number, entity_type)
elif request_service_name == 'getImageList':
json_response = get_image_list(url_docu, headers, registration_number, entity_type)
# Document and Form View + Download + CTC getImageViewCTC
elif request_service_name == 'getImageView':
json_response = get_image_list(url_docu, headers, registration_number, entity_type)
# Document and Form View (Statutory Docs) getImageCtc
elif request_service_name == 'getImageCtc':
json_response = get_image_ctc(url_docu, headers, registration_number, entity_type)
elif request_service_name == 'getCoCount':
json_response = get_co_count(url_info, headers, registration_number, entity_type)
elif request_service_name == 'getCoPage':
json_response = get_co_page(url_info, headers, registration_number, entity_type)
return JsonResponse(json_response)
@action(methods=['POST'], detail=False)
def check_availability(self, request, *args, **kwargs):
product_request_json = json.loads(request.body)
registration_ = product_request_json['registration_no']
entity_type_ = product_request_json['entity_type']
information_url = 'http://integrasistg.ssm.com.my/InfoService/1'
now = datetime.now(tz=pytz.timezone('Asia/Kuala_Lumpur'))
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
request_headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
info_charges = get_info_charges_listing(information_url, request_headers, registration_)
financial_year = get_info_financial_year(information_url, request_headers, registration_)
acgs = get_info_acgs_query(information_url, request_headers, registration_)
info_incorp_share = get_info_incorp(information_url, request_headers, registration_, 'share')
list_address_changes_year = get_list_address_changes_year(information_url, request_headers, registration_)
is_incorp_act_2016 = get_is_act_2016(information_url, request_headers, registration_)
info_incorp_reg = get_info_incorp(information_url, request_headers, registration_, 'reg')
is_name_changed = get_is_name_changed(information_url, request_headers, registration_)
is_company_converted = get_is_company_converted(information_url, request_headers, registration_)
info_termination_list = get_info_rob_termination_list(information_url, request_headers, registration_)
info_branch_list = get_info_branch_list(information_url, request_headers, registration_)
data_json = {
'info_charges': info_charges,
'financial_year': financial_year,
'acgs': acgs,
'shareholders': info_incorp_share,
'share_capital': info_incorp_share,
'list_address_changes_year': list_address_changes_year,
'info_incorp_reg': is_incorp_act_2016,
'is_name_changed': is_name_changed,
'info_incorp': info_incorp_reg,
'is_company_converted': is_company_converted,
'info_termination_list': info_termination_list,
'info_branch_list': info_branch_list
}
return JsonResponse(data_json)
@action(methods=['POST'], detail=False)
def generate_product(self, request, *args, **kwargs):
product_request_json = json.loads(request.body)
name_ = product_request_json['name']
language_ = product_request_json['language']
ctc_ = product_request_json['ctc']
registration_ = product_request_json['registration_no']
entity_type_ = product_request_json['entity_type']
information_url = 'http://integrasistg.ssm.com.my/InfoService/1'
listing_url = 'http://integrasistg.ssm.com.my/ListingService/1'
document_url = 'http://integrasistg.ssm.com.my/DocufloService/1'
now = datetime.now(tz=pytz.timezone('Asia/Kuala_Lumpur'))
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
request_headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
css_file = 'https://pipeline-project.sgp1.digitaloceanspaces.com/mbpp-elatihan/css/template.css'
#css_file = 'http://127.0.0.1:8000/static/css/template.css'
new_entity_id = get_new_format_entity(information_url, request_headers, registration_, entity_type_)
if name_ == 'acgs':
middleware_data = get_info_acgs(information_url, request_headers, registration_, entity_type_)
latest_doc_date = get_comp_prof(information_url, request_headers, registration_, entity_type_)['rocCompanyInfo']['latestDocUpdateDate']
middleware_data['latest_doc_date'] = latest_doc_date
data_loaded = acgs(middleware_data, new_entity_id, language_)
elif name_ == 'certificate_of_incorporation_registration':
middleware_data = get_cert_incorp(information_url, request_headers, registration_)
data_loaded = cert_incorp(middleware_data, new_entity_id, language_)
if middleware_data['companyStatus'] == 'U':
if middleware_data['companyType'] == 'S':
name_ = 'public_incorp_cert'
else:
name_ = 'public_guarantee_incorp_cert'
else:
name_ = 'certificate_of_incorporation_registration'
if middleware_data['localforeignCompany'] != 'L':
name_ = 'foreign_incorp_cert'
# elif name_ == 'public_incorp_cert':
# middleware_data = get_cert_incorp(information_url, request_headers, registration_)
# data_loaded = cert_incorp(middleware_data, new_entity_id, language_)
# elif name_ == 'public_guarantee_incorp_cert':
# middleware_data = get_cert_incorp(information_url, request_headers, registration_)
# data_loaded = cert_incorp(middleware_data, new_entity_id, language_)
# elif name_ == 'foreign_incorp_cert':
# middleware_data = get_cert_reg_foreign(information_url, request_headers, registration_)
# data_loaded = cert_incorp(middleware_data, new_entity_id, language_)
elif name_ == 'certificate_of_change_of_name':
middleware_data = get_info_comp_name_chg(information_url, request_headers, registration_)
data_loaded = change_name(middleware_data, new_entity_id, language_)
elif name_ == 'certificate_of_conversion':
middleware_data = get_cert_conversion(information_url, request_headers, registration_)
data_loaded = change_name(middleware_data, new_entity_id, language_)
elif name_ == 'financial_historical':
year1 = product_request_json['year1']
year2 = product_request_json['year2']
middleware_data_year1 = get_info_hist2(information_url, request_headers, registration_, entity_type_, year1)
middleware_data_year2 = get_info_hist2(information_url, request_headers, registration_, entity_type_, year2)
data_loaded_1 = info_hist_2(middleware_data_year1,new_entity_id, language_)
data_loaded_2 = info_hist_2(middleware_data_year2,new_entity_id, language_)
data_loaded = data_loaded_1
balance_sheet_year1 = data_loaded_1['balance_sheet'][0]
balance_sheet_year2 = data_loaded_2['balance_sheet'][0]
profit_loss_year1 = data_loaded_1['profit_loss'][0]
profit_loss_year2 = data_loaded_2['profit_loss'][0]
del data_loaded['balance_sheet']
del data_loaded['profit_loss']
data_loaded['balance_sheet'] = []
data_loaded['profit_loss'] = []
data_loaded['balance_sheet'].append(balance_sheet_year1)
data_loaded['balance_sheet'].append(balance_sheet_year2)
data_loaded['profit_loss'].append(profit_loss_year1)
data_loaded['profit_loss'].append(profit_loss_year2)
print(data_loaded)
elif name_ == 'financial_comparison_2':
now = datetime.now()
middleware_data = get_info_fin2(information_url, request_headers, registration_, entity_type_, str(now.year-2), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'financial_comparison_3':
now = datetime.now()
middleware_data = get_info_fin3(information_url, request_headers, registration_, entity_type_, str(now.year-3), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'financial_comparison_5':
now = datetime.now()
middleware_data = get_info_fin5(information_url, request_headers, registration_, entity_type_, str(now.year-5), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'financial_comparison_10':
now = datetime.now()
middleware_data = get_info_fin10(information_url, request_headers, registration_, entity_type_, str(now.year-10), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_directors_officers':
middleware_data = get_roc_business_officers(information_url, request_headers, registration_, entity_type_)
data_loaded = roc_business_officers(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_registered_address':
middleware_data = get_roc_changes_registered_address(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_address(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_shareholders':
middleware_data = get_details_of_shareholders(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_shareholders(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_share_capital':
middleware_data = get_details_of_share_capital(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_sharecapital(middleware_data, new_entity_id, language_,entity_type_)
elif name_ == 'company_profile':
now = datetime.now()
middleware_data = get_comp_prof(information_url, request_headers, registration_, entity_type_)
data_loaded = comp_prof(middleware_data, new_entity_id, language_)
elif name_ == 'business_profile':
print(entity_type_ )
middleware_data = get_biz_profile(information_url, request_headers, registration_)
data_loaded = biz_profile(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_company_secretary':
middleware_data = get_comp_prof(information_url, request_headers, registration_, entity_type_)
data_loaded = comp_prof(middleware_data, new_entity_id, language_)
elif name_ == 'audit_firm_profile':
middleware_data = get_particulars_of_adt_firm(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_audit_firm(middleware_data, new_entity_id, language_)
elif name_ == 'business_termination_letter':
middleware_data = get_info_rob_termination(information_url, request_headers, registration_)
data_loaded = info_rob_termination(middleware_data, new_entity_id, language_)
elif name_ == 'company_charges':
com_profile = get_comp_prof(information_url, request_headers, registration_, entity_type_)
middleware_data = get_info_charges(information_url, request_headers, registration_, entity_type_)
data_loaded = company_charges(middleware_data, new_entity_id, com_profile, language_, entity_type_)
elif name_ == 'foreign_change_name':
middleware_data = get_info_charges(information_url, request_headers, registration_, entity_type_)
data_loaded = change_name(middleware_data, new_entity_id, language_)
else:
pass
if language_ == 'en':
html_string = render_to_string('product/'+ name_ +'_en.html', {'data': data_loaded})
elif language_ == 'ms':
html_string = render_to_string('product/'+ name_ +'_ms.html', {'data': data_loaded})
if 'pdf' in product_request_json:
if product_request_json['pdf'] == False:
serializer = data_loaded
return Response(serializer)
html = HTML(string=html_string)
#pdf_file = html.write_pdf(stylesheets=[CSS(css_file)])
pdf_file = html.write_pdf()
file_path = "ssm/product/" + name_ + "-" + datetime.utcnow().strftime("%s") + "-" + uuid.uuid4().hex + '.pdf'
saved_file = default_storage.save(
file_path,
ContentFile(pdf_file)
)
full_url_path = settings.MEDIA_ROOT + saved_file
serializer = data_loaded
serializer['pdflink'] = 'https://pipeline-project.sgp1.digitaloceanspaces.com/' + file_path
return Response(serializer)
@action(methods=['POST'], detail=False)
def generate_image(self, request, *args, **kwargs):
product_request_json = json.loads(request.body)
name_ = product_request_json['name'] # Either 'list' or 'specific'
registration_ = product_request_json['registration_no']
entity_type_ = product_request_json['entity_type']
information_url = 'http://integrasistg.ssm.com.my/InfoService/1'
document_url = 'http://integrasistg.ssm.com.my/DocufloService/1'
now = datetime.now(tz=pytz.timezone('Asia/Kuala_Lumpur'))
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
request_headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
if entity_type_ == 'ROC':
check_digit = get_comp_prof(information_url, request_headers, registration_, entity_type_)['rocCompanyInfo']['checkDigit']
elif entity_type_ == 'ROB':
check_digit = get_biz_profile(information_url, request_headers, registration_)['robBusinessInfo']['checkDigit']
if name_ == 'list':
middleware_data = get_image_view(document_url, request_headers, registration_, entity_type_, check_digit)
data_ = middleware_data['documentInfos']['documentInfos']
else:
version_id = int(product_request_json['version_id'])
middleware_data = get_image(document_url, request_headers, registration_, entity_type_, check_digit, version_id)
data_ = middleware_data['docContent']
return Response(data_)
@action(methods=['POST'], detail=False)
def generate_branch_list(self, request, *args, **kwargs):
product_request_json = json.loads(request.body)
registration_ = product_request_json['registration_no']
info_url = 'http://integrasistg.ssm.com.my/InfoService/1'
now = datetime.now(tz=pytz.timezone('Asia/Kuala_Lumpur'))
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
request_headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
middleware_data = get_info_branch_listing(info_url, request_headers, registration_)
data_ = middleware_data
return Response(data_)
@action(methods=['POST'], detail=False)
def generate_list(self, request, *args, **kwargs):
import xlsxwriter
product_request_json = json.loads(request.body)
a_ = product_request_json
name_ = product_request_json['name']
package_ = product_request_json['package']
information_url = 'http://integrasistg.ssm.com.my/InfoService/1'
document_url = 'http://integrasistg.ssm.com.my/DocufloService/1'
listing_url = 'http://integrasistg.ssm.com.my/ListingService/1'
now = datetime.now(tz=pytz.timezone('Asia/Kuala_Lumpur'))
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
request_headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
if name_ == "list":
if package_ == 'A':
middleware_data = get_comp_listing_a(listing_url, request_headers,
a_['bizCode'],
a_['compLocation'],
a_['compOrigin'],
a_['compStatus'],
a_['compType'],
a_['incorpDtFrom'],
a_['incorpDtTo'],
1)
data_ = middleware_data
return Response(data_)
elif package_ == 'B':
middleware_data = get_comp_listing_b(listing_url, request_headers,
a_['bizCode'],
a_['compLocation'],
a_['compOrigin'],
a_['compStatus'],
a_['compType'],
a_['incorpDtFrom'],
a_['incorpDtTo'],
1,
a_['directorNat'],
a_['shareholderNat'])
data_ = middleware_data
return Response(data_)
elif package_ == 'C':
pass
elif package_ == 'D':
pass
elif name_ == 'excel':
if package_ == 'A':
middleware_data = get_comp_listing_a(listing_url, request_headers,
a_['bizCode'],
a_['compLocation'],
a_['compOrigin'],
a_['compStatus'],
a_['compType'],
a_['incorpDtFrom'],
a_['incorpDtTo'],
1)
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet2 = workbook.add_worksheet('COMPANY INFORMATION')
data2 = [["No.","Company No.","Company Name","Old Company Name","Entity Type","Company Type","Company Status","Incorp. Date"]]
count = 1
for co in middleware_data['company']:
count += 1
new_row = [count, co["compInfo"]["compNo"] + '-' + co["compInfo"]["chkDigit"], co["compInfo"]["compName"], co["compInfo"]["compOldNm"], "LOCAL", comp_type_mapping(co["compInfo"]["compType"], 'en'), status_of_comp_mapping(co["compInfo"]["compStatus"]), co["compInfo"]["incorpDt"] ]
data2.append(new_row)
for row_num, columns in enumerate(data2):
for col_num, cell_data in enumerate(columns):
worksheet2.write(row_num, col_num, cell_data)
worksheet3 = workbook.add_worksheet('ADDRESS')
data3 = [["No.","Company No.","Company Name","Registered Address", "Business Address"]]
count = 1
for co in middleware_data['company']:
count += 1
if co["regAddress"]["address3"]:
registered_address = co["regAddress"]["address1"] + co["regAddress"]["address2"] + co["regAddress"]["address3"] + co["regAddress"]["town"] + co["regAddress"]["postcode"] + state_mapping(co["regAddress"]["stateCode"])
else:
registered_address = co["regAddress"]["address1"] + co["regAddress"]["address2"] + co["regAddress"]["town"] + co["regAddress"]["postcode"] + state_mapping(co["regAddress"]["stateCode"])
if co["busAddress"]["address3"]:
business_address = co["busAddress"]["address1"] + co["busAddress"]["address2"] + co["busAddress"]["address3"] + co["busAddress"]["town"] + co["busAddress"]["postcode"] + state_mapping(co["busAddress"]["stateCode"])
else:
business_address = co["busAddress"]["address1"] + co["busAddress"]["address2"] + co["busAddress"]["town"] + co["busAddress"]["postcode"] + state_mapping(co["busAddress"]["stateCode"])
new_row = [count, co["compInfo"]["compNo"] + '-' + co["compInfo"]["chkDigit"], co["compInfo"]["compName"], registered_address, business_address]
data3.append(new_row)
for row_num, columns in enumerate(data3):
for col_num, cell_data in enumerate(columns):
worksheet3.write(row_num, col_num, cell_data)
worksheet4 = workbook.add_worksheet('NATURE OF BUSINESS')
data4 = [["No.","Company No.","Company Name","Business Code", "Description", "Priotiy"]]
count = 1
for co in middleware_data['company']:
count += 1
new_row = [count, co["compInfo"]["compNo"] + '-' + co["compInfo"]["chkDigit"], co["compInfo"]["compName"], co["bizCodes"]["code"], co["bizCodes"]["descEng"],co["bizCodes"]["priority"] ]
data4.append(new_row)
for row_num, columns in enumerate(data4):
for col_num, cell_data in enumerate(columns):
worksheet4.write(row_num, col_num, cell_data)
workbook.close()
output.seek(0)
filename = 'PackageA.xlsx'
response = HttpResponse(
output,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
elif package_ == 'B':
middleware_data = get_comp_listing_a(listing_url, request_headers,
a_['bizCode'],
a_['compLocation'],
a_['compOrigin'],
a_['compStatus'],
a_['compType'],
a_['incorpDtFrom'],
a_['incorpDtTo'],
1,
a_['directorNat'],
a_['shareholderNat'])
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet2 = workbook.add_worksheet('COMPANY INFORMATION')
data2 = [["No.","Company No.","Company Name","Old Company Name","Entity Type","Company Type","Company Status","Incorp. Date"]]
count = 1
for co in middleware_data['company']:
count += 1
new_row = [count, co["compInfo"]["compNo"] + '-' + co["compInfo"]["chkDigit"], co["compInfo"]["compName"], co["compInfo"]["compOldNm"], "LOCAL", comp_type_mapping(co["compInfo"]["compType"], 'en'), status_of_comp_mapping(co["compInfo"]["compStatus"]), co["compInfo"]["incorpDt"] ]
data2.append(new_row)
for row_num, columns in enumerate(data2):
for col_num, cell_data in enumerate(columns):
worksheet2.write(row_num, col_num, cell_data)
worksheet3 = workbook.add_worksheet('ADDRESS')
data3 = [["No.","Company No.","Company Name","Registered Address", "Business Address"]]
count = 1
for co in middleware_data['company']:
count += 1
if co["regAddress"]["address3"]:
registered_address = co["regAddress"]["address1"] + co["regAddress"]["address2"] + co["regAddress"]["address3"] + co["regAddress"]["town"] + co["regAddress"]["postcode"] + state_mapping(co["regAddress"]["stateCode"])
else:
registered_address = co["regAddress"]["address1"] + co["regAddress"]["address2"] + co["regAddress"]["town"] + co["regAddress"]["postcode"] + state_mapping(co["regAddress"]["stateCode"])
if co["busAddress"]["address3"]:
business_address = co["busAddress"]["address1"] + co["busAddress"]["address2"] + co["busAddress"]["address3"] + co["busAddress"]["town"] + co["busAddress"]["postcode"] + state_mapping(co["busAddress"]["stateCode"])
else:
business_address = co["busAddress"]["address1"] + co["busAddress"]["address2"] + co["busAddress"]["town"] + co["busAddress"]["postcode"] + state_mapping(co["busAddress"]["stateCode"])
new_row = [count, co["compInfo"]["compNo"] + '-' + co["compInfo"]["chkDigit"], co["compInfo"]["compName"], registered_address, business_address]
data3.append(new_row)
for row_num, columns in enumerate(data3):
for col_num, cell_data in enumerate(columns):
worksheet3.write(row_num, col_num, cell_data)
worksheet4 = workbook.add_worksheet('NATURE OF BUSINESS')
data4 = [["No.","Company No.","Company Name","Business Code", "Description", "Priotiy"]]
count = 1
for co in middleware_data['company']:
count += 1
new_row = [count, co["compInfo"]["compNo"] + '-' + co["compInfo"]["chkDigit"], co["compInfo"]["compName"], co["bizCodes"]["code"], co["bizCodes"]["descEng"],co["bizCodes"]["priority"] ]
data4.append(new_row)
for row_num, columns in enumerate(data4):
for col_num, cell_data in enumerate(columns):
worksheet4.write(row_num, col_num, cell_data)
workbook.close()
output.seek(0)
filename = 'PackageA.xlsx'
response = HttpResponse(
output,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
else:
return Response({})
@action(methods=['POST'], detail=False)
def generate_egov(self, request, *args, **kwargs):
product_request_json = json.loads(request.body)
name_ = product_request_json['name']
language_ = product_request_json['language']
ctc_ = product_request_json['ctc']
registration_ = product_request_json['registration_no']
entity_type_ = product_request_json['entity_type']
information_url = 'http://integrasistg.ssm.com.my/InfoService/1'
listing_url = 'http://integrasistg.ssm.com.my/ListingService/1'
document_url = 'http://integrasistg.ssm.com.my/DocufloService/1'
now = datetime.now(tz=pytz.timezone('Asia/Kuala_Lumpur'))
now_string = now.strftime("%Y-%m-%d %H:%M:%S")
auth_code = subprocess.check_output(['java', '-jar', 'authgen.jar', 'SSMProduk', now_string, '27522718']).decode("utf-8").rstrip("\n")
request_headers = {
'content-type': "text/xml;charset=UTF-8",
'authorization': auth_code
}
#css_file = 'http://127.0.0.1:8000/static/css/template.css'
new_entity_id = get_new_format_entity(information_url, request_headers, registration_, entity_type_)
if name_ == 'acgs':
middleware_data = get_info_acgs(information_url, request_headers, registration_, entity_type_)
latest_doc_date = get_comp_prof(information_url, request_headers, registration_, entity_type_)['rocCompanyInfo']['latestDocUpdateDate']
middleware_data['latest_doc_date'] = latest_doc_date
data_loaded = acgs(middleware_data, new_entity_id, language_)
elif name_ == 'certificate_of_incorporation_registration':
middleware_data = get_cert_incorp(information_url, request_headers, registration_)
data_loaded = cert_incorp(middleware_data, new_entity_id, language_)
if middleware_data['companyStatus'] == 'U':
if middleware_data['companyType'] == 'S':
name_ = 'public_incorp_cert'
else:
name_ = 'public_guarantee_incorp_cert'
else:
name_ = 'certificate_of_incorporation_registration'
if middleware_data['localforeignCompany'] != 'L':
name_ = 'foreign_incorp_cert'
elif name_ == 'certificate_of_change_of_name':
middleware_data = get_info_comp_name_chg(information_url, request_headers, registration_)
data_loaded = change_name(middleware_data, new_entity_id, language_)
elif name_ == 'certificate_of_conversion':
middleware_data = get_cert_conversion(information_url, request_headers, registration_)
data_loaded = change_name(middleware_data, new_entity_id, language_)
elif name_ == 'financial_historical':
year1 = product_request_json['year1']
year2 = product_request_json['year2']
middleware_data_year1 = get_info_hist2(information_url, request_headers, registration_, entity_type_, year1)
middleware_data_year2 = get_info_hist2(information_url, request_headers, registration_, entity_type_, year2)
data_loaded_1 = info_hist_2(middleware_data_year1,new_entity_id, language_)
data_loaded_2 = info_hist_2(middleware_data_year2,new_entity_id, language_)
data_loaded = data_loaded_1
balance_sheet_year1 = data_loaded_1['balance_sheet'][0]
balance_sheet_year2 = data_loaded_2['balance_sheet'][0]
profit_loss_year1 = data_loaded_1['profit_loss'][0]
profit_loss_year2 = data_loaded_2['profit_loss'][0]
del data_loaded['balance_sheet']
del data_loaded['profit_loss']
data_loaded['balance_sheet'] = []
data_loaded['profit_loss'] = []
data_loaded['balance_sheet'].append(balance_sheet_year1)
data_loaded['balance_sheet'].append(balance_sheet_year2)
data_loaded['profit_loss'].append(profit_loss_year1)
data_loaded['profit_loss'].append(profit_loss_year2)
# print(data_loaded)
elif name_ == 'financial_comparison_2':
now = datetime.now()
middleware_data = get_info_fin2(information_url, request_headers, registration_, entity_type_, str(now.year-2), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'financial_comparison_3':
now = datetime.now()
middleware_data = get_info_fin3(information_url, request_headers, registration_, entity_type_, str(now.year-3), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'financial_comparison_5':
now = datetime.now()
middleware_data = get_info_fin5(information_url, request_headers, registration_, entity_type_, str(now.year-5), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'financial_comparison_10':
now = datetime.now()
middleware_data = get_info_fin10(information_url, request_headers, registration_, entity_type_, str(now.year-10), str(now.year))
data_loaded = info_fin_2(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_directors_officers':
middleware_data = get_roc_business_officers(information_url, request_headers, registration_, entity_type_)
data_loaded = roc_business_officers(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_registered_address':
middleware_data = get_roc_changes_registered_address(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_address(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_shareholders':
middleware_data = get_comp_prof(information_url, request_headers, registration_, entity_type_)
data_loaded = comp_prof(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_share_capital':
middleware_data = get_details_of_share_capital(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_sharecapital(middleware_data, new_entity_id, language_,entity_type_)
elif name_ == 'company_profile':
now = datetime.now()
middleware_data = get_comp_prof(information_url, request_headers, registration_, entity_type_)
data_loaded = comp_prof(middleware_data, new_entity_id, language_)
elif name_ == 'business_profile':
# print(entity_type_ )
middleware_data = get_biz_profile(information_url, request_headers, registration_)
data_loaded = biz_profile(middleware_data, new_entity_id, language_)
elif name_ == 'particulars_of_company_secretary':
middleware_data = get_comp_prof(information_url, request_headers, registration_, entity_type_)
data_loaded = comp_prof(middleware_data, new_entity_id, language_)
elif name_ == 'audit_firm_profile':
middleware_data = get_particulars_of_adt_firm(information_url, request_headers, registration_, entity_type_)
data_loaded = particular_audit_firm(middleware_data, new_entity_id, language_)
elif name_ == 'business_termination_letter':
middleware_data = get_info_rob_termination(information_url, request_headers, registration_)
data_loaded = info_rob_termination(middleware_data, new_entity_id, language_)
elif name_ == 'company_charges':
com_profile = get_comp_prof(information_url, request_headers, registration_, entity_type_)
middleware_data = get_info_charges(information_url, request_headers, registration_, entity_type_)
data_loaded = company_charges(middleware_data, new_entity_id, com_profile, language_, entity_type_)
elif name_ == 'foreign_change_name':
middleware_data = get_info_charges(information_url, request_headers, registration_, entity_type_)
data_loaded = change_name(middleware_data, new_entity_id, language_)
else:
pass
serializer = data_loaded
return Response(serializer)
@action(methods=['GET'], detail=False)
def lala(self, request, *args, **kwargs):
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet('Sheet One')
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
for row_num, columns in enumerate(data):
for col_num, cell_data in enumerate(columns):
worksheet.write(row_num, col_num, cell_data)
workbook.close()
output.seek(0)
filename = 'PackageA.xlsx'
response = HttpResponse(
output,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
class ProductSearchCriteriaViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = ProductSearchCriteria.objects.all()
serializer_class = ProductSearchCriteriaSerializer
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
def get_permissions(self):
if self.action == 'list':
permission_classes = [AllowAny]
else:
permission_classes = [AllowAny]
return [permission() for permission in permission_classes]
def get_queryset(self):
queryset = ProductSearchCriteria.objects.all()
return queryset | [
"syafiqbasri@pipeline-network.com"
] | syafiqbasri@pipeline-network.com |
db718b81d3b369a7e54eb24bc87b9937a19c913e | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /22_专题/k个数组/Minimum Adjacent Elements-有序数组绝对差最小.py | 58546a10f7e514dd6f22e222eef4b4ea8489b9b1 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # we say that two numbers nums[i] ≤ nums[j] are adjacent if there's no number in between (nums[i], nums[j]) in nums.
# Return the minimum possible abs(j - i) such that nums[j] and nums[i] are adjacent.
# 注意到排序之后,adjacent元素相邻
from collections import defaultdict
class Solution:
    def solve(self, nums):
        """Smallest index distance |j - i| over all value-adjacent pairs.

        Two entries are "adjacent" when no other value of nums lies strictly
        between them; equal values are adjacent to each other as well.
        """
        positions = defaultdict(list)
        for pos, value in enumerate(nums):
            positions[value].append(pos)

        best = int(1e20)

        # Equal values: only consecutive occurrences can be closest, and the
        # occurrence lists are already in ascending index order.
        for occ in positions.values():
            for prev_pos, next_pos in zip(occ, occ[1:]):
                gap = next_pos - prev_pos
                if gap < best:
                    best = gap
                if best == 1:
                    return 1

        # Distinct adjacent values: merge-walk the two sorted index lists of
        # each neighbouring pair of values to find the closest positions.
        ordered = sorted(positions)
        for lo, hi in zip(ordered, ordered[1:]):
            left, right = positions[lo], positions[hi]
            a = b = 0
            while a < len(left) and b < len(right):
                gap = abs(left[a] - right[b])
                if gap < best:
                    best = gap
                if left[a] < right[b]:
                    a += 1
                else:
                    b += 1
        return best
# Smoke test: the closest value-adjacent pair here is (-10, -5) at indices
# 1 and 3, so the expected output is 2.
print(Solution().solve(nums=[0, -10, 5, -5, 1]))
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
ee0881c5232ed496734cc80cb0f6a2d2473e02be | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/catapult/third_party/gsutil/gslib/daisy_chain_wrapper.py | 4e5717df8077b8fe1e8e89d58468da3e082c1eb6 | [
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 11,679 | py | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for use in daisy-chained copies."""
from collections import deque
import os
import threading
import time
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import CloudApi
from gslib.util import CreateLock
from gslib.util import TRANSFER_BUFFER_SIZE
# This controls the amount of bytes downloaded per download request.
# We do not buffer this many bytes in memory at a time - that is controlled by
# DaisyChainWrapper.max_buffer_size. This is the upper bound of bytes that may
# be unnecessarily downloaded if there is a break in the resumable upload.
_DEFAULT_DOWNLOAD_CHUNK_SIZE = 1024*1024*100
class BufferWrapper(object):
  """Presents a file-like write interface over the shared in-memory buffer
  of a DaisyChainWrapper, so a download can feed data to an upload."""

  def __init__(self, daisy_chain_wrapper):
    """Provides a buffered write interface for a file download.

    Args:
      daisy_chain_wrapper: DaisyChainWrapper instance to use for buffer and
          locking.
    """
    self.daisy_chain_wrapper = daisy_chain_wrapper

  def write(self, data):  # pylint: disable=invalid-name
    """Blocks until the shared buffer has room, then appends data to it."""
    wrapper = self.daisy_chain_wrapper
    while True:
      with wrapper.lock:
        has_room = wrapper.bytes_buffered < wrapper.max_buffer_size
      if has_room:
        break
      # Buffer is full: yield thread priority so the upload can drain it.
      time.sleep(0)
    size = len(data)
    if size:
      with wrapper.lock:
        wrapper.buffer.append(data)
        wrapper.bytes_buffered += size
class DaisyChainWrapper(object):
  """Wrapper class for daisy-chaining a cloud download to an upload.
  This class instantiates a BufferWrapper object to buffer the download into
  memory, consuming a maximum of max_buffer_size. It implements intelligent
  behavior around read and seek that allow for all of the operations necessary
  to copy a file.
  This class is coupled with the XML and JSON implementations in that it
  expects that small buffers (maximum of TRANSFER_BUFFER_SIZE) in size will be
  used.

  Threading model: a background download thread fills self.buffer through
  BufferWrapper.write, while the caller's upload thread drains it through
  read()/seek(); self.lock guards all shared buffer state.
  """
  def __init__(self, src_url, src_obj_size, gsutil_api, progress_callback=None,
               download_chunk_size=_DEFAULT_DOWNLOAD_CHUNK_SIZE):
    """Initializes the daisy chain wrapper.
    Args:
      src_url: Source CloudUrl to copy from.
      src_obj_size: Size of source object.
      gsutil_api: gsutil Cloud API to use for the copy.
      progress_callback: Optional callback function for progress notifications
          for the download thread. Receives calls with arguments
          (bytes_transferred, total_size).
      download_chunk_size: Integer number of bytes to download per
          GetObjectMedia request. This is the upper bound of bytes that may be
          unnecessarily downloaded if there is a break in the resumable upload.
    """
    # Current read position for the upload file pointer.
    self.position = 0
    self.buffer = deque()
    self.bytes_buffered = 0
    # Maximum amount of bytes in memory at a time.
    self.max_buffer_size = 1024 * 1024 # 1 MiB
    self._download_chunk_size = download_chunk_size
    # We save one buffer's worth of data as a special case for boto,
    # which seeks back one buffer and rereads to compute hashes. This is
    # unnecessary because we can just compare cloud hash digests at the end,
    # but it allows this to work without modifying boto.
    self.last_position = 0
    self.last_data = None
    # Protects buffer, position, bytes_buffered, last_position, and last_data.
    self.lock = CreateLock()
    # Protects download_exception.
    self.download_exception_lock = CreateLock()
    self.src_obj_size = src_obj_size
    self.src_url = src_url
    # This is safe to use the upload and download thread because the download
    # thread calls only GetObjectMedia, which creates a new HTTP connection
    # independent of gsutil_api. Thus, it will not share an HTTP connection
    # with the upload.
    self.gsutil_api = gsutil_api
    # If self.download_thread dies due to an exception, it is saved here so
    # that it can also be raised in the upload thread.
    self.download_exception = None
    self.download_thread = None
    self.progress_callback = progress_callback
    self.stop_download = threading.Event()
    self.StartDownloadThread(progress_callback=self.progress_callback)
  def StartDownloadThread(self, start_byte=0, progress_callback=None):
    """Starts the download thread for the source object (from start_byte)."""
    def PerformDownload(start_byte, progress_callback):
      """Downloads the source object in chunks.
      This function checks the stop_download event and exits early if it is set.
      It should be set when there is an error during the daisy-chain upload,
      then this function can be called again with the upload's current position
      as start_byte.
      Args:
        start_byte: Byte from which to begin the download.
        progress_callback: Optional callback function for progress
            notifications. Receives calls with arguments
            (bytes_transferred, total_size).
      """
      # TODO: Support resumable downloads. This would require the BufferWrapper
      # object to support seek() and tell() which requires coordination with
      # the upload.
      try:
        # Download all full-sized chunks; the final (possibly short) chunk is
        # fetched by the trailing call below with no end_byte.
        while start_byte + self._download_chunk_size < self.src_obj_size:
          self.gsutil_api.GetObjectMedia(
              self.src_url.bucket_name, self.src_url.object_name,
              BufferWrapper(self), start_byte=start_byte,
              end_byte=start_byte + self._download_chunk_size - 1,
              generation=self.src_url.generation, object_size=self.src_obj_size,
              download_strategy=CloudApi.DownloadStrategy.ONE_SHOT,
              provider=self.src_url.scheme, progress_callback=progress_callback)
          if self.stop_download.is_set():
            # Download thread needs to be restarted, so exit.
            self.stop_download.clear()
            return
          start_byte += self._download_chunk_size
        self.gsutil_api.GetObjectMedia(
            self.src_url.bucket_name, self.src_url.object_name,
            BufferWrapper(self), start_byte=start_byte,
            generation=self.src_url.generation, object_size=self.src_obj_size,
            download_strategy=CloudApi.DownloadStrategy.ONE_SHOT,
            provider=self.src_url.scheme, progress_callback=progress_callback)
      # We catch all exceptions here because we want to store them.
      # NOTE(review): Python 2 "except E, e" syntax; this module predates
      # Python 3 support.
      except Exception, e: # pylint: disable=broad-except
        # Save the exception so that it can be seen in the upload thread.
        with self.download_exception_lock:
          self.download_exception = e
        raise
    # TODO: If we do gzip encoding transforms mid-transfer, this will fail.
    self.download_thread = threading.Thread(
        target=PerformDownload,
        args=(start_byte, progress_callback))
    self.download_thread.start()
  def read(self, amt=None): # pylint: disable=invalid-name
    """Exposes a stream from the in-memory buffer to the upload."""
    if self.position == self.src_obj_size or amt == 0:
      # If there is no data left or 0 bytes were requested, return an empty
      # string so callers can call still call len() and read(0).
      return ''
    if amt is None or amt > TRANSFER_BUFFER_SIZE:
      raise BadRequestException(
          'Invalid HTTP read size %s during daisy chain operation, '
          'expected <= %s.' % (amt, TRANSFER_BUFFER_SIZE))
    while True:
      with self.lock:
        if self.buffer:
          break
      with self.download_exception_lock:
        if self.download_exception:
          # Download thread died, so we will never recover. Raise the
          # exception that killed it.
          raise self.download_exception # pylint: disable=raising-bad-type
      # Buffer was empty, yield thread priority so the download thread can fill.
      time.sleep(0)
    with self.lock:
      # TODO: Need to handle the caller requesting less than a
      # transfer_buffer_size worth of data.
      data = self.buffer.popleft()
      self.last_position = self.position
      self.last_data = data
      data_len = len(data)
      self.position += data_len
      self.bytes_buffered -= data_len
      if data_len > amt:
        raise BadRequestException(
            'Invalid read during daisy chain operation, got data of size '
            '%s, expected size %s.' % (data_len, amt))
    return data
  def tell(self): # pylint: disable=invalid-name
    # Position of the upload file pointer, guarded like the rest of the state.
    with self.lock:
      return self.position
  def seek(self, offset, whence=os.SEEK_SET): # pylint: disable=invalid-name
    restart_download = False
    if whence == os.SEEK_END:
      # Only seek-to-end (offset 0) is supported; boto uses it to find size.
      if offset:
        raise IOError(
            'Invalid seek during daisy chain operation. Non-zero offset %s '
            'from os.SEEK_END is not supported' % offset)
      with self.lock:
        self.last_position = self.position
        self.last_data = None
        # Safe because we check position against src_obj_size in read.
        self.position = self.src_obj_size
    elif whence == os.SEEK_SET:
      with self.lock:
        if offset == self.position:
          pass
        elif offset == self.last_position:
          self.position = self.last_position
          if self.last_data:
            # If we seek to end and then back, we won't have last_data; we'll
            # get it on the next call to read.
            self.buffer.appendleft(self.last_data)
            self.bytes_buffered += len(self.last_data)
        else:
          # Once a download is complete, boto seeks to 0 and re-reads to
          # compute the hash if an md5 isn't already present (for example a GCS
          # composite object), so we have to re-download the whole object.
          # Also, when daisy-chaining to a resumable upload, on error the
          # service may have received any number of the bytes; the download
          # needs to be restarted from that point.
          restart_download = True
      if restart_download:
        self.stop_download.set()
        # Consume any remaining bytes in the download thread so that
        # the thread can exit, then restart the thread at the desired position.
        while self.download_thread.is_alive():
          with self.lock:
            while self.bytes_buffered:
              self.bytes_buffered -= len(self.buffer.popleft())
          time.sleep(0)
        with self.lock:
          self.position = offset
          self.buffer = deque()
          self.bytes_buffered = 0
          self.last_position = 0
          self.last_data = None
        self.StartDownloadThread(start_byte=offset,
                                 progress_callback=self.progress_callback)
    else:
      raise IOError('Daisy-chain download wrapper does not support '
                    'seek mode %s' % whence)
  def seekable(self): # pylint: disable=invalid-name
    return True
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
f73f5c06d8e28761f419f7a2ccc812f56ad2a01a | 66977f7c7dd20c7d774859e7dcdc07e83ba0e5b9 | /venv/bin/easy_install-3.6 | 5d0a7e0fdca0e1390822c69263e03a922e8b1f28 | [] | no_license | Andchenn/music163 | 850ff93cb83164e4ce22d77e9beb832de8efe59c | 916ca671c51d79352bbe12fc2d4e7a43775b155c | refs/heads/master | 2020-03-25T11:47:25.609623 | 2018-08-07T14:19:18 | 2018-08-07T14:19:18 | 143,748,629 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | 6 | #!/home/feng/PycharmProjects/music/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
# NOTE: auto-generated setuptools console-script shim for this virtualenv;
# regenerate via pip/setuptools rather than editing by hand.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" wrapper suffix so argv[0] matches the
    # installed command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"1476204794@qq.com"
] | 1476204794@qq.com |
158cdf4a256d934f8f9e8b2fb414972e17b1e9a1 | 7755b126435af273dcfd538759c799e5d70965f8 | /django_startproject/project_template/myproject/conf/dev/settings.py | daa853108b003d5e478481f0f6fc991371446d22 | [] | no_license | k1000/django-startproject | 79bef3a218f0203e6f8a434811cb2d1e3bb34df3 | 50c04fe0ba8202bfad9e5b7eb217ae09640a76fa | refs/heads/master | 2020-04-05T23:44:54.051938 | 2010-03-25T18:56:20 | 2010-03-25T18:56:20 | 585,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from myproject.conf.settings import *
# Development-only overrides; DEBUG must never be enabled in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Dev URLconf replaces the project default from the base settings.
ROOT_URLCONF = 'myproject.conf.dev.urls'
# Commented-out template for a local PostgreSQL database configuration.
# DATABASE_ENGINE = 'postgresql_psycopg2'
# DATABASE_NAME = 'myproject'
# DATABASE_USER = 'dbuser'
# DATABASE_PASSWORD = 'dbpassword'
| [
"pete@lincolnloop.com"
] | pete@lincolnloop.com |
eae0f5c1f2698520e2d6f239064c70f6a0dd2aec | 95b6f547270557a99c435b785b907896f62e87d1 | /l_method.py | e80afe423815f6bab9ce4a911f61535b0f0fae60 | [] | no_license | phizaz/seeding-strategy-ssl | 6b3b58c9b1f556f8cd42fea5e3dc20e623462a08 | 85655ce3297130b273d5f86075ee6bdf1f12be0a | refs/heads/master | 2021-01-10T06:18:26.618009 | 2016-04-02T14:50:08 | 2016-04-02T14:50:08 | 49,761,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,883 | py | from fastcluster import linkage
from disjoint import DisjointSet
from collections import deque
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import BallTree
import time
from itertools import islice
import numpy
import math
from collections import Counter
class Result:
    """Clustering result mimicking the scikit-learn estimator interface.

    Attributes:
        labels_: cluster label for each centroid (parallel to cluster_centers_).
        cluster_centers_: array-like of centroid coordinates.
    """
    def __init__(self, labels, centers):
        self.labels_ = labels
        self.cluster_centers_ = centers

    def predict(self, X):
        """Return the label of the nearest cluster center for each row of X.

        Bug fix: sklearn.neighbors.BallTree takes its data in the constructor
        and has no fit() method, so the previous ``BallTree()`` /
        ``ball_tree.fit(...)`` calls raised at runtime.  Build the tree
        directly from the stored centroids instead.
        """
        ball_tree = BallTree(self.cluster_centers_)
        _, indexes = ball_tree.query(X)
        return [self.labels_[idx] for idx, in indexes]
def f_creator(coef, intercept):
    """Return the affine map x -> intercept + coef * x."""
    def line(x):
        return intercept + coef * x
    return line
def best_fit_line(x, y):
    """Least-squares straight line through (x, y); returns (slope, intercept)."""
    slope, offset = numpy.polyfit(x, y, 1)
    return slope, offset
def plot(X, fn):
    """Evaluate fn at every point of X, preserving order."""
    return list(map(fn, X))
def single_cluster(coef_a, coef_b, rthreshold=0.01):
    """True when two best-fit slopes are relatively close enough to be
    considered one line (and hence a single cluster).

    The slopes are converted to angles first: slope differences are not
    uniform across steepness, while radians are.
    """
    theta_a = math.atan2(coef_a, 1)
    theta_b = math.atan2(coef_b, 1)
    # Relative difference against the mean of the two angles' absolute sum.
    mean_angle = abs(theta_a + theta_b) / 2
    return abs(theta_a - theta_b) / mean_angle <= rthreshold
def l_method(num_groups, merge_dist):
    """Locate the knee of the evaluation graph (Salvador & Chan's L-method).

    For every split point, fits one straight line to the left part and one to
    the right part of the (num_groups, merge_dist) curve, scoring the split by
    the size-weighted sum of the two mean squared errors.  The best split's
    left size + 1 is returned as the suggested number of clusters.

    Args:
        num_groups: x values, expected to be [2..N] ascending.
        merge_dist: y values (merge distances), same length as num_groups.

    Returns:
        Suggested cluster count (int); 1 when fewer than 4 points are given.
    """
    element_cnt = len(num_groups)
    # short circuit, since the l-method doesn't work with the number of elements below 4
    if element_cnt < 4:
        return 1
    # now we have some level of confidence that O(n) is not attainable
    # this l_method is gonna be slow... n * 2 * O(MSE)
    x_left = num_groups[:2]
    y_left = merge_dist[:2]
    # we use 'deque' data structure here to attain the efficient 'popleft'
    x_right = deque(num_groups[2:])
    y_right = deque(merge_dist[2:])
    min_score = float('inf')
    min_c = None
    # this is for determining single cluster problem
    # min_coef_left = 0
    # min_coef_right = 0
    for left_cnt in range(2, element_cnt - 2 + 1):
        # get best fit lines
        coef_left, intercept_left = best_fit_line(x_left, y_left)
        coef_right, intercept_right = best_fit_line(x_right, y_right)
        fn_left = f_creator(coef_left, intercept_left)
        fn_right = f_creator(coef_right, intercept_right)
        y_pred_left = plot(x_left, fn_left)
        y_pred_right = plot(x_right, fn_right)
        # calculate the error on each line
        mseA = mean_squared_error(y_left, y_pred_left)
        mseB = mean_squared_error(y_right, y_pred_right)
        # calculate the error on both line cumulatively (weighted by side size)
        A = left_cnt / element_cnt * mseA
        B = (element_cnt - left_cnt) / element_cnt * mseB
        score = A + B
        # slide the split point one step to the right for the next iteration
        x_left.append(num_groups[left_cnt])
        y_left.append(merge_dist[left_cnt])
        x_right.popleft()
        y_right.popleft()
        if score < min_score:
            # find the best pair of best fit lines (that has the lowest mse)
            # left_cnt is not the number of clusters
            # since the first num_group begins with 2
            min_c, min_score = left_cnt + 1, score
            # for determining single class problem
            # min_coef_left, min_coef_right = coef_left, coef_right
    return min_c
    # this won't work for the moment
    # if single_cluster(min_coef_left, min_coef_right, 0.01):
    #     # two lines are too close in slope to each other (1% tolerance)
    #     # considered to be a single line
    #     # thus, single cluster
    #     return 1
    # else:
    #     return min_c
def refined_l_method(num_groups, merge_dist):
    """Iteratively re-run the L-method on a shrinking prefix of the curve
    until the knee estimate stops decreasing, then return it."""
    total = len(num_groups)
    # the L-method itself needs at least four points
    if total < 4:
        return 1
    cutoff = total
    knee = total
    while True:
        previous = knee
        knee = l_method(num_groups[:cutoff], merge_dist[:cutoff])
        # Focus the next pass on a window around the current knee.  Any
        # generous factor works as long as the cutoff tends to shrink; the
        # paper uses 2, this implementation uses 3.
        cutoff = knee * 3
        if knee >= previous:
            return knee
def get_centroids(X, belong_to):
    """Mean coordinate of every cluster, indexed by cluster id 0..max.

    Assumes cluster ids in belong_to are contiguous starting at 0 and that
    every id in that range has at least one member.
    """
    n_clusters = max(belong_to) + 1
    sums = [numpy.zeros(X[0].shape) for _ in range(n_clusters)]
    counts = [0] * n_clusters
    for idx, point in enumerate(X):
        cluster = belong_to[idx]
        counts[cluster] += 1
        sums[cluster] += point
    return [total / cnt for total, cnt in zip(sums, counts)]
def agglomerative_l_method(X, method='ward'):
    """Hierarchically cluster X and pick the cluster count via the L-method.

    Args:
        X: sequence of points (array-likes of identical shape).
        method: linkage criterion passed to fastcluster (default 'ward').

    Returns:
        Result with per-point labels (renamed 0..k-1) and cluster centroids.
    """
    # library: fastcluster
    merge_hist = linkage(X, method=method, metric='euclidean', preserve_input=True)
    # reorder to be x [2->N]
    num_groups = [i for i in range(2, len(X) + 1)]
    merge_dist = list(reversed([each[2] for each in merge_hist]))
    cluster_count = refined_l_method(num_groups, merge_dist)
    # print('refined_l_method time:', end_time - start_time)
    # print('cluster_count:', cluster_count)
    # make clusters by merging them according to merge_hist
    disjoint = DisjointSet(len(X))
    for a, b, _, _ in islice(merge_hist, 0, len(X) - cluster_count):
        a, b = int(a), int(b)
        disjoint.join(a, b)
    # get cluster name for each instance
    belong_to = [disjoint.parent(i) for i in range(len(X))]
    # print('belong_to:', belong_to)
    # counter = Counter(belong_to)
    # print('belong_to:', counter)
    # rename the cluster name to be 0 -> cluster_count - 1
    cluster_map = {}
    cluster_name = 0
    belong_to_renamed = []
    for each in belong_to:
        if not each in cluster_map:
            cluster_map[each] = cluster_name
            cluster_name += 1
        belong_to_renamed.append(cluster_map[each])
    # print('belong_to_renamed:', belong_to_renamed)
    centroids = get_centroids(X, belong_to_renamed)
    # print('centroids:', centroids)
    return Result(belong_to_renamed, centroids)
def recursive_agglomerative_l_method(X):
    """Recursively re-cluster each cluster found by agglomerative_l_method.

    Deliberately disabled: the function raises immediately, so everything
    below the raise is unreachable dead code kept for reference.  Note the
    inner ``recursion`` also treats agglomerative_l_method's return value as
    a label list, which no longer matches its Result return type.
    """
    raise Exception('out of service !')
    # disabled: won't give any desirable output for the moment
    def recursion(X):
        belong_to = agglomerative_l_method(X)
        num_clusters = max(belong_to) + 1
        if num_clusters == 1:
            return belong_to, num_clusters
        new_belong_to = [None for i in range(len(belong_to))]
        next_cluster_name = 0
        for cluster in range(num_clusters):
            next_X = []
            for belong, x in zip(belong_to, X):
                if belong == cluster:
                    next_X.append(x)
            sub_belong, sub_num_clusters = recursion(next_X)
            sub_belong_itr = 0
            for i, belong in enumerate(belong_to):
                if belong == cluster:
                    new_belong_to[i] = sub_belong[sub_belong_itr] + next_cluster_name
                    sub_belong_itr += 1
            next_cluster_name += sub_num_clusters
        return new_belong_to, next_cluster_name
    belong_to, clusters = recursion(X)
    # print('belong_to:', belong_to)
    # print('clusters:', clusters)
    centroids = get_centroids(X, belong_to)
    # print('centroids:', centroids)
    return Result(belong_to, centroids)
| [
"the.akita.ta@gmail.com"
] | the.akita.ta@gmail.com |
60d0182ebaf8ffb2c0c8299d16ab51c952127e4b | 5b77ea24ccda4fcf6ed8a269f27ac33d0a47bcad | /Startcamp/1218/scraping/currency.py | 8bb9389b05b71a0541ed79b360a0b0d022513aee | [] | no_license | yooseungju/TIL | 98acdd6a1f0c145bff4ae33cdbfbef4f45d5da42 | e6660aaf52a770508fe8778994e40aa43d2484d4 | refs/heads/master | 2020-04-11T21:13:56.981903 | 2019-09-28T12:34:03 | 2019-09-28T12:34:03 | 162,099,150 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | import requests
from bs4 import BeautifulSoup
# NOTE(review): despite the filename (currency.py) this script scrapes the
# Naver front page's realtime "hot keyword" ranking, not exchange rates —
# confirm the intent.
req = requests.get("https://www.naver.com/").text
soup = BeautifulSoup(req, 'html.parser')
# CSS path to each ranked keyword item in the front-page rolling list.
hotkeyword = soup.select("#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > .ah_item")
for item in hotkeyword:
    # .ah_r holds the rank number, .ah_k the keyword text ('위' ~ "rank").
    print(item.select_one(".ah_r").text, end ='위 ')
    print(item.select_one(".ah_k").text)
| [
"seung338989@gmail.com"
] | seung338989@gmail.com |
cbdf09255f98e70ee2ccb3b428ce1f08511f9203 | 6a7766599b74fddc9864e2bcccddafb333792d63 | /otree/widgets.py | 6995e7c1bbc975567a5cc5963b862294f962e9ad | [
"MIT"
] | permissive | PrimeCodingSolutions/otree-core | 4a6e113ba44bd6ded1a403c84df08b84fe890930 | 952451e0abde83306f3b6417fc535eb13e219c61 | refs/heads/master | 2021-04-18T15:22:48.700655 | 2020-08-28T13:05:06 | 2020-08-28T13:05:06 | 249,556,953 | 2 | 1 | NOASSERTION | 2020-08-28T12:39:11 | 2020-03-23T22:20:48 | HTML | UTF-8 | Python | false | false | 535 | py | # We moved the otree.widgets module to otree.forms.widgets
# prior to adding otree.api in September 2016, each models.py contained:
# "from otree import widgets"
from logging import getLogger
logger = getLogger(__name__)
MSG_NEW_IMPORTS = '''
otree.widgets does not exist anymore. You should update your imports in models.py to:
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
'''
logger.warning(MSG_NEW_IMPORTS)
from .forms.widgets import *
| [
"chris@otree.org"
] | chris@otree.org |
fa632f8b6d4b51cc22f1d1ea13c796836f1d83b2 | 0c47a1529b2ebcecbb177fa5a3929a9c281b6e55 | /src/NetworkViewer.py | 571b09242d9b6137f987c55eb8c97c3e8f569a77 | [] | no_license | theideasmith/rockefeller | c7a80ac095cf021548c4a63d48df5acc6e4d5320 | 436b5b3daaf488d2bd6272af76bee27b043d5a05 | refs/heads/master | 2020-12-24T19:36:58.562908 | 2016-05-17T18:58:17 | 2016-05-17T18:58:17 | 57,930,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,963 | py | """
Network Viewer
Visualizes the dynamics of a network and assoicated parameters during learning
"""
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Christoph Kirst <ckirst@rockefeller.edu>'
__docformat__ = 'rest'
#import sys, os
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
#import pyqtgraph.parametertree.parameterTypes as pTypes
from pyqtgraph.parametertree import Parameter, ParameterTree
class NetworkViewer(QtGui.QWidget):
"""Visualize Dynamics of Network"""
    def __init__(self, network, timer):
        """Create a viewer widget for *network*.

        Args:
            network: network object whose dynamics are visualized; setupGUI
                reads its nInputs/nOutputs/nNodes attributes.
            timer: Qt timer driving display updates.
        """
        # Initialize the QWidget base before building any child widgets.
        QtGui.QWidget.__init__(self);
        self.network = network;
        # Fixed plot colors: blue for network output, red for error.
        self.colors = {"output" : QtGui.QColor(0, 0, 255),
                       "error"  : QtGui.QColor(255, 0, 0),
                      };
        self.timer = timer;
        self.setupGUI();
def setupGUI(self):
"""Setup GUI"""
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.splitter = QtGui.QSplitter()
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setSizes([int(self.height()*0.5), int(self.height()*0.5)]);
self.layout.addWidget(self.splitter)
self.splitter2 = QtGui.QSplitter()
self.splitter2.setOrientation(QtCore.Qt.Horizontal)
#self.splitter2.setSizes([int(self.width()*0.5), int(self.width()*0.5)]);
self.splitter.addWidget(self.splitter2);
self.splitter3 = QtGui.QSplitter()
self.splitter3.setOrientation(QtCore.Qt.Horizontal)
#self.splitter2.setSizes([int(self.width()*0.5), int(self.width()*0.5)]);
self.splitter.addWidget(self.splitter3);
# various matrix like plots: state, goal, weights, p
self.nStates = 20; #number of states in the past to remember
self.matrixdata = [np.zeros((self.network.nInputs, self.nStates)),
np.zeros((self.network.nOutputs, self.nStates)),
np.zeros((self.network.nOutputs, self.nStates)),
np.zeros((self.network.nNodes, self.nStates))];
#np.zeros(self.network.weights.shape)];
#np.zeros(self.network.a.shape)
#np.zeros(self.network.b.shape)];
self.images = [];
self.axislabels = ['Input', 'Output', 'Goal', 'State']
for j in range(len(self.matrixdata)):
l = pg.GraphicsLayoutWidget()
self.splitter2.addWidget(l);
# We use a plotitem so we can have labels
plot = pg.PlotItem(title='<h3>'+'Network '+self.axislabels[j]+'</h3>')
l.addItem(plot, 0, 1);
i = pg.ImageItem(self.matrixdata[j]);
plot.getViewBox().addItem(i);
plot.showAxis('left', show=False)
plot.showAxis('bottom',show=False)
self.images.append(i);
for i in [0,1,2,3]:
self.images[i].setLevels([0,1]);
#output and error
self.plotlayout = pg.GraphicsLayoutWidget();
self.splitter3.addWidget(self.plotlayout);
self.plot = [];
for i in range(2):
self.plot.append(self.plotlayout.addPlot());
self.plot[i].setYRange(0, 1, padding=0);
self.plot[1].setYRange(0, 0.5, padding=0)
self.plotlength = 50000;
self.output = np.zeros((self.network.nOutputs, self.plotlength));
#self.goal = np.zeros((self.network.nOutputs, self.plotlength));
self.errorlength = 50000;
self.error = np.zeros(self.errorlength);
self.curves = []
for i in range(self.network.nOutputs):
c = self.plot[0].plot(self.output[i,:], pen = (i, self.network.nOutputs));
#c.setPos(0,0*i*6);
self.curves.append(c);
c = self.plot[1].plot(self.error, pen = (2,3));
self.curves.append(c);
# parameter controls
self.steps = 0;
params = [
{'name': 'Controls', 'type': 'group', 'children': [
{'name': 'Simulate', 'type': 'bool', 'value': True, 'tip': "Run the network simulation"},
{'name': 'Plot', 'type': 'bool', 'value': True, 'tip': "Check to plot network evolution"},
{'name': 'Plot Interval', 'type': 'int', 'value': 10, 'tip': "Step between plot updates"},
{'name': 'Timer', 'type': 'int', 'value': 10, 'tip': "Pause between plot is updated"},
]}
,
{'name': 'Network Parameter', 'type': 'group', 'children': [
{'name': 'Eta', 'type': 'float', 'value': self.network.eta, 'tip': "Learning rate"}#,
#{'name': 'Gamma', 'type': 'float', 'value': self.network.gamma, 'tip': "Learning rate"},
]}
,
{'name': 'Status', 'type': 'group', 'children': [
{'name': 'Steps', 'type': 'int', 'value': self.steps, 'tip': "Actual iteration step", 'readonly': True}
]}
];
self.parameter = Parameter.create(name = 'Parameter', type = 'group', children = params);
print self.parameter
print self.parameter.children()
self.parameter.sigTreeStateChanged.connect(self.updateParameter);
## Create two ParameterTree widgets, both accessing the same data
t = ParameterTree();
t.setParameters(self.parameter, showTop=False)
t.setWindowTitle('Parameter');
self.splitter3.addWidget(t);
# draw network
self.nsteps = 100;
self.updateView();
def updateParameter(self, param, changes):
for param, change, data in changes:
prt = False;
if param.name() == 'Eta':
self.network.eta = data;
prt = True;
if param.name() == 'Gamma':
self.network.gamma = data;
prt = True;
elif param.name() == 'Timer':
self.timer.setInterval(data);
prt = True;
if prt:
path = self.parameter.childPath(param);
if path is not None:
childName = '.'.join(path)
else:
childName = param.name()
print(' parameter: %s'% childName)
print(' change: %s'% change)
print(' data: %s'% str(data))
print(' ----------')
def updateView(self):
"""Update plots in viewer"""
if self.parameter['Controls', 'Simulate']:
pl = self.parameter['Controls', 'Plot'];
ns = self.parameter['Controls', 'Plot Interval'];
if pl:
for i in range(len(self.matrixdata)):
self.matrixdata[i][:, :-ns] = self.matrixdata[i][:,ns:];
self.output[:, :-ns] = self.output[:, ns:];
self.error[:-1] = self.error[1:];
for i in range(ns,0,-1):
self.network.step();
# This way our step size can be very large
if pl and i <=self.nStates:
self.output[:,-i] = self.network.output;
self.matrixdata[0][:,-i] = self.network.input;
self.matrixdata[1][:,-i] = self.network.goal;
self.matrixdata[2][:,-i] = self.network.output;
self.matrixdata[3][:,-i] = self.network.state;
self.steps += ns;
self.parameter['Status', 'Steps'] = self.steps;
if pl:
#s = self.network.state.copy();
#s = s.reshape(s.shape[0], 1);
for i in range(len(self.matrixdata)):
self.images[i].setImage(self.matrixdata[i]);
#self.images[-1].setImage(self.network.weights.T);
#self.images[-1].setImage(self.network.p.T);
# update actual state
#self.curves[0].setData(self.network.output);
#self.curves[1].setData(self.network.goal.astype('float'));
# update history
for i in range(self.network.nOutputs):
self.curves[i].setData(self.output[i,:]);
#keep updating error as its cheap
self.error[-1:] = self.network.error();
self.curves[-1].setData(self.error);
print "Error: "
print np.mean(self.error[-10:-1])
| [
"aclscientist@gmail.com"
] | aclscientist@gmail.com |
1d86fa9c04a543fb69270f8ebc482da953ce3942 | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/interactive/jm/random_normal_1/782677726.py | 60fa6b4705369acbc350e9f2c37456a1a42fa5b5 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 3,118 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 782677726
"""
"""
random actions, total chaos
"""
board = gamma_new(4, 4, 5, 1)
assert board is not None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 3, 0) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_move(board, 4, 0, 1) == 1
assert gamma_move(board, 5, 3, 3) == 1
assert gamma_move(board, 5, 0, 1) == 0
assert gamma_free_fields(board, 5) == 2
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 3, 3, 0) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 1, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_golden_move(board, 3, 1, 0) == 0
board662980125 = gamma_board(board)
assert board662980125 is not None
assert board662980125 == ("...5\n"
"...3\n"
"44..\n"
"1.22\n")
del board662980125
board662980125 = None
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_busy_fields(board, 4) == 2
assert gamma_free_fields(board, 4) == 4
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 2, 1) == 0
assert gamma_move(board, 5, 2, 2) == 0
assert gamma_free_fields(board, 5) == 1
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_golden_move(board, 2, 3, 3) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_move(board, 4, 3, 0) == 0
assert gamma_free_fields(board, 4) == 4
assert gamma_move(board, 5, 3, 0) == 0
assert gamma_move(board, 5, 3, 2) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 4, 2, 1) == 1
assert gamma_move(board, 5, 2, 2) == 0
assert gamma_free_fields(board, 5) == 1
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_free_fields(board, 2) == 2
gamma_delete(board)
| [
"jakub@molinski.dev"
] | jakub@molinski.dev |
0192a55bee2cdb27c882409abd6d92b255bcbb66 | a5e6ce10ff98539a94a5f29abbc053de9b957cc6 | /problems/dynamic_programing/knapsack1.py | fd45ebc6c136b21f6fbffd1e18dc65c3b37eb2ff | [] | no_license | shimaw28/atcoder_practice | 5097a8ec636a9c2e9d6c417dda5c6a515f1abd9c | 808cdc0f2c1519036908118c418c8a6da7ae513e | refs/heads/master | 2020-07-26T10:59:51.927217 | 2020-06-13T11:53:19 | 2020-06-13T11:53:19 | 208,622,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | # atcoder https://atcoder.jp/contests/dp/tasks/dp_d
def main():
N, W = map(int, input().split())
w, v = [], []
for _ in range(N):
wi, vi = map(int, input().split())
w.append(wi)
v.append(vi)
dp = [[0 for j in range(W+1)] for i in range(N+1)]
for i in range(1, N+1):
for j in range(1, W+1):
if j >= w[i-1]:
dp[i][j] = max(dp[i-1][j], dp[i-1][j-w[i-1]]+v[i-1])
else:
dp[i][j] = dp[i-1][j]
print(dp[N][W])
main() | [
"shima.w28@gmail.com"
] | shima.w28@gmail.com |
7de2a9da3fb85e1c6dd5a800ccf836c5f6680029 | fc4aaf15ef2b10c89728651316300ada27f14ae3 | /basic_app/models.py | 3b159a9e093f32b2a9c91d455459336aa4a8d7bc | [] | no_license | ethicalrushi/seller | 7f6231e710b32e8d7700e32e321879728de65546 | 9473fcb6595c27c7adbcea24990b6e8f006d3e8a | refs/heads/master | 2020-03-08T04:00:59.089892 | 2018-04-04T17:48:19 | 2018-04-04T17:48:19 | 126,335,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Department(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=200)
department = models.ForeignKey('Department', on_delete=models.SET_DEFAULT, default='Specials', blank=True)
price = models.PositiveIntegerField()
cod_availability = models.BooleanField(default=False)
seller = models.CharField(max_length=200)
description = models.TextField()
image= models.ImageField(upload_to='product_pic')
def __str__(self):
return self.name
class Cart(models.Model):
user = models.ForeignKey(User)
active = models.BooleanField(default=True)
def add_to_cart(self, product_id):
product = Product.objects.get(pk=product_id)
try:
preexisting_order = ProductOrder.objects.get(product=product, cart=self)
preexisting_order.quantity+=1
preexisting_order.save()
except ProductOrder.DoesNotExist:
new_order = ProductOrder.objects.create(
product=product,
cart=self,
quantity=1,
)
new_order.save()
def remove_from_cart(self, product_id):
product=Product.objects.get(pk=product_id)
try:
preexisting_order = ProductOrder.objects.get(product=product, cart=self)
if preexisting_order.quantity >1:
preexisting_order.quantity-=1
preexisting_order.save()
else:
preexisting_order.delete()
except ProductOrder.DoesNotExist:
pass
class ProductOrder(models.Model):
product = models.ForeignKey(Product)
cart = models.ForeignKey(Cart)
quantity = models.IntegerField() | [
"pupalerushikesh@gmail.com"
] | pupalerushikesh@gmail.com |
14b3a7755aa94e83563c1b91bd941e54547348fb | bc108434d5f485a5ca593942b0fbe2f4d044ebda | /pl/python/test_io.py | 8367110f97859b28297fce0385a5f0f9db15de28 | [] | no_license | js-ts/AI | 746a34493a772fb88aee296f463122b68f3b299d | 353e7abfa7b02b45d2b7fec096b58e07651eb71d | refs/heads/master | 2023-05-29T16:19:03.463999 | 2021-06-22T05:49:44 | 2021-06-22T05:49:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | import io
from io import StringIO, BytesIO
# open | [
"wenyu.lyu@gmail.com"
] | wenyu.lyu@gmail.com |
d4cb7acb76a7609399131caf54bb1033a481ac87 | a185d936357b6f376df921de291654f6148f2a77 | /Database/Scripts/Treenut/script4.py | 50cb738e24bb4264a5381cfd93ecaa9b2698f0ca | [] | no_license | virajnilakh/ZenHealth | 021edd06a8e01fb96db5ce9c34c200bd277942bf | 02f274bbed2b16a2aff01c7f57e81b705af90012 | refs/heads/master | 2021-05-07T07:11:09.096859 | 2018-07-06T00:27:58 | 2018-07-06T00:27:58 | 109,096,469 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,656 | py | from firebase import firebase
import json
import requests
import os
from pprint import pprint
import pymongo
'''
Query 4
http://api.yummly.com/v1/api/recipes?_app_id=dummyid&_app_key=dummykey&allowedAllergy[]=395^Treenut-Free
&allowedDiet[]=386^Vegan&allowedCourse[]=course^course-Main Dishes&maxResult=30&nutrition.SUGAR.min=10&nutrition.SUGAR.max=12
this script is for query 4, Main Dishes in course , sugar value is high, allowed allergy: treenut, allowedDiet= Vegan
'''
def getRecipeData():
recipeUrlList = [
'http://api.yummly.com/v1/api/recipe/Candied-Yams-1310064?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/The-Ultimate-Mojito-Jo-Cooks-46054?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Turmeric-and-Ginger-Tonic-2101984?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Homemade-Hummus-2395748?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Hawaiian-Champagne-Punch-2411488?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Strawberry-Lemonade-Slush-1119185?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Pear-and-Pomegranate-Salsa-1438183?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Maple-Roasted-Butternut-Squash_-Brussel-Sprouts-_-Cranberries-2243291?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Blueberry-Banana-Oatmeal-Smoothie-2254689?_app_id=dummyid&_app_key=dummykey',
'http://api.yummly.com/v1/api/recipe/Creamy-Avocado-Pesto-Zoodles-1018988?_app_id=dummyid&_app_key=dummykey'
]
for url in recipeUrlList:
r = requests.get(url)
data = r.json()
extractDataFromUrl(data)
def extractDataFromUrl(data):
recipeData = {}
nutritionData = {}
recipeData['Ingredients'] = data.get('ingredientLines')
attrs = data.get('attributes')
if attrs:
recipeData['Course'] = attrs.get('course')
if (recipeData['Course'] == None):
recipeData['Course'] = []
recipeData['Course'].append("Main Dishes")
recipeData['Cuisine'] = attrs.get('cuisine')
else:
recipeData['Course'] = "Main Dishes"
recipeData ['Name'] = data.get('name')
for nutrition in data['nutritionEstimates']:
if nutrition['attribute'] == "ENERC_KCAL":
nutritionData['Calories'] = str(nutrition['value']) + " " + nutrition['unit']['pluralAbbreviation']
if nutrition['attribute'] == "SUGAR":
nutritionData['Sugar'] = str(nutrition['value']) + " " + nutrition['unit']['pluralAbbreviation']
if nutrition['attribute'] == "CHOCDF":
nutritionData['Carbohydrates'] = str(nutrition['value']) + " " + nutrition['unit']['pluralAbbreviation']
recipeData['Nutrients'] = nutritionData
recipeData['allowedDiet'] = 'Vegan'
recipeData['allowedAllergy'] = 'Treenut-Free'
recipeData['sugarLevel'] = 'High'
#recipeData['allowedIngredient'] = 'bacon'
print(recipeData ['Name'])
result = firebase.post('/foodData', recipeData)
print("RESULT IS:")
print(result)
firebase = firebase.FirebaseApplication('https://zenhealth-215f6.firebaseio.com/', authentication=None)
getRecipeData()
| [
"noreply@github.com"
] | virajnilakh.noreply@github.com |
a639ff252d2e58adfdcc7f5e04bcf9e520e04141 | 4513ce2e8589b0a20c0a64abf434da2ceeb2ab80 | /manage.py | 4029ae8897747c482d7573d58204a193ab25bbcc | [] | no_license | nralif/pages_app | 98f1be7d041a9ad6b5d0c052f662349bd97fad16 | 57ad71aa5d4b52f2638ef8850bae1a6739cbe1e6 | refs/heads/main | 2023-05-09T18:40:02.492885 | 2021-05-22T12:40:00 | 2021-05-22T12:40:00 | 369,753,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'page_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"naeemurrahman.aalif@gmail.com"
] | naeemurrahman.aalif@gmail.com |
283359a20e140bad25199256e77196298e581515 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/321/101659/submittedfiles/jogoDaVelha_BIB.py | b62888cd1d8baaf9b6d90df806d218ed55543e07 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | # -*- coding: utf-8 -*-
# COLOQUE SUA BIBLIOTECA A PARTIR DAQUI
import random
def simbolo():
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) \n'))
while s != 'X' and s != 'O':
print('Isira um símbolo válido')
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
return
sorteado = -1
while(True) :
sorteado = random.randint(0,num_grupos-1)
if (tipo=='G') :
if (grupos_sorteados[sorteado] == -1):
break
return sorteado
for i in range (num_grupos+1,1):
input(' Pressione ENTER para sortear o %dº grupo...' % i)
indice_grupo = sorteio('G')
mostra_sorteado('G',indice_grupo)
input(' Pressione ENTER para sortear a %dª data...' % i)
indice_data = sorteio('D')
time.sleep(3)
mostra_sorteado('D',indice_data)
salva_sorteio(indice_grupo, indice_data)
print(' ------------------------------------------------- ')
print(' Ordem de apresentação do P1 ')
print(' ------------------------------------------------- ')
inicio = ['computador','nome']
inicio_sorteio = [1,1]
num_inicio = len(inicio)
def sorteio (x):
sorteado = 1
if sorteado in inicio_sorteio:
return inicio_sorteio.index
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
480f4bce7c618584210823d38405128065d34426 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /alexaforbusiness_write_1/user_delete.py | 552e70148262104efc7a5436e0538e2e95345610 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/alexaforbusiness/delete-user.html
if __name__ == '__main__':
"""
create-user : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/alexaforbusiness/create-user.html
search-users : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/alexaforbusiness/search-users.html
"""
parameter_display_string = """
# enrollment-id : The ARN of the userâs enrollment in the organization. Required.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("alexaforbusiness", "delete-user", "enrollment-id", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
4ff4bc8aa3fd0344b8ce84d0b596891346d80dfe | 91fb65972d69ca25ddd892b9d5373919ee518ee7 | /pibm-training/sample-programs/range_example_001.py | 13445a33e06d8a5a5c134cdf7f0e7bc0e480f99a | [] | no_license | zeppertrek/my-python-sandpit | c36b78e7b3118133c215468e0a387a987d2e62a9 | c04177b276e6f784f94d4db0481fcd2ee0048265 | refs/heads/master | 2022-12-12T00:27:37.338001 | 2020-11-08T08:56:33 | 2020-11-08T08:56:33 | 141,911,099 | 0 | 0 | null | 2022-12-08T04:09:28 | 2018-07-22T16:12:55 | Python | UTF-8 | Python | false | false | 430 | py | #range_example_001.py
#range converted to lists
#
print ("List range(10) ---", list(range(10)))
print ("List range(1,11) ---", list(range(1, 11)))
print ("List range(0,30,5) --- ", list(range(0, 30, 5)))
print ("list(range(0, 10, 3)) ---", list(range(0, 10, 3)))
print ("list(range(0, -10, -1)) ---" , list(range(0, -10, -1)))
print ("list(range(0)) ---", list(range(0)))
print ("list(range(1, 0)) ---", list(range(1, 0)))
| [
"zeppertrek@gmail.com"
] | zeppertrek@gmail.com |
4dfc5e22548977aa77f85efeb2ee388d37034424 | d1f971b9fa0edfa633b62887cf9d173d6a86a440 | /concepts/Exercises/list_slicing.py | 94af34cfb70f29e3e12488fea14a57a678427d25 | [] | no_license | papan36125/python_exercises | d45cf434c15aa46e10967c13fbe9658915826478 | 748eed2b19bccf4b5c700075675de87c7c70c46e | refs/heads/master | 2020-04-28T10:01:10.361108 | 2019-05-10T13:45:35 | 2019-05-10T13:45:35 | 175,187,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | my_list = ['p','r','o','g','r','a','m','i','z']
# elements 3rd to 5th
print(my_list[2:5])
# elements beginning to 4th
print(my_list[:-5])
# elements 6th to end
print(my_list[5:])
# elements beginning to end
print(my_list[:])
| [
"noreply@github.com"
] | papan36125.noreply@github.com |
a085709fcdbdec5df615f02c6aaf7c0065cac542 | cb3bce599e657188c30366adb0af3007ff9b8f96 | /src/network/ex32-1.py | 21c0c7b5953a8427ef1ae7759e30900f4bd4c1c3 | [] | no_license | skk4/python_study | 534339e6c378d686c29af6d81429c472fca19d6d | 4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987 | refs/heads/master | 2021-01-01T04:36:52.037184 | 2017-12-08T01:04:27 | 2017-12-08T01:04:27 | 97,207,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | '''
Created on 2017.7.19
@author: Administrator
'''
a_dict = {'s':'1', 'x':'2'}
b_dict = {'s':'2', 'x':'4'}
print | [
"skk_4@163.com"
] | skk_4@163.com |
521e0942d4a805129e2348001d434618159ac9d1 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Sklearn_scipy_numpy/source/scipy/weave/tests/test_c_spec.py | 278650de00e217afb3beacef411730d2a8e9aad4 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 18,927 | py | from __future__ import absolute_import, print_function
import os
import sys
import tempfile
import string
import time
from numpy.testing import TestCase, assert_, run_module_suite
from scipy.weave import inline_tools, ext_tools, c_spec
from scipy.weave.build_tools import msvc_exists, gcc_exists
from scipy.weave.catalog import unique_file
from weave_test_utils import debug_print, dec
def unique_mod(d,file_name):
f = os.path.basename(unique_file(d,file_name))
m = os.path.splitext(f)[0]
return m
class IntConverter(TestCase):
compiler = ''
@dec.slow
def test_type_match_string(self):
s = c_spec.int_converter()
assert_(not s.type_match('string'))
@dec.slow
def test_type_match_int(self):
s = c_spec.int_converter()
assert_(s.type_match(5))
@dec.slow
def test_type_match_float(self):
s = c_spec.int_converter()
assert_(not s.type_match(5.))
@dec.slow
def test_type_match_complex(self):
s = c_spec.int_converter()
assert_(not s.type_match(5.+1j))
@dec.slow
def test_var_in(self):
mod_name = 'int_var_in' + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1
code = "a=2;"
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1
test(b)
try:
b = 1.
test(b)
except TypeError:
pass
try:
b = 'abc'
test(b)
except TypeError:
pass
@dec.slow
def test_int_return(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1
code = """
a=a+2;
return_val = PyInt_FromLong(a);
"""
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1
c = test(b)
assert_(c == 3)
class FloatConverter(TestCase):
compiler = ''
@dec.slow
def test_type_match_string(self):
s = c_spec.float_converter()
assert_(not s.type_match('string'))
@dec.slow
def test_type_match_int(self):
s = c_spec.float_converter()
assert_(not s.type_match(5))
@dec.slow
def test_type_match_float(self):
s = c_spec.float_converter()
assert_(s.type_match(5.))
@dec.slow
def test_type_match_complex(self):
s = c_spec.float_converter()
assert_(not s.type_match(5.+1j))
@dec.slow
def test_float_var_in(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1.
code = "a=2.;"
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1.
test(b)
try:
b = 1.
test(b)
except TypeError:
pass
try:
b = 'abc'
test(b)
except TypeError:
pass
@dec.slow
def test_float_return(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1.
code = """
a=a+2.;
return_val = PyFloat_FromDouble(a);
"""
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1.
c = test(b)
assert_(c == 3.)
class ComplexConverter(TestCase):
compiler = ''
@dec.slow
def test_type_match_string(self):
s = c_spec.complex_converter()
assert_(not s.type_match('string'))
@dec.slow
def test_type_match_int(self):
s = c_spec.complex_converter()
assert_(not s.type_match(5))
@dec.slow
def test_type_match_float(self):
s = c_spec.complex_converter()
assert_(not s.type_match(5.))
@dec.slow
def test_type_match_complex(self):
s = c_spec.complex_converter()
assert_(s.type_match(5.+1j))
@dec.slow
def test_complex_var_in(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1.+1j
code = "a=std::complex<double>(2.,2.);"
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1.+1j
test(b)
try:
b = 1.
test(b)
except TypeError:
pass
try:
b = 'abc'
test(b)
except TypeError:
pass
@dec.slow
def test_complex_return(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1.+1j
code = """
a= a + std::complex<double>(2.,2.);
return_val = PyComplex_FromDoubles(a.real(),a.imag());
"""
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1.+1j
c = test(b)
assert_(c == 3.+3j)
#----------------------------------------------------------------------------
# File conversion tests
#----------------------------------------------------------------------------
class FileConverter(TestCase):
compiler = ''
@dec.slow
def test_py_to_file(self):
file_name = os.path.join(test_dir, "testfile")
file = open(file_name,'w')
code = """
fprintf(file,"hello bob");
"""
inline_tools.inline(code,['file'],compiler=self.compiler,force=1)
file.close()
file = open(file_name,'r')
assert_(file.read() == "hello bob")
@dec.slow
def test_file_to_py(self):
file_name = os.path.join(test_dir, "testfile")
# not sure I like Py::String as default -- might move to std::sting
# or just plain char*
code = """
const char* _file_name = file_name.c_str();
FILE* file = fopen(_file_name, "w");
return_val = file_to_py(file, _file_name, "w");
"""
file = inline_tools.inline(code,['file_name'], compiler=self.compiler,
force=1)
file.write("hello fred")
file.close()
file = open(file_name,'r')
assert_(file.read() == "hello fred")
#----------------------------------------------------------------------------
# Instance conversion tests
#----------------------------------------------------------------------------
class InstanceConverter(TestCase):
pass
#----------------------------------------------------------------------------
# Callable object conversion tests
#----------------------------------------------------------------------------
class CallableConverter(TestCase):
compiler = ''
@dec.slow
def test_call_function(self):
func = string.find
search_str = "hello world hello"
sub_str = "world"
# * Not sure about ref counts on search_str and sub_str.
# * Is the Py::String necessary? (it works anyways...)
code = """
py::tuple args(2);
args[0] = search_str;
args[1] = sub_str;
return_val = func.call(args);
"""
actual = inline_tools.inline(code,['func','search_str','sub_str'],
compiler=self.compiler,force=1)
desired = func(search_str,sub_str)
assert_(desired == actual)
class SequenceConverter(TestCase):
compiler = ''
@dec.slow
def test_convert_to_dict(self):
d = {}
inline_tools.inline("",['d'],compiler=self.compiler,force=1)
@dec.slow
def test_convert_to_list(self):
l = []
inline_tools.inline("",['l'],compiler=self.compiler,force=1)
@dec.slow
def test_convert_to_string(self):
s = 'hello'
inline_tools.inline("",['s'],compiler=self.compiler,force=1)
@dec.slow
def test_convert_to_tuple(self):
t = ()
inline_tools.inline("",['t'],compiler=self.compiler,force=1)
class StringConverter(TestCase):
    # Tests for c_spec.string_converter: type matching, passing strings into
    # compiled extension functions, and returning strings back to Python.
    compiler = ''
    @dec.slow
    def test_type_match_string(self):
        s = c_spec.string_converter()
        assert_(s.type_match('string'))
    @dec.slow
    def test_type_match_int(self):
        s = c_spec.string_converter()
        assert_(not s.type_match(5))
    @dec.slow
    def test_type_match_float(self):
        s = c_spec.string_converter()
        assert_(not s.type_match(5.))
    @dec.slow
    def test_type_match_complex(self):
        s = c_spec.string_converter()
        assert_(not s.type_match(5.+1j))
    @dec.slow
    def test_var_in(self):
        # Build a one-function extension module that accepts a string arg,
        # then check that non-string arguments are rejected with TypeError.
        mod_name = 'string_var_in'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = 'string'
        code = 'a=std::string("hello");'
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = 'bub'
        test(b)
        # NOTE(review): these try/except blocks pass silently even if no
        # TypeError is raised; a failure assertion in an ``else`` clause
        # would make the intent explicit.
        try:
            b = 1.
            test(b)
        except TypeError:
            pass
        try:
            b = 1
            test(b)
        except TypeError:
            pass
    @dec.slow
    def test_return(self):
        # A std::string assigned in C++ should round-trip back to Python.
        mod_name = 'string_return'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = 'string'
        # NOTE(review): PyString_FromString is the Python 2 C-API; this
        # module predates Python 3 support.
        code = """
               a= std::string("hello");
               return_val = PyString_FromString(a.c_str());
               """
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = 'bub'
        c = test(b)
        assert_(c == 'hello')
class ListConverter(TestCase):
    # Tests for c_spec.list_converter: type matching, argument checking,
    # returning lists, plus a speed comparison between scxx indexing,
    # raw CPython API access, and pure Python.
    compiler = ''
    @dec.slow
    def test_type_match_bad(self):
        s = c_spec.list_converter()
        objs = [{},(),'',1,1.,1+1j]
        for i in objs:
            assert_(not s.type_match(i))
    @dec.slow
    def test_type_match_good(self):
        s = c_spec.list_converter()
        assert_(s.type_match([]))
    @dec.slow
    def test_var_in(self):
        # Compiled function accepts only list arguments; other types must
        # raise TypeError from the generated argument-conversion code.
        mod_name = 'list_var_in'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = [1]
        code = 'a=py::list();'
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = [1,2]
        test(b)
        # NOTE(review): passes silently if TypeError is not raised.
        try:
            b = 1.
            test(b)
        except TypeError:
            pass
        try:
            b = 'string'
            test(b)
        except TypeError:
            pass
    @dec.slow
    def test_return(self):
        # A py::list built in C++ should come back as a Python list.
        mod_name = 'list_return'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = [1]
        code = """
               a=py::list();
               a.append("hello");
               return_val = a;
               """
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = [1,2]
        c = test(b)
        assert_(c == ['hello'])
    @dec.slow
    def test_speed(self):
        # Compare three ways of summing (+/- alternating) a million-element
        # list; all three must agree on the result.
        mod_name = 'list_speed'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        # NOTE(review): range() returns a list on Python 2; on Python 3 a
        # range object would be passed instead.
        a = range(1000000)
        code = """
               int v, sum = 0;
               for(int i = 0; i < a.len(); i++)
               {
                   v = a[i];
                   if (v % 2)
                    sum += v;
                   else
                    sum -= v;
               }
               return_val = sum;
               """
        with_cxx = ext_tools.ext_function('with_cxx',code,['a'])
        mod.add_function(with_cxx)
        # Second variant bypasses scxx and uses the raw C API without
        # per-item error checking.
        code = """
               int vv, sum = 0;
               PyObject *v;
               for(int i = 0; i < a.len(); i++)
               {
                   v = PyList_GetItem(py_a,i);
                   //didn't set error here -- just speed test
                   vv = py_to_int(v,"list item");
                   if (vv % 2)
                    sum += vv;
                   else
                    sum -= vv;
               }
               return_val = sum;
               """
        no_checking = ext_tools.ext_function('no_checking',code,['a'])
        mod.add_function(no_checking)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import with_cxx, no_checking')
        t1 = time.time()
        sum1 = with_cxx(a)
        t2 = time.time()
        debug_print('speed test for list access')
        debug_print('compiler:', self.compiler)
        debug_print('scxx:', t2 - t1)
        t1 = time.time()
        sum2 = no_checking(a)
        t2 = time.time()
        debug_print('C, no checking:', t2 - t1)
        # Pure-Python reference implementation.
        sum3 = 0
        t1 = time.time()
        for i in a:
            if i % 2:
                sum3 += i
            else:
                sum3 -= i
        t2 = time.time()
        debug_print('python:', t2 - t1)
        assert_(sum1 == sum2 and sum1 == sum3)
class TupleConverter(TestCase):
    # Tests for c_spec.tuple_converter: type matching, argument checking,
    # and building/returning a tuple from C++.
    compiler = ''
    @dec.slow
    def test_type_match_bad(self):
        s = c_spec.tuple_converter()
        objs = [{},[],'',1,1.,1+1j]
        for i in objs:
            assert_(not s.type_match(i))
    @dec.slow
    def test_type_match_good(self):
        s = c_spec.tuple_converter()
        assert_(s.type_match((1,)))
    @dec.slow
    def test_var_in(self):
        # Compiled function accepts only tuple arguments.
        mod_name = 'tuple_var_in'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = (1,)
        code = 'a=py::tuple();'
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = (1,2)
        test(b)
        # NOTE(review): passes silently if TypeError is not raised.
        try:
            b = 1.
            test(b)
        except TypeError:
            pass
        try:
            b = 'string'
            test(b)
        except TypeError:
            pass
    @dec.slow
    def test_return(self):
        # Exercises both the operator[] and set_item ways of filling a
        # py::tuple, including storing py::None.
        mod_name = 'tuple_return'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = (1,)
        code = """
               a=py::tuple(2);
               a[0] = "hello";
               a.set_item(1,py::None);
               return_val = a;
               """
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = (1,2)
        c = test(b)
        assert_(c == ('hello',None))
class DictConverter(TestCase):
    """Base Class for dictionary conversion tests."""
    # Default string specifying the compiler to use. While this is set
    # in all sub-classes, this base test class is found by the test
    # infrastructure and run. Therefore, we give it a default value
    # so that it can run on its own.
    compiler = ''
    @dec.slow
    def test_type_match_bad(self):
        # Non-dict objects must not match the dict converter.
        s = c_spec.dict_converter()
        objs = [[],(),'',1,1.,1+1j]
        for i in objs:
            assert_(not s.type_match(i))
    @dec.slow
    def test_type_match_good(self):
        s = c_spec.dict_converter()
        assert_(s.type_match({}))
    @dec.slow
    def test_var_in(self):
        # Compiled function accepts only dict arguments.
        mod_name = 'dict_var_in'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = {'z':1}
        code = 'a=py::dict();'  # This just checks to make sure the type is correct
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = {'y':2}
        test(b)
        # NOTE(review): passes silently if TypeError is not raised.
        try:
            b = 1.
            test(b)
        except TypeError:
            pass
        try:
            b = 'string'
            test(b)
        except TypeError:
            pass
    @dec.slow
    def test_return(self):
        # A py::dict populated in C++ should come back as a Python dict.
        mod_name = 'dict_return'+self.compiler
        mod_name = unique_mod(test_dir,mod_name)
        mod = ext_tools.ext_module(mod_name)
        a = {'z':1}
        code = """
               a=py::dict();
               a["hello"] = 5;
               return_val = a;
               """
        test = ext_tools.ext_function('test',code,['a'])
        mod.add_function(test)
        mod.compile(location=test_dir, compiler=self.compiler)
        exec('from ' + mod_name + ' import test')
        b = {'z':2}
        c = test(b)
        assert_(c['hello'] == 5)
# Dynamically generate per-compiler TestCase subclasses (TestFooMsvc, or
# TestFooUnix plus TestFooGcc) for every *Converter class defined above,
# so each converter suite runs once per available compiler.
for _n in dir():
    if _n[-9:] == 'Converter':
        if msvc_exists():
            exec("class Test%sMsvc(%s):\n compiler = 'msvc'" % (_n,_n))
        else:
            exec("class Test%sUnix(%s):\n compiler = ''" % (_n,_n))
        if gcc_exists():
            exec("class Test%sGcc(%s):\n compiler = 'gcc'" % (_n,_n))
def setup_location():
    """Create a scratch directory for compiled test modules.

    The directory is prepended to sys.path so that freshly built
    extension modules can be imported by name.
    """
    scratch = tempfile.mkdtemp()
    sys.path.insert(0, scratch)
    return scratch
# Shared scratch directory for compiled test modules; created in
# setUpModule() and removed again in tearDownModule().
test_dir = None
def setUpModule():
    # Create the scratch directory once for the whole module.
    global test_dir
    test_dir = setup_location()
def tearDownModule():
    # Remove the scratch directory (and all compiled artifacts) if it was created.
    import shutil
    if test_dir is not None:
        shutil.rmtree(test_dir)
# Allow running this test file directly, outside the test runner.
if __name__ == "__main__":
    run_module_suite()
| [
"master@MacBook-Pro-admin.local"
] | master@MacBook-Pro-admin.local |
cbd21866c009354fdebf87e289855b910528d4bc | a5da45e771fb57b4785bd19d46261c7c6488040d | /src/rv/utils/batch.py | 9c97506bd94043348b15da280c4b57ea5e3da67d | [
"Apache-2.0"
] | permissive | bw4sz/raster-vision | 6f6888f9dbb1277f9c4e40c3dfb53d5e79b3a4e8 | 02465c49ad64f7efebc4772b716a08b23cd438bf | refs/heads/develop | 2021-05-14T00:19:25.786863 | 2018-01-14T23:59:30 | 2018-01-14T23:59:30 | 116,536,741 | 0 | 0 | null | 2018-01-14T23:59:31 | 2018-01-07T04:17:39 | Python | UTF-8 | Python | false | false | 1,479 | py | from os import environ
import click
import boto3
s3_bucket = environ.get('S3_BUCKET')
def _batch_submit(branch_name, command, attempts=3, cpu=False):
"""
Submit a job to run on Batch.
Args:
branch_name: Branch with code to run on Batch
command: Command in quotes to run on Batch
"""
full_command = ['run_script.sh', branch_name]
full_command.extend(command.split())
client = boto3.client('batch')
job_queue = 'raster-vision-cpu' if cpu else \
'raster-vision-gpu'
job_definition = 'raster-vision-cpu' if cpu else \
'raster-vision-gpu'
job_name = command.replace('/', '-').replace('.', '-')
job_name = 'batch_submit'
job_id = client.submit_job(
jobName=job_name,
jobQueue=job_queue,
jobDefinition=job_definition,
containerOverrides={
'command': full_command
},
retryStrategy={
'attempts': attempts
})['jobId']
click.echo(
'Submitted job with jobName={} and jobId={}'.format(job_name, job_id))
@click.command()
@click.argument('branch_name')
@click.argument('command')
@click.option('--attempts', default=3, help='Number of times to retry job')
@click.option('--cpu', is_flag=True, help='Use CPU EC2 instances')
def batch_submit(branch_name, command, attempts, cpu):
    # CLI entry point: thin click wrapper around _batch_submit.
    _batch_submit(branch_name, command, attempts=attempts, cpu=cpu)
| [
"lewfish@gmail.com"
] | lewfish@gmail.com |
d466f070c3427581d6259d57f853f56ec3df69ab | edebd47922910982979c19f276f202ffccdd3ee3 | /test/lit.cfg | 97912f426659bf1dac4a61700bfd5fcbe076776d | [] | no_license | cadets/watchman | a6bbcb6bf88db90d81c000387194dc03c98033ea | daeb8d1c7f93f427aa1bb5598971f64646103470 | refs/heads/master | 2021-01-10T12:55:58.186096 | 2016-04-01T18:28:49 | 2016-04-01T18:28:49 | 55,184,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | cfg | # vim:syntax=python
import lit
import os
import sys
#
# Basic information about this test suite.
#
config.name = 'WATCHMAN'
config.suffixes = [ '.c', '.cpp', '.ll' ]
config.excludes = [ 'Inputs' ]
config.test_format = lit.formats.ShTest()
params = lit_config.params
#
# Useful environment variables.
#
# This variables are optional when WATCHMAN is installed to a standard location;
# if not, failure to set these variables will cause tests to fail building.
#
# Locate the WATCHMAN source and build trees: prefer the environment
# variables, fall back to lit parameters, and fail loudly if neither is set.
# NOTE(review): the 'raise Exception, (...)' form is Python 2 syntax; this
# config predates Python 3 lit.
source_dir = os.getenv('WATCHMAN_SOURCE_DIR')
if not source_dir:
  if not 'source_dir' in params:
    raise Exception, ('Unable to find WATCHMAN source directory;' +
                      ' set WATCHMAN_SOURCE_DIR or pass --source_dir to lit')
  source_dir = params['source_dir']
build_dir = os.getenv('WATCHMAN_BUILD_DIR')
if not build_dir:
  if not 'build_dir' in params:
    raise Exception, ('Unable to find WATCHMAN build directory;' +
                      ' set WATCHMAN_BUILD_DIR or pass --build_dir to lit')
  build_dir = params['build_dir']
include_dirs = [ build_dir + '/include' ]
extra_cflags = [ '-g' ] # always build tests with debug symbols
extra_cxxflags = [ '-g' ]
libdirs = []
extra_libs = []
if 'extra_include_dirs' in params:
include_dirs += params['extra_include_dirs'].split(os.path.pathsep)
if 'extra_cflags' in params:
extra_cflags += params['extra_cflags'].split(os.path.pathsep)
if 'extra_cxxflags' in params:
extra_cxxflags += params['extra_cxxflags'].split(os.path.pathsep)
if 'extra_libdirs' in params:
libdirs += params['extra_libdirs'].split(os.path.pathsep)
if 'extra_libs' in params:
extra_libs += params['extra_libs'].split(os.path.pathsep)
if 'output_dir' in params:
config.test_exec_root = params['output_dir']
#
# Find the 'test_support' module (which may not be in the current PYTHONPATH).
#
sys.path.append(os.curdir)
if source_dir: sys.path.append(os.path.join(source_dir, 'test'))
try:
import test_support as test
except ImportError, e:
print "Unable to find 'test_support' module!"
print "Try setting WATCHMAN_SOURCE_DIR?"
sys.exit(1)
#
# Find LLVM tools (e.g. FileCheck).
#
llvm_obj_root = test.llvm_config['obj-root']
llvm_tools = os.path.join(llvm_obj_root, 'bin')
#
# Find WATCHMAN includes and libraries.
#
for (header, subdir) in [
('watchman.h', 'include'),
('watchman_internal.h', 'lib'),
]:
include_dirs.append(
test.find_include_dir(header, [ '%s/%s' % (source_dir, subdir) ],
'Try setting WATCHMAN_SOURCE_DIR'))
library_dir = test.find_libdir('libwatchman.*',
[ '%s/lib' % d for d in [ os.getcwd(), build_dir ] ],
'Try setting WATCHMAN_BUILD_DIR')
libdirs.append(library_dir)
#
# Set tools paths, CFLAGS, LDFLAGS, PATH, etc.
#
def suffixize(name):
    """
    Expand a tool name into its possible suffixed variants.

    Accommodates FreeBSD's scheme of installing several LLVM/Clang
    versions side-by-side (e.g. clang-devel, clang38, clang37, clang),
    trying the suffixes in order of preference.
    """
    variants = ('-devel', '38', '37', '')
    return ['%s%s' % (name, tail) for tail in variants]
# Find the C/C++ compilers, trying suffixed names first (see suffixize).
clang = test.which(suffixize('clang'))
clangpp = test.which(suffixize('clang++'))
# Substitutions expanded in test RUN lines (e.g. %clang, %cflags).
config.substitutions += [
  # Tools:
  ('%clang', clang),
  ('%filecheck', test.which(suffixize('FileCheck'))),

  # Flags:
  ('%cflags', test.cflags(include_dirs + [ '%p/Inputs' ],
                          extra = extra_cflags)),
  ('%cxxflags', test.cflags(include_dirs + [ '%p/Inputs' ],
                            extra = extra_cxxflags)),
  ('%ldflags', test.ldflags(libdirs, [ 'watchman' ], extra_libs)),
  ('%cpp_out', test.cpp_out()),
]
# Environment for test processes: expose LLVM tools and helper scripts on
# PATH, point the loader at the built library, and pass the tree locations
# and compilers through to the tests.
config.environment['PATH'] = os.path.pathsep.join([
  llvm_tools,
  os.path.join(source_dir, 'scripts'),
  config.environment['PATH']
])
config.environment['LD_LIBRARY_PATH'] = library_dir
config.environment['WATCHMAN_BUILD_DIR'] = build_dir
config.environment['WATCHMAN_SOURCE_DIR'] = source_dir
config.environment['WATCHMAN_DEBUG'] = '*'
config.environment['CC'] = clang
config.environment['CXX'] = clangpp
| [
"jonathan.anderson@ieee.org"
] | jonathan.anderson@ieee.org |
0543881f2afd1d173d833918d01e03824189afec | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/docker/plugins/modules/docker_container.py | 033b5c72070540685e0da70e5b9c9f8771168ef8 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 154,384 | py | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
notes:
- For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
prevent this.
- If the module needs to recreate the container, it will only use the options provided to the module to create the
new container (except I(image)). Therefore, always specify *all* options relevant to the container.
- When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
Please note that several options have default values; if the container to be restarted uses different values for
these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
community.docker 2.0.0 on.
options:
auto_remove:
description:
- Enable auto-removal of the container on daemon side when the container's process exits.
- If I(container_default_behavior) is set to C(compatiblity) (the default value), this
option has a default of C(no).
type: bool
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
type: int
capabilities:
description:
- List of capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
elements: str
cgroup_parent:
description:
- Specify the parent cgroup for the container.
type: str
version_added: 1.1.0
cleanup:
description:
- Use with I(detach=false) to remove the container after successful execution.
type: bool
default: no
command:
description:
- Command to execute when the container starts. A command may be either a string or a list.
- Prior to version 2.4, strings were split on commas.
type: raw
comparisons:
description:
- Allows to specify how properties of existing containers are compared with
module options to decide whether the container should be recreated / updated
or not.
- Only options which correspond to the state of a container as handled by the
Docker daemon can be specified, as well as C(networks).
- Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
and C(allow_more_present).
- If C(strict) is specified, values are tested for equality, and changes always
result in updating or restarting. If C(ignore) is specified, changes are ignored.
- C(allow_more_present) is allowed only for lists, sets and dicts. If it is
specified for lists or sets, the container will only be updated or restarted if
the module option contains a value which is not present in the container's
options. If the option is specified for a dict, the container will only be updated
or restarted if the module option contains a key which isn't present in the
container's option, or if the value of a key present differs.
- The wildcard option C(*) can be used to set one of the default values C(strict)
or C(ignore) to *all* comparisons which are not explicitly set to other values.
- See the examples for details.
type: dict
container_default_behavior:
description:
- Various module options used to have default values. This causes problems with
containers which use different values for these options.
- The default value is C(compatibility), which will ensure that the default values
are used when the values are not explicitly specified by the user.
- From community.docker 2.0.0 on, the default value will switch to C(no_defaults). To avoid
deprecation warnings, please set I(container_default_behavior) to an explicit
value.
- This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
I(paused), I(privileged), I(read_only) and I(tty) options.
type: str
choices:
- compatibility
- no_defaults
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period.
- See I(cpus) for an easier to use alternative.
type: int
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota.
- See I(cpus) for an easier to use alternative.
type: int
cpus:
description:
- Specify how much of the available CPU resources a container can use.
- A value of C(1.5) means that at most one and a half CPU (core) will be used.
type: float
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
type: str
cpu_shares:
description:
- CPU shares (relative weight).
type: int
default_host_ip:
description:
- Define the default host IP to use.
- Must be an empty string, an IPv4 address, or an IPv6 address.
- With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid the
port bindings without an explicit IP address to only bind to IPv4.
See U(https://github.com/ansible-collections/community.docker/issues/70) for details.
- By default, the module will try to auto-detect this value from the C(bridge) network's
C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it
will fall back to C(0.0.0.0).
type: str
version_added: 1.2.0
detach:
description:
- Enable detached mode to leave the container running in background.
- If disabled, the task will reflect the status of the container run (failed if the command failed).
- If I(container_default_behavior) is set to C(compatiblity) (the default value), this
option has a default of C(yes).
type: bool
devices:
description:
- List of host device bindings to add to the container.
- "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
type: list
elements: str
device_read_bps:
description:
- "List of device path and read rate (bytes per second) from device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit in format C(<number>[<unit>])."
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- "Omitting the unit defaults to bytes."
type: str
required: yes
device_write_bps:
description:
- "List of device and write rate (bytes per second) to device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit in format C(<number>[<unit>])."
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- "Omitting the unit defaults to bytes."
type: str
required: yes
device_read_iops:
description:
- "List of device and read rate (IO per second) from device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit."
- "Must be a positive integer."
type: int
required: yes
device_write_iops:
description:
- "List of device and write rate (IO per second) to device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit."
- "Must be a positive integer."
type: int
required: yes
device_requests:
description:
- Allows to request additional resources, such as GPUs.
type: list
elements: dict
suboptions:
capabilities:
description:
- List of lists of strings to request capabilities.
- The top-level list entries are combined by OR, and for every list entry,
the entries in the list it contains are combined by AND.
- The driver tries to satisfy one of the sub-lists.
- Available capabilities for the C(nvidia) driver can be found at
U(https://github.com/NVIDIA/nvidia-container-runtime).
type: list
elements: list
count:
description:
- Number or devices to request.
- Set to C(-1) to request all available devices.
type: int
device_ids:
description:
- List of device IDs.
type: list
elements: str
driver:
description:
- Which driver to use for this device.
type: str
options:
description:
- Driver-specific options.
type: dict
version_added: 0.1.0
dns_opts:
description:
- List of DNS options.
type: list
elements: str
dns_servers:
description:
- List of custom DNS servers.
type: list
elements: str
dns_search_domains:
description:
- List of custom DNS search domains.
type: list
elements: str
domainname:
description:
- Container domainname.
type: str
env:
description:
- Dictionary of key,value pairs.
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
type: dict
env_file:
description:
- Path to a file, present on the target, containing environment variables I(FOO=BAR).
- If variable also present in I(env), then the I(env) value will override.
type: path
entrypoint:
description:
- Command that overwrites the default C(ENTRYPOINT) of the image.
type: list
elements: str
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's C(/etc/hosts) file.
type: dict
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
- If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
need to be exposed again.
type: list
elements: str
aliases:
- exposed
- expose
force_kill:
description:
- Use the kill command when stopping a running container.
type: bool
default: no
aliases:
- forcekill
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
type: list
elements: str
healthcheck:
description:
- Configure a check that is run to determine whether or not containers for this service are "healthy".
- "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
for details on how healthchecks work."
- "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
type: dict
suboptions:
test:
description:
- Command to run to check health.
- Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
type: raw
interval:
description:
- Time between running the check.
- The default used by the Docker daemon is C(30s).
type: str
timeout:
description:
- Maximum time to allow one check to run.
- The default used by the Docker daemon is C(30s).
type: str
retries:
description:
- Consecutive number of failures needed to report unhealthy.
- The default used by the Docker daemon is C(3).
type: int
start_period:
description:
- Start period for the container to initialize before starting health-retries countdown.
- The default used by the Docker daemon is C(0s).
type: str
hostname:
description:
- The container's hostname.
type: str
ignore_image:
description:
- When I(state) is C(present) or C(started), the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If the image
version in the registry does not match the container, the container will be recreated. You can
stop this behavior by setting I(ignore_image) to C(True).
- "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
I(comparisons) option."
type: bool
default: no
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to be available locally.
The I(pull) option is ignored for this case.
type: str
init:
description:
- Run an init inside the container that forwards signals and reaps processes.
- This option requires Docker API >= 1.25.
- If I(container_default_behavior) is set to C(compatiblity) (the default value), this
option has a default of C(no).
type: bool
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
- If I(container_default_behavior) is set to C(compatiblity) (the default value), this
option has a default of C(no).
type: bool
ipc_mode:
description:
- Set the IPC mode for the container.
- Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
the host's IPC namespace within the container.
type: str
keep_volumes:
description:
- Retain anonymous volumes associated with a removed container.
type: bool
default: yes
kill_signal:
description:
- Override default signal used to kill a running container.
type: str
kernel_memory:
description:
- "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
- Omitting the unit defaults to bytes.
type: str
labels:
description:
- Dictionary of key value pairs.
type: dict
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias).
- Setting this will force container to be restarted.
type: list
elements: str
log_driver:
description:
- Specify the logging driver. Docker uses C(json-file) by default.
- See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
type: str
log_options:
description:
- Dictionary of options specific to the chosen I(log_driver).
- See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
type: dict
aliases:
- log_opt
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33).
type: str
memory:
description:
- "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
- If I(container_default_behavior) is set to C(compatiblity) (the default value), this
option has a default of C("0").
type: str
memory_reservation:
description:
- "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
memory_swap:
description:
- "Total memory limit (memory + swap) in format C(<number>[<unit>]).
Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
- If not set, the value will be remain the same if container exists and will be inherited
from the host machine if it is (re-)created.
type: int
mounts:
type: list
elements: dict
description:
- Specification for mounts to be added to the container. More powerful alternative to I(volumes).
suboptions:
target:
description:
- Path inside the container.
type: str
required: true
source:
description:
- Mount source (e.g. a volume name or a host path).
type: str
type:
description:
- The mount type.
- Note that C(npipe) is only supported by Docker for Windows.
type: str
choices:
- bind
- npipe
- tmpfs
- volume
default: volume
read_only:
description:
- Whether the mount should be read-only.
type: bool
consistency:
description:
- The consistency requirement for the mount.
type: str
choices:
- cached
- consistent
- default
- delegated
propagation:
description:
- Propagation mode. Only valid for the C(bind) type.
type: str
choices:
- private
- rprivate
- shared
- rshared
- slave
- rslave
no_copy:
description:
- False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
- The default value is C(false).
type: bool
labels:
description:
- User-defined name and labels for the volume. Only valid for the C(volume) type.
type: dict
volume_driver:
description:
- Specify the volume driver. Only valid for the C(volume) type.
- See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
type: str
volume_options:
description:
- Dictionary of options specific to the chosen volume_driver. See
L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
type: dict
tmpfs_size:
description:
- "The size for the tmpfs mount in bytes in format <number>[<unit>]."
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- "Omitting the unit defaults to bytes."
type: str
tmpfs_mode:
description:
- The permission mode for the tmpfs mount.
type: str
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
type: str
required: yes
network_mode:
description:
- Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
- "*Note* that from community.docker 2.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
I(network_mode) is not specified."
type: str
userns_mode:
description:
- Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
type: str
networks:
description:
- List of networks the container belongs to.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the I(purge_networks) option.
- If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified.
This is different from the behavior of C(docker run ...). You need to explicitly use I(purge_networks) to enforce
the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case.
type: list
elements: dict
suboptions:
name:
description:
- The network's name.
type: str
required: yes
ipv4_address:
description:
- The container's IPv4 address in this network.
type: str
ipv6_address:
description:
- The container's IPv6 address in this network.
type: str
links:
description:
- A list of containers to link to.
type: list
elements: str
aliases:
description:
- List of aliases for this container in this network. These names
can be used in the network to reach this container.
type: list
elements: str
networks_cli_compatible:
description:
- "If I(networks_cli_compatible) is set to C(yes) (default), this module will behave as
C(docker run --network) and will *not* add the default network if I(networks) is
specified. If I(networks) is not specified, the default network will be attached."
- "When I(networks_cli_compatible) is set to C(no) and networks are provided to the module
via the I(networks) option, the module behaves differently than C(docker run --network):
C(docker run --network other) will create a container with network C(other) attached,
but the default network not attached. This module with I(networks: {name: other}) will
create a container with both C(default) and C(other) attached. If I(purge_networks) is
set to C(yes), the C(default) network will be removed afterwards."
- "*Note* that docker CLI also sets I(network_mode) to the name of the first network
added if C(--network) is specified. For more compatibility with docker CLI, you
explicitly have to set I(network_mode) to the name of the first network you're
adding. This behavior will change for community.docker 2.0.0: then I(network_mode) will
automatically be set to the first network name in I(networks) if I(network_mode)
is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
is C(true)."
type: bool
default: true
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
type: bool
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune
OOM killer preferences.
type: int
output_logs:
description:
- If set to true, output of the container command will be printed.
- Only effective when I(log_driver) is set to C(json-file) or C(journald).
type: bool
default: no
paused:
description:
- Use with the started state to pause running processes inside the container.
- If I(container_default_behavior) is set to C(compatibility) (the default value), this
  option has a default of C(no).
type: bool
pid_mode:
description:
- Set the PID namespace mode for the container.
- Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
Docker SDK for Python (docker) allow all values supported by the Docker daemon.
type: str
pids_limit:
description:
- Set PIDs limit for the container. It accepts an integer value.
- Set C(-1) for unlimited PIDs.
type: int
privileged:
description:
- Give extended privileges to the container.
- If I(container_default_behavior) is set to C(compatibility) (the default value), this
  option has a default of C(no).
type: bool
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Port ranges can be used for source and destination ports. If two ranges with
different lengths are specified, the shorter range will be used.
Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
to the first port of the destination range, but to a free port in that range. This is the
same behavior as for C(docker) command line utility.
- "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
to resolve hostnames."
- A value of C(all) will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If I(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
value encountered in the list of I(networks) is the one that will be used.
type: list
elements: str
aliases:
- ports
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image
when missing.
- "*Note:* images are only pulled when specified by name. If the image is specified
  as an image ID (hash), it cannot be pulled."
type: bool
default: no
purge_networks:
description:
- Remove the container from ALL networks not included in I(networks) parameter.
- Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
type: bool
default: no
read_only:
description:
- Mount the container's root file system as read-only.
- If I(container_default_behavior) is set to C(compatibility) (the default value), this
  option has a default of C(no).
type: bool
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
type: bool
default: no
removal_wait_timeout:
description:
- When removing an existing container, the docker daemon API call returns after the container
  is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O
  load, removal can take longer. By default, the module will wait until the container has been
  removed before trying to (re-)create it, however long this takes.
- By setting this option, the module will wait at most this many seconds for the container to be
removed. If the container is still in the removal phase after this many seconds, the module will
fail.
type: float
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
type: bool
default: no
restart_policy:
description:
- Container restart policy.
- Place quotes around C(no) option.
type: str
choices:
- 'no'
- 'on-failure'
- 'always'
- 'unless-stopped'
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
type: int
runtime:
description:
- Runtime to use for the container.
type: str
shm_size:
description:
- "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
type: str
security_opts:
description:
- List of security options in the form of C("label:user:User").
type: list
elements: str
state:
description:
- 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
- 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config.'
- 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
state. Use I(restart) to force a matching container to be stopped and restarted.'
- 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
state.'
- To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
image version will be taken into account, you can also use the I(ignore_image) option.
- Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
- If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
- Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
type: str
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
type: str
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending C(SIGKILL).
When the container is created by this module, its C(StopTimeout) configuration
will be set to this value.
- When the container is stopped, will be used as a timeout for stopping the
container. In case the container has a custom C(StopTimeout) configuration,
the behavior depends on the version of the docker daemon. New versions of
the docker daemon will always use the container's configured C(StopTimeout)
value if it has been configured.
type: int
tmpfs:
description:
- Mount a tmpfs directory.
type: list
elements: str
tty:
description:
- Allocate a pseudo-TTY.
- If I(container_default_behavior) is set to C(compatibility) (the default value), this
  option has a default of C(no).
type: bool
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
type: list
elements: str
sysctls:
description:
- Dictionary of key,value pairs.
type: dict
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
type: str
uts:
description:
- Set the UTS namespace mode for the container.
type: str
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
- "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
C(z), and C(Z)."
type: list
elements: str
volume_driver:
description:
- The container volume driver.
type: str
volumes_from:
description:
- List of container names or IDs to get volumes from.
type: list
elements: str
working_dir:
description:
- Path to the working directory.
type: str
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_1_documentation
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "Chris Houseknecht (@chouseknecht)"
- "Kassian Sun (@kassiansun)"
- "Felix Fontein (@felixfontein)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
community.docker.docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
community.docker.docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
community.docker.docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
# Publish container port 9000 as host port 8080
- "8080:9000"
# Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
- "127.0.0.1:8081:9001/udp"
# Publish container port 9002 as a random host port
- "9002"
# Publish container port 9003 as a free host port in range 8000-8100
# (the host port will be selected by the Docker daemon)
- "8000-8100:9003"
# Publish container ports 9010-9020 to host ports 7000-7010
- "7000-7010:9010-9020"
env:
SECRET_KEY: "ssssh"
# Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
BOOLEAN_KEY: "yes"
- name: Container present
community.docker.docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
community.docker.docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
community.docker.docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: Remove container
community.docker.docker_container:
name: ohno
state: absent
- name: Syslogging output
community.docker.docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
# NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for
# older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
community.docker.docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
community.docker.docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
community.docker.docker_container:
name: sleepy
image: ubuntu:14.04
command: ["sleep", "infinity"]
- name: Add container to networks
community.docker.docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
community.docker.docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
community.docker.docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
community.docker.docker_container:
name: sleepy
purge_networks: yes
- name: Start a container and use an env file
community.docker.docker_container:
name: agent
image: jenkinsci/ssh-slave
env_file: /var/tmp/jenkins/agent.env
- name: Create a container with limited capabilities
community.docker.docker_container:
name: sleepy
image: ubuntu:16.04
command: sleep infinity
capabilities:
- sys_time
cap_drop:
- all
- name: Finer container restart/update control
community.docker.docker_container:
name: test
image: ubuntu:18.04
env:
arg1: "true"
arg2: "whatever"
volumes:
- /tmp:/tmp
comparisons:
image: ignore # don't restart containers with older versions of the image
env: strict # we want precisely this environment
volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
- name: Finer container restart/update control II
community.docker.docker_container:
name: test
image: ubuntu:18.04
env:
arg1: "true"
arg2: "whatever"
comparisons:
'*': ignore # by default, ignore *all* options (including image)
env: strict # except for environment variables; there, we want to be strict
- name: Start container with healthstatus
community.docker.docker_container:
name: nginx-proxy
image: nginx:1.13
state: started
healthcheck:
# Check if nginx server is healthy by curl'ing the server.
# If this fails or timeouts, the healthcheck fails.
test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
interval: 1m30s
timeout: 10s
retries: 3
start_period: 30s
- name: Remove healthcheck from container
community.docker.docker_container:
name: nginx-proxy
image: nginx:1.13
state: started
healthcheck:
# The "NONE" check needs to be specified
test: ["NONE"]
- name: Start container with block device read limit
community.docker.docker_container:
name: test
image: ubuntu:18.04
state: started
device_read_bps:
# Limit read rate for /dev/sda to 20 mebibytes per second
- path: /dev/sda
rate: 20M
device_read_iops:
# Limit read rate for /dev/sdb to 300 IO per second
- path: /dev/sdb
rate: 300
- name: Start container with GPUs
community.docker.docker_container:
name: test
image: ubuntu:18.04
state: started
device_requests:
- # Add some specific devices to this container
device_ids:
- '0'
- 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
- # Add nVidia GPUs to this container
driver: nvidia
count: -1 # this means we want all
capabilities:
# We have one OR condition: 'gpu' AND 'utility'
- - gpu
- utility
# See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
# for a list of capabilities supported by the nvidia driver
'''
RETURN = '''
container:
description:
- Facts representing the current state of the container. Matches the docker inspection output.
- Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
conflicts with the connection plugin.
- Empty if I(state) is C(absent)
- If I(detached) is C(false), will include C(Output) attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
status:
description:
- In case a container is started without detaching, this contains the exit code of the process in the container.
- Before community.docker 1.1.0, this was only returned when non-zero.
returned: when I(state) is C(started) and I(detached) is C(false), and when waiting for the container result did not fail
type: int
sample: 0
'''
import os
import re
import shlex
import traceback
from distutils.version import LooseVersion
from time import sleep
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text
from ansible_collections.community.docker.plugins.module_utils.common import (
AnsibleDockerClient,
DifferenceTracker,
DockerBaseClass,
compare_generic,
is_image_name_id,
sanitize_result,
clean_dict_booleans_for_docker_api,
omit_none_from_dict,
parse_healthcheck,
DOCKER_COMMON_ARGS,
RequestException,
)
try:
from docker import utils
from ansible_collections.community.docker.plugins.module_utils.common import docker_version
if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
from docker.types import Ulimit, LogConfig
from docker import types as docker_types
else:
from docker.utils.types import Ulimit, LogConfig
from docker.errors import DockerException, APIError, NotFound
except Exception:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
# Module options whose values are human-readable sizes (e.g. "64M", "1G") and
# must be converted to an integer byte count (via human_to_bytes) before use.
REQUIRES_CONVERSION_TO_BYTES = [
    'kernel_memory',
    'memory',
    'memory_reservation',
    'memory_swap',
    'shm_size'
]
def is_volume_permissions(mode):
    """Return True if every comma-separated component of *mode* is a known volume mode."""
    known_modes = frozenset((
        'rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached',
        'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy',
    ))
    return all(component in known_modes for component in mode.split(','))
def parse_port_range(range_or_port, client):
    """
    Parse a string containing either a single port or a range of ports.

    Returns a list of integers, one per port. Invalid input is reported
    through ``client.fail``.
    """
    if '-' not in range_or_port:
        # Single port.
        try:
            return [int(range_or_port)]
        except Exception:
            client.fail('Invalid port: "{0}"'.format(range_or_port))

    # Port range "start-end" (inclusive on both ends).
    try:
        start, end = (int(piece) for piece in range_or_port.split('-'))
    except Exception:
        client.fail('Invalid port range: "{0}"'.format(range_or_port))
    if end < start:
        client.fail('Invalid port range: "{0}"'.format(range_or_port))
    return list(range(start, end + 1))
def split_colon_ipv6(text, client):
    '''
    Split string by ':', while keeping IPv6 addresses in square brackets in one component.
    '''
    # Fast path: no bracketed IPv6 address present, a plain split is enough.
    if '[' not in text:
        return text.split(':')
    start = 0
    result = []
    while start < len(text):
        i = text.find('[', start)
        if i < 0:
            # No further '[' -- split the remainder normally and stop.
            result.extend(text[start:].split(':'))
            break
        j = text.find(']', i)
        if j < 0:
            # Unbalanced bracket: report a 1-based position to the user.
            client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
        # Split the text before the '[' on ':'. The last piece produced here is
        # the (possibly empty) component the bracketed address belongs to.
        result.extend(text[start:i].split(':'))
        # The first ':' after the closing ']' terminates the bracketed component.
        k = text.find(':', j)
        if k < 0:
            # Bracketed address runs to the end of the string.
            result[-1] += text[i:]
            start = len(text)
        else:
            result[-1] += text[i:k]
            if k == len(text):
                # NOTE(review): str.find() returns an index < len(text) or -1,
                # so this branch looks unreachable -- confirm before relying on it.
                result.append('')
                break
            start = k + 1
    return result
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
    def __init__(self, client):
        """
        Copy all module parameters onto ``self`` and normalize/validate the
        ones that need pre-processing before they are sent to the Docker API.

        :param client: Docker client wrapper exposing ``module.params``,
            ``comparisons`` and failure handling.
        """
        super(TaskParameters, self).__init__()
        self.client = client

        # Declare every supported module option up front so attribute access
        # is always safe; actual values are copied from client.module.params below.
        self.auto_remove = None
        self.blkio_weight = None
        self.capabilities = None
        self.cap_drop = None
        self.cleanup = None
        self.command = None
        self.cpu_period = None
        self.cpu_quota = None
        self.cpus = None
        self.cpuset_cpus = None
        self.cpuset_mems = None
        self.cpu_shares = None
        self.debug = None
        self.default_host_ip = None
        self.detach = None
        self.devices = None
        self.device_read_bps = None
        self.device_write_bps = None
        self.device_read_iops = None
        self.device_write_iops = None
        self.device_requests = None
        self.dns_servers = None
        self.dns_opts = None
        self.dns_search_domains = None
        self.domainname = None
        self.env = None
        self.env_file = None
        self.entrypoint = None
        self.etc_hosts = None
        self.exposed_ports = None
        self.force_kill = None
        self.groups = None
        self.healthcheck = None
        self.hostname = None
        self.ignore_image = None
        self.image = None
        self.init = None
        self.interactive = None
        self.ipc_mode = None
        self.keep_volumes = None
        self.kernel_memory = None
        self.kill_signal = None
        self.labels = None
        self.links = None
        self.log_driver = None
        self.output_logs = None
        self.log_options = None
        self.mac_address = None
        self.memory = None
        self.memory_reservation = None
        self.memory_swap = None
        self.memory_swappiness = None
        self.mounts = None
        self.name = None
        self.network_mode = None
        self.userns_mode = None
        self.networks = None
        self.networks_cli_compatible = None
        self.oom_killer = None
        self.oom_score_adj = None
        self.paused = None
        self.pid_mode = None
        self.pids_limit = None
        self.privileged = None
        self.purge_networks = None
        self.pull = None
        self.read_only = None
        self.recreate = None
        self.removal_wait_timeout = None
        self.restart = None
        self.restart_retries = None
        self.restart_policy = None
        self.runtime = None
        self.shm_size = None
        self.security_opts = None
        self.state = None
        self.stop_signal = None
        self.stop_timeout = None
        self.tmpfs = None
        self.tty = None
        self.user = None
        self.uts = None
        self.volumes = None
        self.volume_binds = dict()
        self.volumes_from = None
        self.volume_driver = None
        self.working_dir = None

        # Copy every module parameter onto this object.
        for key, value in client.module.params.items():
            setattr(self, key, value)
        self.comparisons = client.comparisons

        # If state is 'absent', parameters do not have to be parsed or interpreted.
        # Only the container's name is needed.
        if self.state == 'absent':
            return

        # Validate default_host_ip; a bare IPv6 address is wrapped in square
        # brackets so later port-binding comparisons use one canonical form.
        if self.default_host_ip:
            valid_ip = False
            if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.default_host_ip):
                valid_ip = True
            if re.match(r'^\[[0-9a-fA-F:]+\]$', self.default_host_ip):
                valid_ip = True
            if re.match(r'^[0-9a-fA-F:]+$', self.default_host_ip):
                self.default_host_ip = '[{0}]'.format(self.default_host_ip)
                valid_ip = True
            if not valid_ip:
                self.fail('The value of default_host_ip must be an empty string, an IPv4 address, '
                          'or an IPv6 address. Got "{0}" instead.'.format(self.default_host_ip))

        # Scale fractional CPUs by 10**9 -- presumably the Docker API's
        # NanoCPUs field; confirm against the host_config construction.
        if self.cpus is not None:
            self.cpus = int(round(self.cpus * 1E9))

        if self.groups:
            # In case integers are passed as groups, we need to convert them to
            # strings as docker internally treats them as strings.
            self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups]

        # Convert human-readable sizes ("64M", ...) to integer byte counts.
        for param_name in REQUIRES_CONVERSION_TO_BYTES:
            if client.module.params.get(param_name):
                try:
                    setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
                except ValueError as exc:
                    self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))

        # published_ports == 'all' means "publish all exposed ports to random
        # host ports" and is represented by the publish_all_ports flag instead.
        self.publish_all_ports = False
        self.published_ports = self._parse_publish_ports()
        if self.published_ports == 'all':
            self.publish_all_ports = True
            self.published_ports = None

        self.ports = self._parse_exposed_ports(self.published_ports)
        self.log("expose ports:")
        self.log(self.ports, pretty_print=True)

        self.links = self._parse_links(self.links)

        if self.volumes:
            self.volumes = self._expand_host_paths()

        # Normalize the remaining structured options into API-ready shapes.
        self.tmpfs = self._parse_tmpfs()
        self.env = self._get_environment()
        self.ulimits = self._parse_ulimits()
        self.sysctls = self._parse_sysctls()
        self.log_config = self._parse_log_config()
        try:
            self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
        except ValueError as e:
            self.fail(to_native(e))

        self.exp_links = None
        self.volume_binds = self._get_volume_binds(self.volumes)
        # Resolve "container:<name>" references to container IDs in these modes.
        self.pid_mode = self._replace_container_names(self.pid_mode)
        self.ipc_mode = self._replace_container_names(self.ipc_mode)
        self.network_mode = self._replace_container_names(self.network_mode)

        self.log("volumes:")
        self.log(self.volumes, pretty_print=True)
        self.log("volume binds:")
        self.log(self.volume_binds, pretty_print=True)

        # Resolve each requested network name to its ID and parse its links.
        if self.networks:
            for network in self.networks:
                network['id'] = self._get_network_id(network['name'])
                if not network['id']:
                    self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
                if network.get('links'):
                    network['links'] = self._parse_links(network['links'])

        if self.mac_address:
            # Ensure the MAC address uses colons instead of hyphens for later comparison
            self.mac_address = self.mac_address.replace('-', ':')

        if self.entrypoint:
            # convert from list to str.
            self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])

        if self.command:
            # convert from list to str
            if isinstance(self.command, list):
                self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command])

        self.mounts_opt, self.expected_mounts = self._process_mounts()

        self._check_mount_target_collisions()

        # Validate/convert the device throttle options.
        for param_name in ["device_read_bps", "device_write_bps"]:
            if client.module.params.get(param_name):
                self._process_rate_bps(option=param_name)

        for param_name in ["device_read_iops", "device_write_iops"]:
            if client.module.params.get(param_name):
                self._process_rate_iops(option=param_name)

        if self.device_requests:
            for dr_index, dr in enumerate(self.device_requests):
                # Make sure that capabilities are lists of lists of strings
                if dr['capabilities']:
                    for or_index, or_list in enumerate(dr['capabilities']):
                        for and_index, and_term in enumerate(or_list):
                            if not isinstance(and_term, string_types):
                                self.fail(
                                    "device_requests[{0}].capabilities[{1}][{2}] is not a string".format(
                                        dr_index, or_index, and_index))
                            or_list[and_index] = to_native(and_term)
                # Make sure that options is a dictionary mapping strings to strings
                if dr['options']:
                    dr['options'] = clean_dict_booleans_for_docker_api(dr['options'])
    def fail(self, msg):
        """Delegate failure handling to the Docker client."""
        self.client.fail(msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
cpuset_mems='cpuset_mems',
mem_limit='memory',
mem_reservation='memory_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory',
restart_policy='restart_policy',
)
result = dict()
for key, value in update_parameters.items():
if getattr(self, value, None) is not None:
if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
restart_policy = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
result[key] = restart_policy
elif self.client.option_minimal_versions[value]['supported']:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
    '''
    Returns parameters used to create a container

    Maps docker-py create_container() keyword -> this object's attribute
    name; version-unsupported options are skipped.
    '''
    create_params = dict(
        command='command',
        domainname='domainname',
        hostname='hostname',
        user='user',
        detach='detach',
        stdin_open='interactive',
        tty='tty',
        ports='ports',
        environment='env',
        name='name',
        entrypoint='entrypoint',
        mac_address='mac_address',
        labels='labels',
        stop_signal='stop_signal',
        working_dir='working_dir',
        stop_timeout='stop_timeout',
        healthcheck='healthcheck',
    )
    if self.client.docker_py_version < LooseVersion('3.0'):
        # cpu_shares and volume_driver moved to create_host_config in > 3
        create_params['cpu_shares'] = 'cpu_shares'
        create_params['volume_driver'] = 'volume_driver'
    result = dict(
        host_config=self._host_config(),
        volumes=self._get_mounts(),
    )
    for key, value in create_params.items():
        if getattr(self, value, None) is not None:
            # Only pass options supported by the detected Docker/SDK versions.
            if self.client.option_minimal_versions[value]['supported']:
                result[key] = getattr(self, value)
    if self.disable_healthcheck:
        # Make sure image's health check is overridden
        result['healthcheck'] = {'test': ['NONE']}
    if self.networks_cli_compatible and self.networks:
        # Attach the first requested network at creation time; any further
        # networks are connected elsewhere after creation.
        network = self.networks[0]
        params = dict()
        for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
            if network.get(para):
                params[para] = network[para]
        network_config = dict()
        network_config[network['name']] = self.client.create_endpoint_config(**params)
        result['networking_config'] = self.client.create_networking_config(network_config)
    return result
def _expand_host_paths(self):
new_vols = []
for vol in self.volumes:
if ':' in vol:
parts = vol.split(':')
if len(parts) == 3:
host, container, mode = parts
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
if re.match(r'[.~]', host):
host = os.path.abspath(os.path.expanduser(host))
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(parts) == 2:
if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
host = os.path.abspath(os.path.expanduser(parts[0]))
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
result = []
if self.volumes:
for vol in self.volumes:
# Only pass anonymous volumes to create container
if ':' in vol:
parts = vol.split(':')
if len(parts) == 3:
continue
if len(parts) == 2:
if not is_volume_permissions(parts[1]):
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
def _host_config(self):
    '''
    Returns parameters used to create a HostConfig object

    Maps docker-py create_host_config() keyword -> this object's attribute
    name; options not supported by the detected versions are skipped.
    '''
    host_config_params = dict(
        port_bindings='published_ports',
        publish_all_ports='publish_all_ports',
        links='links',
        privileged='privileged',
        cgroup_parent='cgroup_parent',
        dns='dns_servers',
        dns_opt='dns_opts',
        dns_search='dns_search_domains',
        binds='volume_binds',
        volumes_from='volumes_from',
        network_mode='network_mode',
        userns_mode='userns_mode',
        cap_add='capabilities',
        cap_drop='cap_drop',
        extra_hosts='etc_hosts',
        read_only='read_only',
        ipc_mode='ipc_mode',
        security_opt='security_opts',
        ulimits='ulimits',
        sysctls='sysctls',
        log_config='log_config',
        mem_limit='memory',
        memswap_limit='memory_swap',
        mem_swappiness='memory_swappiness',
        oom_score_adj='oom_score_adj',
        oom_kill_disable='oom_killer',
        shm_size='shm_size',
        group_add='groups',
        devices='devices',
        pid_mode='pid_mode',
        tmpfs='tmpfs',
        init='init',
        uts_mode='uts',
        runtime='runtime',
        auto_remove='auto_remove',
        device_read_bps='device_read_bps',
        device_write_bps='device_write_bps',
        device_read_iops='device_read_iops',
        device_write_iops='device_write_iops',
        pids_limit='pids_limit',
        mounts='mounts',
        nano_cpus='cpus',
    )
    if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
        # blkio_weight can always be updated, but can only be set on creation
        # when Docker SDK for Python and Docker API are new enough
        host_config_params['blkio_weight'] = 'blkio_weight'
    if self.client.docker_py_version >= LooseVersion('3.0'):
        # cpu_shares and volume_driver moved to create_host_config in > 3
        host_config_params['cpu_shares'] = 'cpu_shares'
        host_config_params['volume_driver'] = 'volume_driver'
    params = dict()
    for key, value in host_config_params.items():
        if getattr(self, value, None) is not None:
            if self.client.option_minimal_versions[value]['supported']:
                params[key] = getattr(self, value)
    if self.restart_policy:
        params['restart_policy'] = dict(Name=self.restart_policy,
                                        MaximumRetryCount=self.restart_retries)
    if 'mounts' in params:
        # Use the docker.types.Mount objects prepared by _process_mounts().
        params['mounts'] = self.mounts_opt
    if self.device_requests is not None:
        # Strip unset (None) fields before passing the device requests on.
        params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests]
    return self.client.create_host_config(**params)
def get_default_host_ip(self):
    """
    Determine the default bind IP for published ports.

    Returns default_host_ip when set; otherwise inspects the requested
    networks for a bridge network with a host_binding_ipv4 option, falling
    back to 0.0.0.0. Fails the module if a named network cannot be inspected.
    """
    if self.default_host_ip is not None:
        return self.default_host_ip
    ip = '0.0.0.0'
    if not self.networks:
        return ip
    for net in self.networks:
        if net.get('name'):
            try:
                network = self.client.inspect_network(net['name'])
                if network.get('Driver') == 'bridge' and \
                        network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
                    ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
                    break
            except NotFound as nfe:
                self.client.fail(
                    "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
                    exception=traceback.format_exc()
                )
    return ip
def _parse_publish_ports(self):
    '''
    Parse ports from docker CLI syntax

    Each entry looks like [ip:][host_port:]container_port[/protocol], where
    the port fields may be ranges. Returns 'all', None, or a dict mapping
    container port (optionally with '/protocol') to one or more host binds.
    '''
    if self.published_ports is None:
        return None
    if 'all' in self.published_ports:
        if len(self.published_ports) > 1:
            self.client.module.deprecate(
                'Specifying "all" in published_ports together with port mappings is not properly '
                'supported by the module. The port mappings are currently ignored. Please specify '
                'only port mappings, or the value "all". The behavior for mixed usage will either '
                'be forbidden in version 2.0.0, or properly handled. In any case, the way you '
                'currently use the module will change in a breaking way',
                collection_name='community.docker', version='2.0.0')
        return 'all'
    default_ip = self.get_default_host_ip()
    binds = {}
    for port in self.published_ports:
        # split_colon_ipv6 keeps bracketed IPv6 literals intact.
        parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client)
        container_port = parts[-1]
        protocol = ''
        if '/' in container_port:
            container_port, protocol = parts[-1].split('/')
        container_ports = parse_port_range(container_port, self.client)
        p_len = len(parts)
        if p_len == 1:
            # Only a container port given: bind the default IP, host port left to Docker.
            port_binds = len(container_ports) * [(default_ip,)]
        elif p_len == 2:
            # host_port:container_port (either side may be a range).
            if len(container_ports) == 1:
                port_binds = [(default_ip, parts[0])]
            else:
                port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
        elif p_len == 3:
            # We only allow IPv4 and IPv6 addresses for the bind address
            ipaddr = parts[0]
            if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
                self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
                           'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
            if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
                # Strip the brackets from a literal IPv6 address.
                ipaddr = ipaddr[1:-1]
            if parts[1]:
                if len(container_ports) == 1:
                    port_binds = [(ipaddr, parts[1])]
                else:
                    port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
            else:
                # ip::container_port - bind the IP, host port left to Docker.
                port_binds = len(container_ports) * [(ipaddr,)]
        for bind, container_port in zip(port_binds, container_ports):
            idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
            if idx in binds:
                # Multiple binds for the same container port accumulate in a list.
                old_bind = binds[idx]
                if isinstance(old_bind, list):
                    old_bind.append(bind)
                else:
                    binds[idx] = [old_bind, bind]
            else:
                binds[idx] = bind
    return binds
def _get_volume_binds(self, volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
parts = vol.split(':')
if len(parts) == 3:
host, container, mode = parts
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
elif len(parts) == 2:
if not is_volume_permissions(parts[1]):
host, container, mode = (parts + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
def _parse_exposed_ports(self, published_ports):
    '''
    Parse exposed ports from docker CLI-style ports syntax.

    Returns a list of (port, protocol) tuples built from exposed_ports,
    extended with any published port that is not already covered.
    '''
    exposed = []
    if self.exposed_ports:
        for port in self.exposed_ports:
            port = to_text(port, errors='surrogate_or_strict').strip()
            protocol = 'tcp'
            match = re.search(r'(/.+$)', port)
            if match:
                # Split off an explicit '/tcp' or '/udp' suffix.
                protocol = match.group(1).replace('/', '')
                port = re.sub(r'/.+$', '', port)
            exposed.append((port, protocol))
    if published_ports:
        # Any published port should also be exposed
        for publish_port in published_ports:
            match = False
            if isinstance(publish_port, string_types) and '/' in publish_port:
                port, protocol = publish_port.split('/')
                port = int(port)
            else:
                protocol = 'tcp'
                port = int(publish_port)
            for exposed_port in exposed:
                if exposed_port[1] != protocol:
                    continue
                if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
                    # Exposed port ranges ("8000-8010") cover any published port inside them.
                    start_port, end_port = exposed_port[0].split('-')
                    if int(start_port) <= port <= int(end_port):
                        match = True
                elif exposed_port[0] == port:
                    # NOTE(review): entries built from exposed_ports above keep the
                    # port as text while `port` here is an int, so this equality
                    # appears to match only entries appended in this loop — verify intent.
                    match = True
            if not match:
                exposed.append((port, protocol))
    return exposed
@staticmethod
def _parse_links(links):
'''
Turn links into a dictionary
'''
if links is None:
return None
result = []
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result.append((parsed_link[0], parsed_link[1]))
else:
result.append((parsed_link[0], parsed_link[0]))
return result
def _parse_ulimits(self):
    '''
    Turn ulimit strings ("name:soft[:hard]") into a list of Ulimit objects.
    '''
    if self.ulimits is None:
        return None
    parsed = []
    for limit in self.ulimits:
        pieces = limit.split(':')
        limits = dict()
        if len(pieces) >= 2:
            limits['name'] = pieces[0]
            limits['soft'] = int(pieces[1])
            # The hard limit defaults to the soft limit unless given explicitly.
            limits['hard'] = int(pieces[2]) if len(pieces) == 3 else int(pieces[1])
        try:
            parsed.append(Ulimit(**limits))
        except ValueError as exc:
            self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
    return parsed
def _parse_sysctls(self):
    '''
    Return the sysctls mapping; the API accepts the plain dict as-is,
    so no conversion is needed here.
    '''
    return self.sysctls
def _parse_log_config(self):
    '''
    Create a LogConfig object from the log_driver/log_options parameters.

    Returns None when no log driver is configured. Non-string option values
    are converted to text with a warning, since the Docker API expects
    string values. Fails the module if LogConfig rejects the options.
    '''
    if self.log_driver is None:
        return None
    options = dict(
        Type=self.log_driver,
        Config=dict()
    )
    if self.log_options is not None:
        # Note: 'Config' is already initialized above; the previous redundant
        # re-initialization here was removed.
        for k, v in self.log_options.items():
            if not isinstance(v, string_types):
                self.client.module.warn(
                    "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
                    "If this is not correct, or you want to avoid such warnings, please quote the value." % (
                        k, to_text(v, errors='surrogate_or_strict'))
                )
                v = to_text(v, errors='surrogate_or_strict')
            self.log_options[k] = v
            options['Config'][k] = v
    try:
        return LogConfig(**options)
    except ValueError as exc:
        self.fail('Error parsing logging options - %s' % (exc))
def _parse_tmpfs(self):
'''
Turn tmpfs into a hash of Tmpfs objects
'''
result = dict()
if self.tmpfs is None:
return result
for tmpfs_spec in self.tmpfs:
split_spec = tmpfs_spec.split(":", 1)
if len(split_spec) > 1:
result[split_spec[0]] = split_spec[1]
else:
result[split_spec[0]] = ""
return result
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.items():
final_env[name] = to_text(value, errors='surrogate_or_strict')
if self.env:
for name, value in self.env.items():
if not isinstance(value, string_types):
self.fail("Non-string value found for env option. Ambiguous env options must be "
"wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
final_env[name] = to_text(value, errors='surrogate_or_strict')
return final_env
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
return network_id
def _process_mounts(self):
    """
    Convert the mounts parameter into docker.types.Mount objects.

    :return: tuple (mounts_list, mounts_expected) - the Mount objects to pass
        to the Docker SDK, and the normalized dicts used later for comparing
        against an existing container. Both are None if no mounts were given.
    """
    if self.mounts is None:
        return None, None
    mounts_list = []
    mounts_expected = []
    for mount in self.mounts:
        target = mount['target']
        datatype = mount['type']
        mount_dict = dict(mount)
        # Sanity checks (so we don't wait for docker-py to barf on input)
        if mount_dict.get('source') is None and datatype != 'tmpfs':
            self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
        # Options that are only valid for one specific mount type.
        mount_option_types = dict(
            volume_driver='volume',
            volume_options='volume',
            propagation='bind',
            no_copy='volume',
            labels='volume',
            tmpfs_size='tmpfs',
            tmpfs_mode='tmpfs',
        )
        for option, req_datatype in mount_option_types.items():
            if mount_dict.get(option) is not None and datatype != req_datatype:
                self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
        # Handle volume_driver and volume_options
        volume_driver = mount_dict.pop('volume_driver')
        volume_options = mount_dict.pop('volume_options')
        if volume_driver:
            if volume_options:
                volume_options = clean_dict_booleans_for_docker_api(volume_options)
            mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
        if mount_dict['labels']:
            mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
        if mount_dict.get('tmpfs_size') is not None:
            try:
                mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
            except ValueError as exc:
                self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
        if mount_dict.get('tmpfs_mode') is not None:
            try:
                mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
            except Exception as dummy:
                # Error message fixed to name the actual option ('tmpfs_mode',
                # previously the message said 'tmp_fs mode').
                self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
        # Fill expected mount dict
        mount_expected = dict(mount)
        mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
        mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
        # Add result to lists
        mounts_list.append(docker_types.Mount(**mount_dict))
        mounts_expected.append(omit_none_from_dict(mount_expected))
    return mounts_list, mounts_expected
def _process_rate_bps(self, option):
"""
Format device_read_bps and device_write_bps option
"""
devices_list = []
for v in getattr(self, option):
device_dict = dict((x.title(), y) for x, y in v.items())
device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
devices_list.append(device_dict)
setattr(self, option, devices_list)
def _process_rate_iops(self, option):
"""
Format device_read_iops and device_write_iops option
"""
devices_list = []
for v in getattr(self, option):
device_dict = dict((x.title(), y) for x, y in v.items())
devices_list.append(device_dict)
setattr(self, option, devices_list)
def _replace_container_names(self, mode):
"""
Parse IPC and PID modes. If they contain a container name, replace
with the container's ID.
"""
if mode is None or not mode.startswith('container:'):
return mode
container_name = mode[len('container:'):]
# Try to inspect container to see whether this is an ID or a
# name (and in the latter case, retrieve it's ID)
container = self.client.get_container(container_name)
if container is None:
# If we can't find the container, issue a warning and continue with
# what the user specified.
self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
return mode
return 'container:{0}'.format(container['Id'])
def _check_mount_target_collisions(self):
last = dict()
def f(t, name):
if t in last:
if name == last[t]:
self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
else:
self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
last[t] = name
if self.expected_mounts:
for t in [m['target'] for m in self.expected_mounts]:
f(t, 'mounts')
if self.volumes:
for v in self.volumes:
vs = v.split(':')
f(vs[0 if len(vs) == 1 else 1], 'volumes')
class Container(DockerBaseClass):
def __init__(self, container, parameters):
    """
    Wrap a `docker inspect` result (may be None when the container does not
    exist) together with the requested task parameters for comparison.
    """
    super(Container, self).__init__()
    self.raw = container
    self.Id = None
    self.container = container
    if container:
        self.Id = container['Id']
        self.Image = container['Image']
    self.log(self.container, pretty_print=True)
    self.parameters = parameters
    # Derived "expected_*" values are filled in later by
    # has_different_configuration(); initialize them all to None.
    self.parameters.expected_links = None
    self.parameters.expected_ports = None
    self.parameters.expected_exposed = None
    self.parameters.expected_volumes = None
    self.parameters.expected_ulimits = None
    self.parameters.expected_sysctls = None
    self.parameters.expected_etc_hosts = None
    self.parameters.expected_env = None
    self.parameters.expected_device_requests = None
    # Maps derived attribute name -> user-facing option name; used to pick
    # the configured comparison mode for each derived value.
    self.parameters_map = dict()
    self.parameters_map['expected_links'] = 'links'
    # NOTE(review): unlike its siblings this entry maps to itself rather than
    # to an option name - presumably deliberate; verify against the
    # comparisons table setup.
    self.parameters_map['expected_ports'] = 'expected_ports'
    self.parameters_map['expected_exposed'] = 'exposed_ports'
    self.parameters_map['expected_volumes'] = 'volumes'
    self.parameters_map['expected_ulimits'] = 'ulimits'
    self.parameters_map['expected_sysctls'] = 'sysctls'
    self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
    self.parameters_map['expected_env'] = 'env'
    self.parameters_map['expected_entrypoint'] = 'entrypoint'
    self.parameters_map['expected_binds'] = 'volumes'
    self.parameters_map['expected_cmd'] = 'command'
    self.parameters_map['expected_devices'] = 'devices'
    self.parameters_map['expected_healthcheck'] = 'healthcheck'
    self.parameters_map['expected_mounts'] = 'mounts'
    self.parameters_map['expected_device_requests'] = 'device_requests'
def fail(self, msg):
    # Delegate failure reporting to the client held by the task parameters.
    self.parameters.client.fail(msg)
@property
def exists(self):
    """Whether an inspect result for the container is present at all."""
    return bool(self.container)
@property
def removing(self):
    """Whether the container is currently in the 'removing' state."""
    state = self.container.get('State') if self.container else None
    if not state:
        return False
    return state.get('Status') == 'removing'
@property
def running(self):
    """Whether the container is running (and not a 'ghost' container)."""
    state = self.container.get('State') if self.container else None
    if not state:
        return False
    return bool(state.get('Running')) and not state.get('Ghost', False)
@property
def paused(self):
    """Whether the container is paused; False when state is unavailable."""
    state = (self.container or {}).get('State')
    return state.get('Paused', False) if state else False
def _compare(self, a, b, compare):
    '''
    Compare values a and b as described in compare.

    `compare` is a dict with 'comparison' and 'type' keys, as produced by
    the client's comparisons table.
    '''
    return compare_generic(a, b, compare['comparison'], compare['type'])
def _decode_mounts(self, mounts):
if not mounts:
return mounts
result = []
empty_dict = dict()
for mount in mounts:
res = dict()
res['type'] = mount.get('Type')
res['source'] = mount.get('Source')
res['target'] = mount.get('Target')
res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False
res['consistency'] = mount.get('Consistency')
res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
result.append(res)
return result
def has_different_configuration(self, image):
    '''
    Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)

    Normalizes both the requested parameters (the "expected_*" values) and
    the container's inspect output into comparable shapes, then compares
    each pair according to the configured comparison mode.
    '''
    self.log('Starting has_different_configuration')
    self.parameters.expected_entrypoint = self._get_expected_entrypoint()
    self.parameters.expected_links = self._get_expected_links()
    self.parameters.expected_ports = self._get_expected_ports()
    self.parameters.expected_exposed = self._get_expected_exposed(image)
    self.parameters.expected_volumes = self._get_expected_volumes(image)
    self.parameters.expected_binds = self._get_expected_binds(image)
    self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
    self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
    self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
    self.parameters.expected_env = self._get_expected_env(image)
    self.parameters.expected_cmd = self._get_expected_cmd()
    self.parameters.expected_devices = self._get_expected_devices()
    self.parameters.expected_healthcheck = self._get_expected_healthcheck()
    self.parameters.expected_device_requests = self._get_expected_device_requests()
    if not self.container.get('HostConfig'):
        self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
    if not self.container.get('Config'):
        self.fail("has_config_diff: Error parsing container properties. Config missing.")
    if not self.container.get('NetworkSettings'):
        self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
    host_config = self.container['HostConfig']
    log_config = host_config.get('LogConfig', dict())
    config = self.container['Config']
    network = self.container['NetworkSettings']
    # The previous version of the docker module ignored the detach state by
    # assuming if the container was running, it must have been detached.
    detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
    # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
    if config.get('ExposedPorts') is not None:
        expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
    else:
        expected_exposed = []
    # Map parameters to container inspect results
    config_mapping = dict(
        expected_cmd=config.get('Cmd'),
        domainname=config.get('Domainname'),
        hostname=config.get('Hostname'),
        user=config.get('User'),
        detach=detach,
        init=host_config.get('Init'),
        interactive=config.get('OpenStdin'),
        capabilities=host_config.get('CapAdd'),
        cap_drop=host_config.get('CapDrop'),
        cgroup_parent=host_config.get('CgroupParent'),
        expected_devices=host_config.get('Devices'),
        dns_servers=host_config.get('Dns'),
        dns_opts=host_config.get('DnsOptions'),
        dns_search_domains=host_config.get('DnsSearch'),
        expected_env=(config.get('Env') or []),
        expected_entrypoint=config.get('Entrypoint'),
        expected_etc_hosts=host_config['ExtraHosts'],
        expected_exposed=expected_exposed,
        groups=host_config.get('GroupAdd'),
        ipc_mode=host_config.get("IpcMode"),
        labels=config.get('Labels'),
        expected_links=host_config.get('Links'),
        mac_address=config.get('MacAddress', network.get('MacAddress')),
        memory_swappiness=host_config.get('MemorySwappiness'),
        network_mode=host_config.get('NetworkMode'),
        userns_mode=host_config.get('UsernsMode'),
        oom_killer=host_config.get('OomKillDisable'),
        oom_score_adj=host_config.get('OomScoreAdj'),
        pid_mode=host_config.get('PidMode'),
        privileged=host_config.get('Privileged'),
        expected_ports=host_config.get('PortBindings'),
        read_only=host_config.get('ReadonlyRootfs'),
        runtime=host_config.get('Runtime'),
        shm_size=host_config.get('ShmSize'),
        security_opts=host_config.get("SecurityOpt"),
        stop_signal=config.get("StopSignal"),
        tmpfs=host_config.get('Tmpfs'),
        tty=config.get('Tty'),
        expected_ulimits=host_config.get('Ulimits'),
        expected_sysctls=host_config.get('Sysctls'),
        uts=host_config.get('UTSMode'),
        expected_volumes=config.get('Volumes'),
        expected_binds=host_config.get('Binds'),
        volume_driver=host_config.get('VolumeDriver'),
        volumes_from=host_config.get('VolumesFrom'),
        working_dir=config.get('WorkingDir'),
        publish_all_ports=host_config.get('PublishAllPorts'),
        expected_healthcheck=config.get('Healthcheck'),
        disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
        device_read_bps=host_config.get('BlkioDeviceReadBps'),
        device_write_bps=host_config.get('BlkioDeviceWriteBps'),
        device_read_iops=host_config.get('BlkioDeviceReadIOps'),
        device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
        expected_device_requests=host_config.get('DeviceRequests'),
        pids_limit=host_config.get('PidsLimit'),
        # According to https://github.com/moby/moby/, support for HostConfig.Mounts
        # has been included at least since v17.03.0-ce, which has API version 1.26.
        # The previous tag, v1.9.1, has API version 1.21 and does not have
        # HostConfig.Mounts. I have no idea what about API 1.25...
        expected_mounts=self._decode_mounts(host_config.get('Mounts')),
        cpus=host_config.get('NanoCpus'),
    )
    # Options which don't make sense without their accompanying option
    if self.parameters.log_driver:
        config_mapping['log_driver'] = log_config.get('Type')
        config_mapping['log_options'] = log_config.get('Config')
    if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
        # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
        # it has a default value, that's why we have to jump through the hoops here
        config_mapping['auto_remove'] = host_config.get('AutoRemove')
    if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
        # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
        # stop_timeout has a hybrid role, in that it used to be something only used
        # for stopping containers, and is now also used as a container property.
        # That's why it needs special handling here.
        config_mapping['stop_timeout'] = config.get('StopTimeout')
    if self.parameters.client.docker_api_version < LooseVersion('1.22'):
        # For docker API < 1.22, update_container() is not supported. Thus
        # we need to handle all limits which are usually handled by
        # update_container() as configuration changes which require a container
        # restart.
        restart_policy = host_config.get('RestartPolicy', dict())
        # Options which don't make sense without their accompanying option
        if self.parameters.restart_policy:
            config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
        config_mapping.update(dict(
            blkio_weight=host_config.get('BlkioWeight'),
            cpu_period=host_config.get('CpuPeriod'),
            cpu_quota=host_config.get('CpuQuota'),
            cpu_shares=host_config.get('CpuShares'),
            cpuset_cpus=host_config.get('CpusetCpus'),
            cpuset_mems=host_config.get('CpusetMems'),
            kernel_memory=host_config.get("KernelMemory"),
            memory=host_config.get('Memory'),
            memory_reservation=host_config.get('MemoryReservation'),
            memory_swap=host_config.get('MemorySwap'),
            restart_policy=restart_policy.get('Name')
        ))
    differences = DifferenceTracker()
    for key, value in config_mapping.items():
        # Skip options that the detected Docker/SDK versions do not support.
        minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
        if not minimal_version.get('supported', True):
            continue
        compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
        self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare))
        if getattr(self.parameters, key, None) is not None:
            match = self._compare(getattr(self.parameters, key), value, compare)
            if not match:
                # no match. record the differences
                p = getattr(self.parameters, key)
                c = value
                if compare['type'] == 'set':
                    # Since the order does not matter, sort so that the diff output is better.
                    if p is not None:
                        p = sorted(p)
                    if c is not None:
                        c = sorted(c)
                elif compare['type'] == 'set(dict)':
                    # Since the order does not matter, sort so that the diff output is better.
                    if key == 'expected_mounts':
                        # For selected values, use one entry as key
                        def sort_key_fn(x):
                            return x['target']
                    else:
                        # We sort the list of dictionaries by using the sorted items of a dict as its key.
                        def sort_key_fn(x):
                            return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
                    if p is not None:
                        p = sorted(p, key=sort_key_fn)
                    if c is not None:
                        c = sorted(c, key=sort_key_fn)
                differences.add(key, parameter=p, active=c)
    has_differences = not differences.empty
    return has_differences, differences
def has_different_resource_limits(self):
    '''
    Diff parameters and container resource limits

    Covers only the limits that update_container() can change in place;
    returns (False, []) when the API is too old to support updates.
    '''
    if not self.container.get('HostConfig'):
        self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
    if self.parameters.client.docker_api_version < LooseVersion('1.22'):
        # update_container() call not supported
        return False, []
    host_config = self.container['HostConfig']
    restart_policy = host_config.get('RestartPolicy') or dict()
    config_mapping = dict(
        blkio_weight=host_config.get('BlkioWeight'),
        cpu_period=host_config.get('CpuPeriod'),
        cpu_quota=host_config.get('CpuQuota'),
        cpu_shares=host_config.get('CpuShares'),
        cpuset_cpus=host_config.get('CpusetCpus'),
        cpuset_mems=host_config.get('CpusetMems'),
        kernel_memory=host_config.get("KernelMemory"),
        memory=host_config.get('Memory'),
        memory_reservation=host_config.get('MemoryReservation'),
        memory_swap=host_config.get('MemorySwap'),
        restart_policy=restart_policy.get('Name')
    )
    # Options which don't make sense without their accompanying option
    if self.parameters.restart_policy:
        config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
    differences = DifferenceTracker()
    for key, value in config_mapping.items():
        if getattr(self.parameters, key, None):
            compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
            match = self._compare(getattr(self.parameters, key), value, compare)
            if not match:
                # no match. record the differences
                differences.add(key, parameter=getattr(self.parameters, key), active=value)
    different = not differences.empty
    return different, differences
def has_network_differences(self):
    '''
    Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6

    :return: (bool, list of per-network difference dicts with 'parameter'
        and 'container' keys)
    '''
    different = False
    differences = []
    if not self.parameters.networks:
        return different, differences
    if not self.container.get('NetworkSettings'):
        self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
    connected_networks = self.container['NetworkSettings']['Networks']
    for network in self.parameters.networks:
        network_info = connected_networks.get(network['name'])
        if network_info is None:
            # Requested network is not connected at all.
            different = True
            differences.append(dict(
                parameter=network,
                container=None
            ))
        else:
            diff = False
            network_info_ipam = network_info.get('IPAMConfig') or {}
            if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
                diff = True
            if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
                diff = True
            if network.get('aliases'):
                # The container may carry extra aliases; only missing ones count.
                if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
                    diff = True
            if network.get('links'):
                expected_links = []
                for link, alias in network['links']:
                    expected_links.append("%s:%s" % (link, alias))
                if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
                    diff = True
            if diff:
                different = True
                differences.append(dict(
                    parameter=network,
                    container=dict(
                        name=network['name'],
                        ipv4_address=network_info_ipam.get('IPv4Address'),
                        ipv6_address=network_info_ipam.get('IPv6Address'),
                        aliases=network_info.get('Aliases'),
                        links=network_info.get('Links')
                    )
                ))
    return different, differences
def has_extra_networks(self):
    '''
    Check if the container is connected to non-requested networks

    :return: (bool, list of dict(name=..., id=...)) describing the extras
    '''
    if not self.container.get('NetworkSettings'):
        self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
    requested = set()
    if self.parameters.networks:
        requested = set(net['name'] for net in self.parameters.networks)
    extra_networks = []
    connected = self.container['NetworkSettings'].get('Networks') or {}
    for name, net_config in connected.items():
        if name not in requested:
            extra_networks.append(dict(name=name, id=net_config['NetworkID']))
    return bool(extra_networks), extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if self.parameters.published_ports is None:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
if isinstance(config[0], int):
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links:
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
self.log('_get_expected_binds')
image_vols = []
if image:
image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
param_vols = []
if self.parameters.volumes:
for vol in self.parameters.volumes:
host = None
if ':' in vol:
parts = vol.split(':')
if len(parts) == 3:
host, container, mode = parts
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
if len(parts) == 2:
if not is_volume_permissions(parts[1]):
host, container, mode = parts + ['rw']
if host:
param_vols.append("%s:%s:%s" % (host, container, mode))
result = list(set(image_vols + param_vols))
self.log("expected_binds:")
self.log(result, pretty_print=True)
return result
def _get_expected_device_requests(self):
if self.parameters.device_requests is None:
return None
device_requests = []
for dr in self.parameters.device_requests:
device_requests.append({
'Driver': dr['driver'],
'Count': dr['count'],
'DeviceIDs': dr['device_ids'],
'Capabilities': dr['capabilities'],
'Options': dr['options'],
})
return device_requests
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
def _get_expected_volumes(self, image):
self.log('_get_expected_volumes')
expected_vols = dict()
if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
if self.parameters.volumes:
for vol in self.parameters.volumes:
# We only expect anonymous volumes to show up in the list
if ':' in vol:
parts = vol.split(':')
if len(parts) == 3:
continue
if len(parts) == 2:
if not is_volume_permissions(parts[1]):
continue
expected_vols[vol] = dict()
if not expected_vols:
expected_vols = None
self.log("expected_volumes:")
self.log(expected_vols, pretty_print=True)
return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image[self.parameters.client.image_inspect_source].get('Env'):
for env_var in image[self.parameters.client.image_inspect_source]['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
param_ports = []
if self.parameters.ports:
param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_sysctls(self, config_sysctls):
self.log('_get_expected_sysctls')
if config_sysctls is None:
return None
result = dict()
for key, value in config_sysctls.items():
result[key] = to_text(value, errors='surrogate_or_strict')
return result
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
def _normalize_port(self, port):
if '/' not in port:
return port + '/tcp'
return port
def _get_expected_healthcheck(self):
self.log('_get_expected_healthcheck')
expected_healthcheck = dict()
if self.parameters.healthcheck:
expected_healthcheck.update([(k.title().replace("_", ""), v)
for k, v in self.parameters.healthcheck.items()])
return expected_healthcheck
class ContainerManager(DockerBaseClass):
    '''
    Perform container management tasks
    '''

    def __init__(self, client):
        '''
        Run the requested state transition immediately on construction and
        collect the results in ``self.results``.

        :param client: docker client wrapper carrying module params and the
            connection to the daemon.
        '''
        super(ContainerManager, self).__init__()
        # Warn about option combinations that are silently ignored.
        if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
            client.module.warn('log_options is ignored when log_driver is not specified')
        if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
            client.module.warn('healthcheck is ignored when test is not specified')
        if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
            client.module.warn('restart_retries is ignored when restart_policy is not specified')
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {'changed': False, 'actions': []}
        self.diff = {}
        self.diff_tracker = DifferenceTracker()
        self.facts = {}
        state = self.parameters.state
        if state in ('stopped', 'started', 'present'):
            self.present(state)
        elif state == 'absent':
            self.absent()
        # 'actions' is internal bookkeeping; only expose it in check/debug mode.
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')
        if self.client.module._diff or self.parameters.debug:
            self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff
        if self.facts:
            self.results['container'] = self.facts

    def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
        '''
        Poll the container until it reaches one of ``complete_states`` (or is
        removed, if ``accept_removal``). Fails if the container enters a state
        outside ``wait_states`` or if ``max_wait`` seconds elapse.
        '''
        delay = 1.0
        total_wait = 0
        while True:
            # Inspect container
            result = self.client.get_container_by_id(container_id)
            if result is None:
                if accept_removal:
                    return
                msg = 'Encountered vanished container while waiting for container "{0}"'
                self.fail(msg.format(container_id))
            # Check container state
            state = result.get('State', {}).get('Status')
            if complete_states is not None and state in complete_states:
                return
            if wait_states is not None and state not in wait_states:
                msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
                self.fail(msg.format(container_id, state))
            # Wait
            if max_wait is not None:
                if total_wait > max_wait:
                    msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
                    self.fail(msg.format(container_id, max_wait))
                if total_wait + delay > max_wait:
                    delay = max_wait - total_wait
            sleep(delay)
            total_wait += delay
            # Exponential backoff, but never wait longer than 10 seconds
            # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
            # until the maximal 10 seconds delay is reached. By then, the
            # code will have slept for ~1.5 minutes.)
            delay = min(delay * 1.1, 10)

    def present(self, state):
        '''
        Ensure the container exists in the requested configuration, creating,
        recreating, starting, restarting, pausing or stopping it as needed.
        '''
        container = self._get_container(self.parameters.name)
        was_running = container.running
        was_paused = container.paused
        container_created = False
        # If the image parameter was passed then we need to deal with the image
        # version comparison. Otherwise we handle this depending on whether
        # the container already runs or not; in the former case, in case the
        # container needs to be restarted, we use the existing container's
        # image ID.
        image = self._get_image()
        self.log(image, pretty_print=True)
        if not container.exists or container.removing:
            # New container
            if container.removing:
                self.log('Found container in removal phase')
            else:
                self.log('No container found')
            if not self.parameters.image:
                self.fail('Cannot create container when image is not specified!')
            self.diff_tracker.add('exists', parameter=True, active=False)
            if container.removing and not self.check_mode:
                # Wait for container to be removed before trying to create it
                self.wait_for_state(
                    container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
            new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
            if new_container:
                container = new_container
            container_created = True
        else:
            # Existing container
            different, differences = container.has_different_configuration(image)
            image_different = False
            if self.parameters.comparisons['image']['comparison'] == 'strict':
                image_different = self._image_is_different(image, container)
            if image_different or different or self.parameters.recreate:
                self.diff_tracker.merge(differences)
                self.diff['differences'] = differences.get_legacy_docker_container_diffs()
                if image_different:
                    self.diff['image_different'] = True
                self.log("differences")
                self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
                image_to_use = self.parameters.image
                if not image_to_use and container and container.Image:
                    image_to_use = container.Image
                if not image_to_use:
                    self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
                if container.running:
                    self.container_stop(container.Id)
                self.container_remove(container.Id)
                if not self.check_mode:
                    self.wait_for_state(
                        container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
                new_container = self.container_create(image_to_use, self.parameters.create_parameters)
                if new_container:
                    container = new_container
                container_created = True
        if container and container.exists:
            container = self.update_limits(container)
            container = self.update_networks(container, container_created)
            if state == 'started' and not container.running:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                container = self.container_start(container.Id)
            elif state == 'started' and self.parameters.restart:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                self.diff_tracker.add('restarted', parameter=True, active=False)
                container = self.container_restart(container.Id)
            elif state == 'stopped' and container.running:
                self.diff_tracker.add('running', parameter=False, active=was_running)
                self.container_stop(container.Id)
                container = self._get_container(container.Id)
            if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused:
                self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
                if not self.check_mode:
                    try:
                        if self.parameters.paused:
                            self.client.pause(container=container.Id)
                        else:
                            self.client.unpause(container=container.Id)
                    except Exception as exc:
                        self.fail("Error %s container %s: %s" % (
                            "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc)
                        ))
                    container = self._get_container(container.Id)
                self.results['changed'] = True
                self.results['actions'].append(dict(set_paused=self.parameters.paused))
        self.facts = container.raw

    def absent(self):
        '''
        Ensure the container does not exist, stopping it first when running.
        '''
        container = self._get_container(self.parameters.name)
        if container.exists:
            if container.running:
                self.diff_tracker.add('running', parameter=False, active=True)
                self.container_stop(container.Id)
            self.diff_tracker.add('exists', parameter=False, active=True)
            self.container_remove(container.Id)

    def fail(self, msg, **kwargs):
        '''Abort the module run with a failure message.'''
        self.client.fail(msg, **kwargs)

    def _output_logs(self, msg):
        '''Forward container output to the Ansible log.'''
        self.client.module.log(msg=msg)

    def _get_container(self, container):
        '''
        Expects container ID or Name. Returns a container object
        '''
        return Container(self.client.get_container(container), self.parameters)

    def _get_image(self):
        '''
        Look up (and, unless in check mode, pull if requested/missing) the
        image named by the image parameter. Returns the inspect data or None.
        '''
        if not self.parameters.image:
            self.log('No image specified')
            return None
        if is_image_name_id(self.parameters.image):
            image = self.client.find_image_by_id(self.parameters.image)
        else:
            repository, tag = utils.parse_repository_tag(self.parameters.image)
            if not tag:
                tag = "latest"
            image = self.client.find_image(repository, tag)
            if not image or self.parameters.pull:
                if not self.check_mode:
                    self.log("Pull the image.")
                    image, alreadyToLatest = self.client.pull_image(repository, tag)
                    if alreadyToLatest:
                        self.results['changed'] = False
                    else:
                        self.results['changed'] = True
                        self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
                elif not image:
                    # If the image isn't there, claim we'll pull.
                    # (Implicitly: if the image is there, claim it already was latest.)
                    self.results['changed'] = True
                    self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
        self.log("image")
        self.log(image, pretty_print=True)
        return image

    def _image_is_different(self, image, container):
        '''Return True if the container runs a different image ID than requested.'''
        if image and image.get('Id'):
            if container and container.Image:
                if image.get('Id') != container.Image:
                    self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
                    return True
        return False

    def update_limits(self, container):
        '''Apply differing resource limits to the container in place.'''
        limits_differ, different_limits = container.has_different_resource_limits()
        if limits_differ:
            self.log("limit differences:")
            self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
            self.diff_tracker.merge(different_limits)
        if limits_differ and not self.check_mode:
            self.container_update(container.Id, self.parameters.update_parameters)
            return self._get_container(container.Id)
        return container

    def update_networks(self, container, container_created):
        '''Connect/disconnect the container to match the requested networks.'''
        updated_container = container
        if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
            has_network_differences, network_differences = container.has_network_differences()
            if has_network_differences:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(network_differences=network_differences))
                else:
                    self.diff['differences'] = [dict(network_differences=network_differences)]
                for netdiff in network_differences:
                    self.diff_tracker.add(
                        'network.{0}'.format(netdiff['parameter']['name']),
                        parameter=netdiff['parameter'],
                        active=netdiff['container']
                    )
                self.results['changed'] = True
                updated_container = self._add_networks(container, network_differences)
        if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
            has_extra_networks, extra_networks = container.has_extra_networks()
            if has_extra_networks:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(purge_networks=extra_networks))
                else:
                    self.diff['differences'] = [dict(purge_networks=extra_networks)]
                for extra_network in extra_networks:
                    self.diff_tracker.add(
                        'network.{0}'.format(extra_network['name']),
                        active=extra_network
                    )
                self.results['changed'] = True
                updated_container = self._purge_networks(container, extra_networks)
        return updated_container

    def _add_networks(self, container, differences):
        '''Reconnect the container to each differing network with new settings.'''
        for diff in differences:
            # remove the container from the network, if connected
            if diff.get('container'):
                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
                if not self.check_mode:
                    try:
                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
                    except Exception as exc:
                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
                                                                                          to_native(exc)))
            # connect to the network
            params = dict()
            for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
                if diff['parameter'].get(para):
                    params[para] = diff['parameter'][para]
            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
            if not self.check_mode:
                try:
                    self.log("Connecting container to network %s" % diff['parameter']['id'])
                    self.log(params, pretty_print=True)
                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
                except Exception as exc:
                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
        return self._get_container(container.Id)

    def _purge_networks(self, container, networks):
        '''Disconnect the container from each of the given networks.'''
        for network in networks:
            self.results['actions'].append(dict(removed_from_network=network['name']))
            if not self.check_mode:
                try:
                    self.client.disconnect_container_from_network(container.Id, network['name'])
                except Exception as exc:
                    self.fail("Error disconnecting container from network %s - %s" % (network['name'],
                                                                                      to_native(exc)))
        return self._get_container(container.Id)

    def container_create(self, image, create_parameters):
        '''Create a new container from ``image``; returns the Container or None in check mode.'''
        self.log("create container")
        self.log("image: %s parameters:" % image)
        self.log(create_parameters, pretty_print=True)
        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
        self.results['changed'] = True
        new_container = None
        if not self.check_mode:
            try:
                new_container = self.client.create_container(image, **create_parameters)
                self.client.report_warnings(new_container)
            except Exception as exc:
                self.fail("Error creating container: %s" % to_native(exc))
            return self._get_container(new_container['Id'])
        return new_container

    def container_start(self, container_id):
        '''
        Start the container; when detach is False, also wait for it to finish,
        capture its output/status, and optionally clean it up.
        '''
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.client.start(container=container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
            if self.parameters.detach is False:
                if self.client.docker_py_version >= LooseVersion('3.0'):
                    status = self.client.wait(container_id)['StatusCode']
                else:
                    status = self.client.wait(container_id)
                self.client.fail_results['status'] = status
                self.results['status'] = status
                if self.parameters.auto_remove:
                    output = "Cannot retrieve result as auto_remove is enabled"
                    if self.parameters.output_logs:
                        self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
                else:
                    config = self.client.inspect_container(container_id)
                    logging_driver = config['HostConfig']['LogConfig']['Type']
                    if logging_driver in ('json-file', 'journald'):
                        output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
                        if self.parameters.output_logs:
                            self._output_logs(msg=output)
                    else:
                        output = "Result logged using `%s` driver" % logging_driver
                if status != 0:
                    self.fail(output)
                if self.parameters.cleanup:
                    self.container_remove(container_id, force=True)
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                return insp
        return self._get_container(container_id)

    def container_remove(self, container_id, link=False, force=False):
        '''Remove the container, unpausing it first if the daemon requires that.'''
        volume_state = (not self.parameters.keep_volumes)
        # NOTE: fixed log-format typo ("force%s" -> "force:%s").
        self.log("remove container container:%s v:%s link:%s force:%s" % (container_id, volume_state, link, force))
        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            count = 0
            while True:
                try:
                    response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
                except NotFound as dummy:
                    pass
                except APIError as exc:
                    if 'Unpause the container before stopping or killing' in exc.explanation:
                        # New docker daemon versions do not allow containers to be removed
                        # if they are paused. Make sure we don't end up in an infinite loop.
                        if count == 3:
                            self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
                        count += 1
                        # Unpause
                        try:
                            self.client.unpause(container=container_id)
                        except Exception as exc2:
                            self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
                        # Now try again
                        continue
                    if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
                        pass
                    else:
                        self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
                except Exception as exc:
                    self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
                # We only loop when explicitly requested by 'continue'
                break
        return response

    def container_update(self, container_id, update_parameters):
        '''Apply resource-limit updates to a running container.'''
        if update_parameters:
            self.log("update container %s" % (container_id))
            self.log(update_parameters, pretty_print=True)
            self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
            self.results['changed'] = True
            if not self.check_mode and callable(getattr(self.client, 'update_container')):
                try:
                    result = self.client.update_container(container_id, **update_parameters)
                    self.client.report_warnings(result)
                except Exception as exc:
                    self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
        return self._get_container(container_id)

    def container_kill(self, container_id):
        '''Kill the container, honoring the kill_signal parameter when set.'''
        self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            try:
                if self.parameters.kill_signal:
                    response = self.client.kill(container_id, signal=self.parameters.kill_signal)
                else:
                    response = self.client.kill(container_id)
            except Exception as exc:
                self.fail("Error killing container %s: %s" % (container_id, exc))
        return response

    def container_restart(self, container_id):
        '''Restart the container, honoring stop_timeout when set.'''
        self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                if self.parameters.stop_timeout:
                    dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
                else:
                    dummy = self.client.restart(container_id)
            except Exception as exc:
                self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
        return self._get_container(container_id)

    def container_stop(self, container_id):
        '''Stop the container (or kill it when force_kill is set), unpausing if needed.'''
        if self.parameters.force_kill:
            self.container_kill(container_id)
            return
        self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            count = 0
            while True:
                try:
                    if self.parameters.stop_timeout:
                        response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
                    else:
                        response = self.client.stop(container_id)
                except APIError as exc:
                    if 'Unpause the container before stopping or killing' in exc.explanation:
                        # New docker daemon versions do not allow containers to be removed
                        # if they are paused. Make sure we don't end up in an infinite loop.
                        if count == 3:
                            self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
                        count += 1
                        # Unpause
                        try:
                            self.client.unpause(container=container_id)
                        except Exception as exc2:
                            self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
                        # Now try again
                        continue
                    self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
                except Exception as exc:
                    self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
                # We only loop when explicitly requested by 'continue'
                break
        return response
def detect_ipvX_address_usage(client):
    '''
    Helper function to detect whether any specified network uses ipv4_address or ipv6_address
    '''
    networks = client.module.params.get("networks") or []
    return any(
        network.get('ipv4_address') is not None or network.get('ipv6_address') is not None
        for network in networks
    )
class AnsibleDockerClientContainer(AnsibleDockerClient):
    '''
    Docker client specialized for the docker_container module: builds the
    per-option comparison configuration and option/version gating used when
    diffing module parameters against an existing container.
    '''

    # A list of module options which are not docker container properties
    __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
        'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
        'recreate', 'restart', 'state', 'networks', 'cleanup', 'kill_signal',
        'output_logs', 'paused', 'removal_wait_timeout', 'default_host_ip',
    ] + list(DOCKER_COMMON_ARGS.keys()))
def _parse_comparisons(self):
comparisons = {}
comp_aliases = {}
# Put in defaults
explicit_types = dict(
command='list',
devices='set(dict)',
device_requests='set(dict)',
dns_search_domains='list',
dns_servers='list',
env='set',
entrypoint='list',
etc_hosts='set',
mounts='set(dict)',
networks='set(dict)',
ulimits='set(dict)',
device_read_bps='set(dict)',
device_write_bps='set(dict)',
device_read_iops='set(dict)',
device_write_iops='set(dict)',
)
all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
default_values = dict(
stop_timeout='ignore',
)
for option, data in self.module.argument_spec.items():
all_options.add(option)
for alias in data.get('aliases', []):
all_options.add(alias)
# Ignore options which aren't used as container properties
if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
continue
# Determine option type
if option in explicit_types:
datatype = explicit_types[option]
elif data['type'] == 'list':
datatype = 'set'
elif data['type'] == 'dict':
datatype = 'dict'
else:
datatype = 'value'
# Determine comparison type
if option in default_values:
comparison = default_values[option]
elif datatype in ('list', 'value'):
comparison = 'strict'
else:
comparison = 'allow_more_present'
comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
# Keep track of aliases
comp_aliases[option] = option
for alias in data.get('aliases', []):
comp_aliases[alias] = option
# Process legacy ignore options
if self.module.params['ignore_image']:
comparisons['image']['comparison'] = 'ignore'
if self.module.params['purge_networks']:
comparisons['networks']['comparison'] = 'strict'
# Process options
if self.module.params.get('comparisons'):
# If '*' appears in comparisons, process it first
if '*' in self.module.params['comparisons']:
value = self.module.params['comparisons']['*']
if value not in ('strict', 'ignore'):
self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
for option, v in comparisons.items():
if option == 'networks':
# `networks` is special: only update if
# some value is actually specified
if self.module.params['networks'] is None:
continue
v['comparison'] = value
# Now process all other comparisons.
comp_aliases_used = {}
for key, value in self.module.params['comparisons'].items():
if key == '*':
continue
# Find main key
key_main = comp_aliases.get(key)
if key_main is None:
if key_main in all_options:
self.fail("The module option '%s' cannot be specified in the comparisons dict, "
"since it does not correspond to container's state!" % key)
self.fail("Unknown module option '%s' in comparisons dict!" % key)
if key_main in comp_aliases_used:
self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
comp_aliases_used[key_main] = key
# Check value and update accordingly
if value in ('strict', 'ignore'):
comparisons[key_main]['comparison'] = value
elif value == 'allow_more_present':
if comparisons[key_main]['type'] == 'value':
self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
comparisons[key_main]['comparison'] = value
else:
self.fail("Unknown comparison mode '%s'!" % value)
# Add implicit options
comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
comparisons['disable_healthcheck'] = dict(type='value',
comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
name='disable_healthcheck')
# Check legacy values
if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
self.module.warn('The ignore_image option has been overridden by the comparisons option!')
if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
self.module.warn('The purge_networks option has been overridden by the comparisons option!')
self.comparisons = comparisons
    def _get_additional_minimal_versions(self):
        '''
        Determine whether the stop_timeout option is supported for updating a
        container's configuration (needs Docker API >= 1.25 and Docker SDK for
        Python >= 2.1) and record the result in option_minimal_versions.
        '''
        stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
        # stop_timeout only matters for configuration updates when it is set
        # and the container is not being removed.
        stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
        if stop_timeout_supported:
            stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
            if stop_timeout_needed_for_update and not stop_timeout_supported:
                # We warn (instead of fail) since in older versions, stop_timeout was not used
                # to update the container's configuration, but only when stopping a container.
                self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
                                 "the container's stop_timeout configuration. "
                                 "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
        else:
            if stop_timeout_needed_for_update and not stop_timeout_supported:
                # We warn (instead of fail) since in older versions, stop_timeout was not used
                # to update the container's configuration, but only when stopping a container.
                self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
                                 "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
        self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
    def __init__(self, **kwargs):
        """Build the specialized Docker client and normalize module parameters.

        Declares per-option minimal Docker SDK / Docker API versions, picks the
        image-inspection source for the API version in use, resolves the
        comparison configuration, and applies the legacy defaults when
        ``container_default_behavior`` is ``compatibility``.
        """
        # Option name -> minimal docker-py / Docker API versions supporting it.
        # Empty dicts mark options that are validated internally.
        option_minimal_versions = dict(
            # internal options
            log_config=dict(),
            publish_all_ports=dict(),
            ports=dict(),
            volume_binds=dict(),
            name=dict(),
            # normal options
            device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'),
            dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
            ipc_mode=dict(docker_api_version='1.25'),
            mac_address=dict(docker_api_version='1.25'),
            oom_score_adj=dict(docker_api_version='1.22'),
            shm_size=dict(docker_api_version='1.22'),
            stop_signal=dict(docker_api_version='1.21'),
            tmpfs=dict(docker_api_version='1.22'),
            volume_driver=dict(docker_api_version='1.21'),
            memory_reservation=dict(docker_api_version='1.21'),
            kernel_memory=dict(docker_api_version='1.21'),
            auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
            healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
            init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
            runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
            sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
            userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
            uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
            pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
            mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
            cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
            # specials
            ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
                                        detect_usage=detect_ipvX_address_usage,
                                        usage_msg='ipv4_address or ipv6_address in networks'),
            stop_timeout=dict(),  # see _get_additional_minimal_versions()
        )
        super(AnsibleDockerClientContainer, self).__init__(
            option_minimal_versions=option_minimal_versions,
            option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
            **kwargs
        )
        # Older daemons expose the image configuration under a different key.
        self.image_inspect_source = 'Config'
        if self.docker_api_version < LooseVersion('1.21'):
            self.image_inspect_source = 'ContainerConfig'
        self._get_additional_minimal_versions()
        self._parse_comparisons()
        if self.module.params['container_default_behavior'] is None:
            self.module.params['container_default_behavior'] = 'compatibility'
            self.module.deprecate(
                'The container_default_behavior option will change its default value from "compatibility" to '
                '"no_defaults" in community.docker 2.0.0. To remove this warning, please specify an explicit value for it now',
                version='2.0.0', collection_name='community.docker'  # was Ansible 2.14 / community.general 3.0.0
            )
        if self.module.params['container_default_behavior'] == 'compatibility':
            # Legacy defaults applied only when the user did not set the option.
            old_default_values = dict(
                auto_remove=False,
                detach=True,
                init=False,
                interactive=False,
                memory="0",
                paused=False,
                privileged=False,
                read_only=False,
                tty=False,
            )
            for param, value in old_default_values.items():
                if self.module.params[param] is None:
                    self.module.params[param] = value
def main():
    """Module entry point.

    Declares the full argument spec, constructs the specialized Docker client,
    runs the container state machine, and converts Docker/requests errors into
    Ansible module failures.
    """
    argument_spec = dict(
        auto_remove=dict(type='bool'),
        blkio_weight=dict(type='int'),
        capabilities=dict(type='list', elements='str'),
        cap_drop=dict(type='list', elements='str'),
        cgroup_parent=dict(type='str'),
        cleanup=dict(type='bool', default=False),
        command=dict(type='raw'),
        comparisons=dict(type='dict'),
        container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
        cpu_period=dict(type='int'),
        cpu_quota=dict(type='int'),
        cpus=dict(type='float'),
        cpuset_cpus=dict(type='str'),
        cpuset_mems=dict(type='str'),
        cpu_shares=dict(type='int'),
        default_host_ip=dict(type='str'),
        detach=dict(type='bool'),
        devices=dict(type='list', elements='str'),
        # Block-device throttling options: bps rates are strings (may carry
        # units), iops rates are plain integers.
        device_read_bps=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='str'),
        )),
        device_write_bps=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='str'),
        )),
        device_read_iops=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='int'),
        )),
        device_write_iops=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='int'),
        )),
        device_requests=dict(type='list', elements='dict', options=dict(
            capabilities=dict(type='list', elements='list'),
            count=dict(type='int'),
            device_ids=dict(type='list', elements='str'),
            driver=dict(type='str'),
            options=dict(type='dict'),
        )),
        dns_servers=dict(type='list', elements='str'),
        dns_opts=dict(type='list', elements='str'),
        dns_search_domains=dict(type='list', elements='str'),
        domainname=dict(type='str'),
        entrypoint=dict(type='list', elements='str'),
        env=dict(type='dict'),
        env_file=dict(type='path'),
        etc_hosts=dict(type='dict'),
        exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
        force_kill=dict(type='bool', default=False, aliases=['forcekill']),
        groups=dict(type='list', elements='str'),
        healthcheck=dict(type='dict', options=dict(
            test=dict(type='raw'),
            interval=dict(type='str'),
            timeout=dict(type='str'),
            start_period=dict(type='str'),
            retries=dict(type='int'),
        )),
        hostname=dict(type='str'),
        ignore_image=dict(type='bool', default=False),
        image=dict(type='str'),
        init=dict(type='bool'),
        interactive=dict(type='bool'),
        ipc_mode=dict(type='str'),
        keep_volumes=dict(type='bool', default=True),
        kernel_memory=dict(type='str'),
        kill_signal=dict(type='str'),
        labels=dict(type='dict'),
        links=dict(type='list', elements='str'),
        log_driver=dict(type='str'),
        log_options=dict(type='dict', aliases=['log_opt']),
        mac_address=dict(type='str'),
        memory=dict(type='str'),
        memory_reservation=dict(type='str'),
        memory_swap=dict(type='str'),
        memory_swappiness=dict(type='int'),
        mounts=dict(type='list', elements='dict', options=dict(
            target=dict(type='str', required=True),
            source=dict(type='str'),
            type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
            read_only=dict(type='bool'),
            consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
            propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
            no_copy=dict(type='bool'),
            labels=dict(type='dict'),
            volume_driver=dict(type='str'),
            volume_options=dict(type='dict'),
            tmpfs_size=dict(type='str'),
            tmpfs_mode=dict(type='str'),
        )),
        name=dict(type='str', required=True),
        network_mode=dict(type='str'),
        networks=dict(type='list', elements='dict', options=dict(
            name=dict(type='str', required=True),
            ipv4_address=dict(type='str'),
            ipv6_address=dict(type='str'),
            aliases=dict(type='list', elements='str'),
            links=dict(type='list', elements='str'),
        )),
        networks_cli_compatible=dict(type='bool', default=True),
        oom_killer=dict(type='bool'),
        oom_score_adj=dict(type='int'),
        output_logs=dict(type='bool', default=False),
        paused=dict(type='bool'),
        pid_mode=dict(type='str'),
        pids_limit=dict(type='int'),
        privileged=dict(type='bool'),
        published_ports=dict(type='list', elements='str', aliases=['ports']),
        pull=dict(type='bool', default=False),
        purge_networks=dict(type='bool', default=False),
        read_only=dict(type='bool'),
        recreate=dict(type='bool', default=False),
        removal_wait_timeout=dict(type='float'),
        restart=dict(type='bool', default=False),
        restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
        restart_retries=dict(type='int'),
        runtime=dict(type='str'),
        security_opts=dict(type='list', elements='str'),
        shm_size=dict(type='str'),
        state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
        stop_signal=dict(type='str'),
        stop_timeout=dict(type='int'),
        sysctls=dict(type='dict'),
        tmpfs=dict(type='list', elements='str'),
        tty=dict(type='bool'),
        ulimits=dict(type='list', elements='str'),
        user=dict(type='str'),
        userns_mode=dict(type='str'),
        uts=dict(type='str'),
        volume_driver=dict(type='str'),
        volumes=dict(type='list', elements='str'),
        volumes_from=dict(type='list', elements='str'),
        working_dir=dict(type='str'),
    )
    # An image is only required when the container must be (re)created.
    required_if = [
        ('state', 'present', ['image'])
    ]
    client = AnsibleDockerClientContainer(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_api_version='1.20',
    )
    # Warn about the upcoming network_mode default change when the user relies
    # on the current implicit behavior.
    if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
        client.module.deprecate(
            'Please note that the default value for `network_mode` will change from not specified '
            '(which is equal to `default`) to the name of the first network in `networks` if '
            '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
            'change the behavior now by explicitly setting `network_mode` to the name of the first '
            'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
            'Please make sure that the value you set to `network_mode` equals the inspection result '
            'for existing containers, otherwise the module will recreate them. You can find out the '
            'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
            version='2.0.0', collection_name='community.docker',  # was Ansible 2.14 / community.general 3.0.0
        )
    try:
        cm = ContainerManager(client)
        client.module.exit_json(**sanitize_result(cm.results))
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
785a2ac4c6ea2b724fc3667b58832bcb92498198 | 1218aa61721b39064ec34812175a3b2bf48caee1 | /app/replicas/api/serializers.py | 07cad2ce75dfead0dcdd58cf96c7b84537cad290 | [] | no_license | omegion/Mongo-Sharded-Cluster-Monitoring-with-Django-and-Docker-Compose | fd31ace2ab08d59ed2733c94792fcc832bbea963 | 3937845a2b9ee224ff42e896639ce6b7635b1c6e | refs/heads/master | 2023-07-06T10:42:46.393369 | 2019-05-16T15:57:36 | 2019-05-16T15:57:36 | 184,385,304 | 1 | 1 | null | 2023-06-24T08:55:17 | 2019-05-01T07:52:35 | JavaScript | UTF-8 | Python | false | false | 1,056 | py | from django.core.management import call_command
from django.conf import settings
from django.db.models import F
from rest_framework import serializers
from datetime import datetime, timedelta
# Models
from replicas.models import Replica, Check
class ReplicaSerializer(serializers.ModelSerializer):
    """Serialize a Replica record with every model field exposed.

    The identifying/state fields are read-only because they are maintained
    by the monitoring code, not by API clients.
    """
    # (Removed a commented-out SerializerMethodField experiment for a nested
    # user representation; restore from VCS history if ever needed.)

    class Meta:
        model = Replica
        read_only_fields = ('id', 'name', 'shard', 'port', 'state', 'status', )
        fields = '__all__'
class CheckSerializer(serializers.ModelSerializer):
    """Serialize a Check record, exposing all of its model fields."""
    class Meta:
        model = Check
        fields = '__all__'
class QuerySerializer(serializers.Serializer):
    """Validate a request payload carrying a single required 'query' string."""
    query = serializers.CharField(required=True)
class QueryInsertSerializer(serializers.Serializer):
    """Validate a request payload carrying a required integer 'number'.

    NOTE(review): presumably the number of documents to insert — confirm
    against the consuming view.
    """
    number = serializers.IntegerField(required=True)
| [
"vagrant@homestead"
] | vagrant@homestead |
2878cfc7fe099af6ea3dd0037c1f3faf7d1c2e83 | f3762dca5e4956144f430f423340bdcd6604dfea | /scripts/Buffer Contour.py | 1c01743f5fa28d13e7e93e4eda4be3935aba9e29 | [] | no_license | sindile/QGIS-Processing | 7ba7a6e5eda79d86589770b423ae4d00528d0bf9 | 9cd18fa13ab7b74c24de0da3653aa252ec055de7 | refs/heads/master | 2020-12-25T03:49:19.966042 | 2014-06-02T20:33:24 | 2014-06-02T20:33:24 | 20,726,685 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | ##Points=vector
##Value_field=field Points
##Levels=string 10;20
##Buffer_parameter=number 60
##Max_buffer_size=number 500
##Group_by_field=boolean True
##Group_Field=field Points
##Contour=output vector
from qgis.core import *
from PyQt4.QtCore import *
from processing.core.VectorWriter import VectorWriter
from shapely.ops import cascaded_union
from shapely.wkb import loads
from shapely.wkt import dumps
levels = [float(x) for x in Levels.split(";")]
maxlevel = max(levels)
mbuf = Max_buffer_size
progress.setText("lvls {0}".format(levels))
nodeLayer = processing.getObject(Points)
nodePrder = nodeLayer.dataProvider()
n = nodeLayer.featureCount()
l = 0
pts = {}
bpr = Buffer_parameter
for feat in processing.features(nodeLayer):
progress.setPercentage(int(100*l/n))
l+=1
if feat[Value_field] < maxlevel:
if Group_by_field: k = feat[Group_Field]
else: k = 'a'
if k not in pts: pts[k] = []
pts[k].append((feat.geometry().asPoint(), feat[Value_field]))
if Group_by_field:
fields = [QgsField(Group_Field, QVariant.String), QgsField('level', QVariant.Double)]
else:
fields = [QgsField('level', QVariant.Double)]
writer = VectorWriter(Contour, None, fields, QGis.WKBMultiPolygon, nodePrder.crs())
feat = QgsFeature()
n = len(pts)
l = 0
for k,v in pts.iteritems():
progress.setPercentage(int(100*l/n))
l+=1
if Group_by_field: attrs = [k, 0]
else: attrs = [0]
for l in levels:
if Group_by_field: attrs[1] = l
else: attrs[0] = l
feat.setAttributes(attrs)
ptlist = [x for x in v if x[1] < l]
polygons = [loads(QgsGeometry.fromPoint(p).buffer(min(mbuf, d * bpr), 10).asWkb())
for p,d in ptlist]
feat.setGeometry(QgsGeometry.fromWkt(dumps(cascaded_union(polygons))))
writer.addFeature(feat)
del writer | [
"volayaf@gmail.com"
] | volayaf@gmail.com |
4993149be8c698b9e63a433c14c910f7cae87885 | a5fdc429f54a0deccfe8efd4b9f17dd44e4427b5 | /0x06-python-classes/0-square.py | 1a79ed9a820966620e5d4ee00e1a77cd94c84336 | [] | no_license | Jilroge7/holbertonschool-higher_level_programming | 19b7fcb4c69793a2714ad241e0cc4fc975d94694 | 743a352e42d447cd8e1b62d2533408c25003b078 | refs/heads/master | 2022-12-20T20:41:33.375351 | 2020-09-25T02:02:28 | 2020-09-25T02:02:28 | 259,471,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | #!/usr/bin/python3
"""Module here"""
class Square:
    """Empty placeholder class representing a square."""
| [
"1672@holbertonschool.com"
] | 1672@holbertonschool.com |
ebb3b622a8dd2029e68b69e158f95feb702b8a52 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_094/ch1_2019_02_22_13_18_51_955616.py | 423e906e0e2ddecae0e43790ed3561a549ffb207 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | def calcular_valor_devido(valor0,juros,n):
    # Compound interest: amount owed = principal * (1 + rate) ** periods.
    y = valor0*(1+juros)**n
    return y
| [
"you@example.com"
] | you@example.com |
a57641283552933fe4ce9c6587a51d4d46875d6d | 605d63d23bc2e07eb054979a14557d469787877e | /utest/libdoc/test_datatypes.py | e6020b697cbcbd1133a446ff34af7ab630690134 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | robotframework/robotframework | 407b0cdbe0d3bb088f9bfcf9ea7d16e22eee1ddf | cf896995f822f571c33dc5651d51365778b1cf40 | refs/heads/master | 2023-08-29T03:19:00.734810 | 2023-08-27T18:14:48 | 2023-08-28T18:14:11 | 21,273,155 | 8,635 | 2,623 | Apache-2.0 | 2023-09-05T04:58:08 | 2014-06-27T11:10:38 | Python | UTF-8 | Python | false | false | 741 | py | import unittest
from robot.libdocpkg.standardtypes import STANDARD_TYPE_DOCS
from robot.running.arguments.typeconverters import (
EnumConverter, CombinedConverter, CustomConverter, TypeConverter, TypedDictConverter
)
class TestStandardTypeDocs(unittest.TestCase):
    """Verify every standard type converter has an entry in STANDARD_TYPE_DOCS."""
    # Converters deliberately exempt from the standard docs requirement
    # (their documentation comes from elsewhere, per the imported modules).
    no_std_docs = (EnumConverter, CombinedConverter, CustomConverter, TypedDictConverter)
    def test_all_standard_types_have_docs(self):
        """Every direct TypeConverter subclass must be documented or exempt."""
        for cls in TypeConverter.__subclasses__():
            if cls.type not in STANDARD_TYPE_DOCS and cls not in self.no_std_docs:
                raise AssertionError(f"Standard converter '{cls.__name__}' "
                                     f"does not have documentation.")
# Allow running this test file directly with `python test_datatypes.py`.
if __name__ == '__main__':
    unittest.main()
| [
"peke@iki.fi"
] | peke@iki.fi |
558cf29fd9568653cc559f9993465037ecee7ae6 | 979f47ef23640b7655c8c562faa31e03cab434d4 | /config.py | b41777940696163daf1c3457098c6c01da33b6b1 | [
"LicenseRef-scancode-sata"
] | permissive | ckelsel/captcha_platform | f3fe9b29bebca325a1d1875eff73c057665c29b0 | 62915afac879cb9c8b9e012974d0d6d13c437da8 | refs/heads/master | 2020-04-16T10:09:26.891613 | 2019-01-13T12:20:42 | 2019-01-13T12:20:42 | 165,493,280 | 0 | 0 | NOASSERTION | 2019-01-13T10:41:08 | 2019-01-13T10:41:08 | null | UTF-8 | Python | false | false | 8,349 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author: kerlomz <kerlomz@gmail.com>
import os
import uuid
import yaml
import hashlib
import logging
from logging.handlers import TimedRotatingFileHandler
from character import *
class Config(object):
def __init__(self, conf_path: str, graph_path: str = None, model_path: str = None):
self.model_path = model_path
self.conf_path = conf_path
self.graph_path = graph_path
self.sys_cf = self.read_conf
self.access_key = None
self.secret_key = None
self.default_model = self.sys_cf['System']['DefaultModel']
self.split_flag = eval(self.sys_cf['System']['SplitFlag'])
self.strict_sites = self.sys_cf['System'].get('StrictSites')
self.strict_sites = True if self.strict_sites is None else self.strict_sites
self.log_path = "logs"
self.logger_tag = self.sys_cf['System'].get('LoggerTag')
self.logger_tag = self.logger_tag if self.logger_tag else "coriander"
self.logger = logging.getLogger(self.logger_tag)
self.static_path = self.sys_cf['System'].get('StaticPath')
self.static_path = self.static_path if self.static_path else 'static'
self.use_default_authorization = False
self.authorization = None
self.init_logger()
self.assignment()
def init_logger(self):
self.logger.setLevel(logging.INFO)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
file_handler = TimedRotatingFileHandler(
'{}/{}.log'.format(self.log_path, "captcha_platform"),
when="MIDNIGHT",
interval=1,
backupCount=180
)
self.logger.propagate = False
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
self.logger.addHandler(stream_handler)
def assignment(self):
# ---AUTHORIZATION START---
mac_address = hex(uuid.getnode())[2:]
self.use_default_authorization = False
self.authorization = self.sys_cf.get('Security')
if not self.authorization or not self.authorization.get('AccessKey') or not self.authorization.get('SecretKey'):
self.use_default_authorization = True
model_name_md5 = hashlib.md5(
"{}".format(self.default_model).encode('utf8')).hexdigest()
self.authorization = {
'AccessKey': model_name_md5[0: 16],
'SecretKey': hashlib.md5("{}{}".format(model_name_md5, mac_address).encode('utf8')).hexdigest()
}
self.access_key = self.authorization['AccessKey']
self.secret_key = self.authorization['SecretKey']
# ---AUTHORIZATION END---
@property
def read_conf(self):
with open(self.conf_path, 'r', encoding="utf-8") as sys_fp:
sys_stream = sys_fp.read()
return yaml.load(sys_stream)
class Model(object):
def __init__(self, conf: Config, model_conf: str):
self.conf = conf
self.logger = self.conf.logger
self.graph_path = conf.graph_path
self.model_path = conf.model_path
self.model_conf = model_conf
self.model_conf_demo = 'model_demo.yaml'
self.verify()
def verify(self):
if not os.path.exists(self.model_conf):
raise Exception(
'Configuration File "{}" No Found. '
'If it is used for the first time, please copy one from {} as {}'.format(
self.model_conf,
self.model_conf_demo,
self.model_path
)
)
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
raise Exception(
'For the first time, please put the trained model in the model directory.'
)
def char_set(self, _type):
if isinstance(_type, list):
return _type
if isinstance(_type, str):
return SIMPLE_CHAR_SET.get(_type) if _type in SIMPLE_CHAR_SET.keys() else None
self.logger.error(
"Character set configuration error, customized character set should be list type"
)
@property
def read_conf(self):
with open(self.model_conf, 'r', encoding="utf-8") as sys_fp:
sys_stream = sys_fp.read()
return yaml.load(sys_stream)
class ModelConfig(Model):
def __init__(self, conf: Config, model_conf: str):
super().__init__(conf=conf, model_conf=model_conf)
self.system = None
self.device = None
self.device_usage = None
self.charset = None
self.split_char = None
self.gen_charset = None
self.char_exclude = None
self.charset_len = None
self.target_model = None
self.model_type = None
self.image_height = None
self.image_width = None
self.resize = None
self.binaryzation = None
self.smooth = None
self.blur = None
self.replace_transparent = None
self.model_site = None
self.version = None
self.mac_address = None
self.compile_model_path = None
self.model_name_md5 = None
self.color_engine = None
self.cf_model = self.read_conf
self.assignment()
self.graph_name = "{}&{}".format(self.target_model, self.size_string)
def assignment(self):
system = self.cf_model.get('System')
self.device = system.get('Device') if system else None
self.device = self.device if self.device else "cpu:0"
self.device_usage = system.get('DeviceUsage') if system else None
self.device_usage = self.device_usage if self.device_usage else 0.1
self.charset = self.cf_model['Model'].get('CharSet')
self.gen_charset = self.char_set(self.charset)
if self.gen_charset is None:
raise Exception(
"The character set type does not exist, there is no character set named {}".format(self.charset),
)
self.char_exclude = self.cf_model['Model'].get('CharExclude')
self.gen_charset = [''] + [i for i in self.char_set(self.charset) if i not in self.char_exclude]
self.charset_len = len(self.gen_charset)
self.target_model = self.cf_model['Model'].get('ModelName')
self.model_type = self.cf_model['Model'].get('ModelType')
self.model_site = self.cf_model['Model'].get('Sites')
self.model_site = self.model_site if self.model_site else []
self.version = self.cf_model['Model'].get('Version')
self.version = self.version if self.version else 1.0
self.split_char = self.cf_model['Model'].get('SplitChar')
self.split_char = '' if not self.split_char else self.split_char
self.image_height = self.cf_model['Model'].get('ImageHeight')
self.image_width = self.cf_model['Model'].get('ImageWidth')
self.color_engine = self.cf_model['Model'].get('ColorEngine')
self.color_engine = self.color_engine if self.color_engine else 'opencv'
self.binaryzation = self.cf_model['Pretreatment'].get('Binaryzation')
self.smooth = self.cf_model['Pretreatment'].get('Smoothing')
self.blur = self.cf_model['Pretreatment'].get('Blur')
self.blur = self.cf_model['Pretreatment'].get('Blur')
self.resize = self.cf_model['Pretreatment'].get('Resize')
self.resize = self.resize if self.resize else [self.image_width, self.image_height]
self.replace_transparent = self.cf_model['Pretreatment'].get('ReplaceTransparent')
self.compile_model_path = os.path.join(self.graph_path, '{}.pb'.format(self.target_model))
if not os.path.exists(self.compile_model_path):
raise Exception(
'{} not found, please put the trained model in the model directory.'.format(self.compile_model_path)
)
def size_match(self, size_str):
return size_str == self.size_string
@property
def size_string(self):
return "{}x{}".format(self.image_width, self.image_height)
| [
"kerlomz@gmail.com"
] | kerlomz@gmail.com |
3730c7a184b3d699c21a6e262940768d03cefb66 | f03efb1242053bb5f532f654015afa6172cc8891 | /wk02-simple-algorithms/exercise/function/function-02.py | ab7ab6a3cc3198010edf9e1898c94f191495b81e | [] | no_license | nabilatajrin/MITx-6.00.1x-IntroductionToComputerScienceAndProgrammingUsingPython | f283135edfb07cb652881489424afa7a683f5b3f | d79fe9f7088392d7de2015a61ef1178ab65d325e | refs/heads/master | 2022-12-14T10:59:58.212864 | 2020-09-15T05:45:59 | 2020-09-15T05:45:59 | 245,590,470 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | a = 10
def f(x):
return x + a
a = 3
print(f(1)) | [
"nabilatajrin@gmail.com"
] | nabilatajrin@gmail.com |
57e01f66e7647c4f10f809f8c799d1ce79bbf434 | a4fcaa28f288ff495ac09c3f8070f019f4d3ba80 | /virtualenvs/rp-3-python3_6_flask_bokeh_bette/bin/pip3.6 | 25f52bb7807a73b9ddac0577075cb8067474af93 | [] | no_license | tomwhartung/always_learning_python | db44b0745f27f482e6482faa821f89dc7809dda8 | ab27c164a724754e3e25518bf372bd4437995d64 | refs/heads/master | 2020-12-07T15:57:04.184391 | 2017-05-18T19:35:31 | 2017-05-18T19:35:31 | 67,449,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | 6 | #!/var/www/always_learning/github/customizations/always_learning_python/virtualenvs/rp-3-python3_6_flask_bokeh_bette/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"tomwhartung@gmail.com"
] | tomwhartung@gmail.com |
161fa8607b38260ceab9820c52b08e6799e531be | 89fc945e93c9cfacaab0d3bac0071ee95c86f81d | /bookdata/schema.py | 16f3874b8f8748971303648b94e3517ebe924d3f | [
"MIT"
] | permissive | jimfhahn/bookdata-tools | 2341825438aa627554eb91d1929545e7fe0b24f5 | cd857028a7947f369d84cb69a941303a413046b6 | refs/heads/master | 2023-09-05T07:38:43.604828 | 2021-03-11T22:21:30 | 2021-03-11T22:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | """
Data schema information for the book data tools.
"""
import pandas as pd
class NS:
def __init__(self, name, num):
self.name = name
self.code = num
self.offset = num * 100000000
ns_work = NS('OL-W', 1)
ns_edition = NS('OL-E', 2)
ns_loc_rec = NS('LOC', 3)
ns_gr_work = NS('GR-W', 4)
ns_gr_book = NS('GR-B', 5)
ns_loc_work = NS('LOC-W', 6)
ns_loc_instance = NS('LOC-I', 7)
ns_isbn = NS('ISBN', 9)
numspaces = [
ns_work, ns_edition,
ns_loc_rec,
ns_gr_work, ns_gr_book,
ns_loc_work, ns_loc_instance,
ns_isbn
]
src_labels = pd.Series(dict((_ns.name, _ns.code) for _ns in numspaces))
src_label_rev = pd.Series(src_labels.index, index=src_labels.values)
| [
"michaelekstrand@boisestate.edu"
] | michaelekstrand@boisestate.edu |
94b5e34a931d800c3150922a52f9eaee92644e13 | f9886d2b57d92186773d73f59dc0a0e9759b8944 | /04_bigdata/01_Collection/01_XML/6_change_value_to_attribute.py | e5af8ac67740920fae4e00daa09911777e69e0a2 | [] | no_license | Meengkko/bigdata_python2019 | 14bab0da490bd36c693f50b5d924e27f4a8e02ba | a28e964ab7cefe612041830c7b1c960f92c42ad5 | refs/heads/master | 2022-12-12T15:51:21.448923 | 2019-11-08T03:50:15 | 2019-11-08T03:50:15 | 195,142,241 | 0 | 0 | null | 2022-04-22T22:37:59 | 2019-07-04T00:17:18 | HTML | UTF-8 | Python | false | false | 370 | py | from xml.etree.ElementTree import Element, dump, SubElement
note = Element('note', date="20120104",to="Tove")
# to = Element('to') # 자식 노드
# to.text = "Tove" # 현재 앨리먼트(Tag)에 값 추가
# note.append(to) # 부모 노드에 자식노드 추가
SubElement(note, "From").text = "Jani" # SubElement를 활용하여 자식 노드 추가
dump(note)
| [
"you@ddd.com"
] | you@ddd.com |
922fe3837b1fe1ad442a1198b4d5631ceff4734a | 80052e0cbfe0214e4878d28eb52009ff3054fe58 | /e2yun_addons/odoo12/e2yun_website_helpdesk_form/__manifest__.py | 0f88accae37eadfb417ad8d9b7f0f5ef3c255a0f | [] | no_license | xAlphaOmega/filelib | b022c86f9035106c24ba806e6ece5ea6e14f0e3a | af4d4b079041f279a74e786c1540ea8df2d6b2ac | refs/heads/master | 2021-01-26T06:40:06.218774 | 2020-02-26T14:25:11 | 2020-02-26T14:25:11 | 243,349,887 | 0 | 2 | null | 2020-02-26T19:39:32 | 2020-02-26T19:39:31 | null | UTF-8 | Python | false | false | 1,188 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'e2yun_Online Ticket Submission',
'category': 'Website',
'summary': 'e2yun 服务订单表单在线提交 ',
'description': """在线提交服务订单.""",
'application': True,
'depends': [
'web', 'website', 'website_helpdesk', 'helpdesk', 'website_helpdesk_form',
'im_livechat', 'portal', 'mail', "rating"
],
'data': [
'security/helpdesk_security.xml',
'security/ir.model.access.csv',
'data/website_helpdesk.xml',
'views/helpdesk_templates.xml',
'views/commonproblems_templates.xml',
'views/helpdesk_views.xml',
'views/helpdesk_ticket_brand_type.xml',
'views/helpdesk_tickchat_uuid.xml',
'views/helpdesk_rating.xml',
'views/helpdesk_team_views_subuser.xml',
'views/helpdesk_csoffline.xml',
],
'qweb': [
"static/src/xml/helpdeskdesk_matnr.xml",
"static/src/xml/helpdeskdesk_livechat_out.xml"
],
'license': 'OEEL-1',
'installable': True,
'auto_install': False,
'active': False,
'web': True,
}
| [
"hepeng1@163.com"
] | hepeng1@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.