blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d9e8ec272587da4d0b57094865c1e442afa7573 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Data2vec_for_PyTorch/fairseq/models/speech_dlm/__init__.py | 6ea914d6a578651fecd18cc7f352382623de303a | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 249 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .speech_dlm import * # noqa
from .hub_interface import * # noqa
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
4d1d1206749d6326010dd874cf58439b9b23189c | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /vedat/dist/gtk/gtkspell3/actions.py | 6e8ee6ed12a4b2248e6106133f1bda3ac07b6278 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 727 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
shelltools.export("HOME", get.workDIR())
def setup():
    """Regenerate the build system and configure a shared-only GTK3 build."""
    autotools.autoreconf('-fi')
    autotools.configure("--disable-static \
                         --enable-gtk3")
    # Inject optimized, as-needed linker flags into libtool's shared-link rule.
    pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
    """Compile the package with the Makefiles produced by setup()."""
    autotools.make()
def install():
    """Install build results into the package root and ship the docs."""
    autotools.install()
    # Empty files: NEWS,  (upstream ships an empty NEWS, so it is omitted)
    pisitools.dodoc("COPYING", "README", "AUTHORS", "ChangeLog")
| [
"vedat@pisi_linux1.0"
] | vedat@pisi_linux1.0 |
cac379b18eb97023b4c5fdcf142c87d983e6794e | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /@lib/12-13-2011-01/vyperlogix/misc/date.py | 725848771c1c8d2be9463ec460abd9baef7c236f | [] | no_license | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,166 | py | # to get the local (current / here) time
import time
import CooperativeClass
__copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
# allows quick, easy creation of enumeration objects
def enum(*names):
    """Build a read-only enumeration object mapping each name to its index."""
    class enum(object):
        # Freeze the instance: any attempt to rebind or delete a member fails.
        def __setattr__(self, parameter, value):
            raise AttributeError
        def __delattr__(self, parameter):
            raise AttributeError
    obj = enum()
    # Bypass the frozen __setattr__ by writing straight into the instance dict.
    obj.__dict__.update((name, index) for index, name in enumerate(names))
    return obj
# an enumeration object with the list attributes
format = enum('mdyy', 'yymd', 'mdy', 'ymd')
class date(CooperativeClass.Cooperative):
    """A calendar date with parsing and formatting in slash-separated forms.

    The date is held as integer year/month/day.  ``form`` is one of the
    ``format`` enumeration members (mdyy, yymd, mdy, ymd) and controls how
    strings are parsed by __init__ and rendered by GetDate().
    """

    def __init__(self, form, string=None):
        """Parse ``string`` according to ``form``; default to today's local date."""
        self.__form = form
        if string:
            self.__set_via_string(string)
        else:
            temp = time.localtime()
            self.__year = temp.tm_year
            self.__month = temp.tm_mon
            self.__day = temp.tm_mday

    def __set_via_string(self, string):
        """Set year/month/day from an 'a/b/c' string interpreted per the form."""
        numbers = string.split('/')
        assert len(numbers) == 3
        for index in range(len(numbers)):
            numbers[index] = int(numbers[index])
        if self.__form == format.mdyy or self.__form == format.mdy:
            self.__year = numbers[2]
            self.__month = numbers[0]
            self.__day = numbers[1]
        elif self.__form == format.yymd or self.__form == format.ymd:
            self.__year = numbers[0]
            self.__month = numbers[1]
            # Bug fix: this previously read 'number[2]' and raised NameError.
            self.__day = numbers[2]
        else:
            # Bug fix: raising a plain string is a TypeError on Python >= 2.6.
            raise ValueError('bad format')

    def __set_via_string_DEPRECATED(self, string):
        """Old fixed-width parser kept for reference; prefer __set_via_string."""
        length = len(string)
        if self.__form == format.mdyy:
            if length == 10:
                assert string[2] == '/' and string[5] == '/'
                self.__year = int(string[6:])
                self.__month = int(string[:2])
                self.__day = int(string[3:5])
            elif length == 8:
                self.__year = int(string[4:])
                self.__month = int(string[:2])
                self.__day = int(string[2:4])
            else:
                raise ValueError('bad string')
        elif self.__form == format.yymd:
            if length == 10:
                assert string[4] == '/' and string[7] == '/'
                self.__year = int(string[:4])
                self.__month = int(string[5:7])
                self.__day = int(string[8:])
            elif length == 8:
                self.__year = int(string[:4])
                self.__month = int(string[4:6])
                self.__day = int(string[6:])
            else:
                raise ValueError('bad string')
        elif self.__form == format.mdy:
            if length == 8:
                assert string[2] == '/' and string[5] == '/'
                self.__year = int(string[6:])
                self.__month = int(string[:2])
                self.__day = int(string[3:5])
            elif length == 6:
                self.__year = int(string[4:])
                self.__month = int(string[:2])
                self.__day = int(string[2:4])
            else:
                raise ValueError('bad string')
        elif self.__form == format.ymd:
            if length == 8:
                assert string[2] == '/' and string[5] == '/'
                self.__year = int(string[:2])
                self.__month = int(string[3:5])
                self.__day = int(string[6:])
            elif length == 6:
                self.__year = int(string[:2])
                self.__month = int(string[2:4])
                self.__day = int(string[4:])
            else:
                raise ValueError('bad string')
        else:
            raise ValueError('bad format')

    def GetDate(self, form=None):
        """Return the date as a zero-padded slash-separated string in ``form``
        (defaults to the form given at construction)."""
        if form is None:
            form = self.__form
        if form == format.mdyy:
            return str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2) + '/' + str(self.__year)[-4:].zfill(4)
        elif form == format.yymd:
            return str(self.__year)[-4:].zfill(4) + '/' + str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2)
        elif form == format.mdy:
            return str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2) + '/' + str(self.__year)[-2:].zfill(2)
        elif form == format.ymd:
            return str(self.__year)[-2:].zfill(2) + '/' + str(self.__month)[-2:].zfill(2) + '/' + str(self.__day)[-2:].zfill(2)
        else:
            raise ValueError('bad format')

    def GetDateShort(self):
        """Return e.g. 'Mon Jan 01, 2001' (abbreviated weekday/month names)."""
        return time.strftime('%a %b %d, %Y', time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y'))

    def GetDateLong(self):
        """Return e.g. 'Monday January 01, 2001' (full weekday/month names)."""
        return time.strftime('%A %B %d, %Y', time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y'))

    def GetDay(self):
        return self.__day

    def GetMonth(self):
        return self.__month

    def GetYear(self):
        return self.__year

    def GetDayOfWeek(self):
        """Return the weekday with Sunday == 0 (time module uses Monday == 0)."""
        wday = time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y').tm_wday
        wday += 1
        if wday == 7:
            return 0
        return wday

    def GetJulianDay(self):
        """Return the day-of-year (1..366)."""
        return time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y').tm_yday

    def IsValid(self):
        """Return True when the stored components form a real calendar date."""
        try:
            time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y')
            return True
        except:
            return False

    def AddDays(self, days):
        """Shift the date by ``days`` (may be negative); returns self.

        NOTE(review): arithmetic goes through mktime/localtime, so a DST
        transition in the local zone could shift the result by a day - confirm.
        """
        temp = time.localtime(time.mktime(time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y')) + int(days) * (60 * 60 * 24))
        self.__year = temp.tm_year
        self.__month = temp.tm_mon
        self.__day = temp.tm_mday
        return self

    def AddYears(self, years):
        """Shift the year by ``years``; returns self."""
        self.__year += int(years)
        return self

    def AddMonths(self, months):
        """Shift the month by ``months``, carrying into the year; returns self."""
        candidate_month = self.__month + int(months)
        if 0 < candidate_month < 13:
            self.__month = candidate_month
        elif candidate_month > 12:
            # '//' keeps the original Python-2 floor-division semantics and is
            # also correct under Python 3.
            self.__year += candidate_month // 12
            self.__month = ((candidate_month - 1) % 12) + 1
        elif candidate_month < 1:
            candidate_month = abs(candidate_month) + 1
            self.__year -= candidate_month // 12
            self.__month = 13 - (((candidate_month - 1) % 12) + 1)
        else:
            # Unreachable: the three branches above cover every integer.
            # Bug fix: was a (invalid) string raise.
            raise RuntimeError('there is a problem if this runs')
        return self

    def SubtractDays(self, days):
        return self.AddDays(-days)

    def SubtractYears(self, years):
        return self.AddYears(-years)

    def SubtractMonths(self, months):
        return self.AddMonths(-months)

    def DateDiff(self, form, string):
        """Return the absolute difference in whole days to the given date."""
        temp = date(form, string)
        now = self.__get_relative_day()
        then = temp.__get_relative_day()
        return int(abs(now - then))

    def __get_relative_day(self):
        # Days (possibly fractional) since the epoch, in local time.
        return time.mktime(time.strptime(self.GetDate(format.mdyy), '%m/%d/%Y')) / (60 * 60 * 24)

    def YearsOld(self):
        """Return completed years between this date (a birth date) and today."""
        temp = date(format.mdyy)
        candidate_year = temp.GetYear() - self.GetYear()
        if temp.GetMonth() - self.GetMonth() > 0:
            return candidate_year
        elif temp.GetMonth() - self.GetMonth() < 0:
            return candidate_year - 1
        else:
            # Same month: the day decides whether the birthday has passed.
            if self.GetDay() - temp.GetDay() <= 0:
                return candidate_year
            else:
                return candidate_year - 1
| [
"raychorn@gmail.com"
] | raychorn@gmail.com |
b964c7d3ac0997d189160beb6397ce66674c1b0e | 1afa6c852dfc922d1a26a384d965976f31a87692 | /Interaction/Style/Testing/Python/TestStyleTrackballCamera.py | ec1a5770ffd8efdb2a48a09334f9989151a0a7a6 | [
"BSD-3-Clause"
] | permissive | dgobbi/VTK | 631d037aacc7258861e70f77c586b01cd4ebff3f | 17f232ee440025c26bc78a897edef78e9fc78510 | refs/heads/master | 2021-01-04T22:27:46.611907 | 2013-03-01T19:44:02 | 2013-03-01T19:44:02 | 938,377 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
# Run this test like so:
# vtkpython TestStyleTrackballCamera.py -D $VTK_DATA_ROOT \
# -B $VTK_DATA_ROOT/Baseline/Rendering
import sys
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
'''
Prevent .pyc files from being created.
Stops the vtk source being polluted
by .pyc files.
'''
sys.dont_write_bytecode = True
# Load base (spike and test)
import TestStyleBaseSpike
import TestStyleBase
class TestStyleTrackballCamera(vtk.test.Testing.vtkTest):
    # Regression test: drive the interactor-style switch into trackball-camera
    # mode and compare the rendered scene against a baseline image.
    def testStyleTrackballCamera(self):
        # Standard render pipeline: renderer -> window -> interactor.
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin);
        testStyleBaseSpike = TestStyleBaseSpike.StyleBaseSpike(ren, renWin, iRen)
        # Set interactor style
        inStyle = vtk.vtkInteractorStyleSwitch()
        iRen.SetInteractorStyle(inStyle)
        # Switch to Trackball+Actor mode
        # Simulate the 't' then 'c' keystrokes that toggle the switch style.
        iRen.SetKeyEventInformation(0, 0, 't', 0, '0')
        iRen.InvokeEvent("CharEvent")
        iRen.SetKeyEventInformation(0, 0, 'c', 0, '0')
        iRen.InvokeEvent("CharEvent")
        # Test style
        testStyleBase = TestStyleBase.TestStyleBase(ren)
        testStyleBase.test_style(inStyle.GetCurrentStyle())
        # render and interact with data
        img_file = "TestStyleTrackballCamera.png"
        # Fails the test when the rendered image differs from the baseline
        # beyond the given threshold.
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestStyleTrackballCamera, 'test')])
| [
"nikhil.shetty@kitware.com"
] | nikhil.shetty@kitware.com |
9e75dc1d0187593e791de4ce6185ce127d5bea53 | 221e8d2c7f8f0044e2884d3c1a1327d49ca50356 | /hackbright_web.py | 2a52d90b7672a1ebd04e39d8bbce1684a8f89873 | [] | no_license | lakeeja/HBF_wk4_project_tracker_flask | d8411cfc12741c6662badc32cc8e2bcec399958a | a45669b5389051f8fd68d5fb8fd5865a5a059880 | refs/heads/master | 2021-03-19T07:39:32.490693 | 2017-06-14T20:09:32 | 2017-06-14T20:09:32 | 94,369,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | """A web application for tracking projects, students, and student grades."""
from flask import Flask, request, render_template
import hackbright
app = Flask(__name__)
@app.route("/student")
def get_student():
"""Show information about a student."""
github = request.args.get('github')
first, last, github = hackbright.get_student_by_github(github)
project = hackbright.get_grades_by_github(github)
print project
html = render_template("student_info.html",
first=first,
last=last,
github=github,
project=project)
return html
@app.route("/student-search")
def get_student_form():
"""Show form for searching for a student."""
return render_template('student_search.html')
@app.route("/student-add", methods=['POST'])
def student_add():
"""Add a student."""
github = request.form.get("github")
first_name = request.form.get("first_name")
last_name = request.form.get("last_name")
hackbright.make_new_student(github, first_name, last_name)
return "made it to student add whew"
@app.route('/project')
def project_info():
    """Render the title, description, and max grade for one project."""

    requested_title = request.args.get('title')
    title, description, max_grade = hackbright.get_project_by_title(requested_title)

    return render_template("project_info.html",
                           title=title,
                           description=description,
                           max_grade=max_grade)
if __name__ == "__main__":
hackbright.connect_to_db(app)
app.run(debug=True)
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
3f0006a29d6d15d213266f9961b4c7a39e3f2c85 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v7_2_0/rbridge_id/interface/loopback/ip/interface_loopback_ospf_conf/ospf_interface_config/authentication_key/__init__.py | 51c1547f8a985cc0cfecdf38f457216cf1c46283 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,866 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import no_encrypt_auth_key_table
import auth_key_table
class authentication_key(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-rbridge - based on the path /rbridge-id/interface/loopback/ip/interface-loopback-ospf-conf/ospf-interface-config/authentication-key. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # NOTE: generated binding code - regenerate from the YANG model rather
  # than hand-editing.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__no_encrypt_auth_key_table','__auth_key_table',)

  _yang_name = 'authentication-key'
  _rest_name = 'authentication-key'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, then the parent's, else
    # helper support is disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Same resolution order for the extension-methods table.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Child containers, wrapped in YANGDynClass with their YANG metadata.
    self.__no_encrypt_auth_key_table = YANGDynClass(base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    self.__auth_key_table = YANGDynClass(base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)

    # Optional copy-construction from a single object carrying the same
    # pyangbind elements.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG data path of this container, rooted at the parent when present.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'rbridge-id', u'interface', u'loopback', u'ip', u'interface-loopback-ospf-conf', u'ospf-interface-config', u'authentication-key']

  def _rest_path(self):
    # REST path; nodes with an empty rest name are skipped.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'rbridge-id', u'interface', u'Loopback', u'ip', u'ospf', u'authentication-key']

  def _get_no_encrypt_auth_key_table(self):
    """
    Getter method for no_encrypt_auth_key_table, mapped from YANG variable /rbridge_id/interface/loopback/ip/interface_loopback_ospf_conf/ospf_interface_config/authentication_key/no_encrypt_auth_key_table (container)
    """
    return self.__no_encrypt_auth_key_table

  def _set_no_encrypt_auth_key_table(self, v, load=False):
    """
    Setter method for no_encrypt_auth_key_table, mapped from YANG variable /rbridge_id/interface/loopback/ip/interface_loopback_ospf_conf/ospf_interface_config/authentication_key/no_encrypt_auth_key_table (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_no_encrypt_auth_key_table is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_no_encrypt_auth_key_table() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """no_encrypt_auth_key_table must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__no_encrypt_auth_key_table = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_no_encrypt_auth_key_table(self):
    # Restore the default (empty) container value.
    self.__no_encrypt_auth_key_table = YANGDynClass(base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)


  def _get_auth_key_table(self):
    """
    Getter method for auth_key_table, mapped from YANG variable /rbridge_id/interface/loopback/ip/interface_loopback_ospf_conf/ospf_interface_config/authentication_key/auth_key_table (container)
    """
    return self.__auth_key_table

  def _set_auth_key_table(self, v, load=False):
    """
    Setter method for auth_key_table, mapped from YANG variable /rbridge_id/interface/loopback/ip/interface_loopback_ospf_conf/ospf_interface_config/authentication_key/auth_key_table (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_key_table is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_key_table() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """auth_key_table must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__auth_key_table = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_auth_key_table(self):
    # Restore the default (empty) container value.
    self.__auth_key_table = YANGDynClass(base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)

  # Public attribute access goes through these properties; the setters above
  # validate types before assignment.
  no_encrypt_auth_key_table = __builtin__.property(_get_no_encrypt_auth_key_table, _set_no_encrypt_auth_key_table)
  auth_key_table = __builtin__.property(_get_auth_key_table, _set_auth_key_table)

  _pyangbind_elements = {'no_encrypt_auth_key_table': no_encrypt_auth_key_table, 'auth_key_table': auth_key_table, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
b8aa11e2fbe1bcf8389ab5e53c2fbb4cea60c170 | c857d225b50c5040e132d8c3a24005a689ee9ce4 | /problem350.py | 9e621c086dc23d4c33e491b4fc312ed60a93f07d | [] | no_license | pythonsnake/project-euler | 0e60a6bd2abeb5bf863110c2a551d5590c03201e | 456e4ef5407d2cf021172bc9ecfc2206289ba8c9 | refs/heads/master | 2021-01-25T10:44:27.876962 | 2011-10-21T00:46:02 | 2011-10-21T00:46:02 | 2,335,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | """
A list of size n is a sequence of n natural numbers. examples are (2,4,6), (2,6,4), (10,6,15,6), and (11).
The greatest common divisor, or gcd, of a list is the largest natural number that divides all entries of the list. Examples: gcd(2,6,4) = 2, gcd(10,6,15,6) = 1 and gcd(11) = 11.
The least common multiple, or lcm, of a list is the smallest natural number divisible by each entry of the list. Examples: lcm(2,6,4) = 12, lcm(10,6,15,6) = 30 and lcm(11) = 11.
Let f(G, L, N) be the number of lists of size N with gcd G and lcm L. For example:
f(10, 100, 1) = 91.
f(10, 100, 2) = 327.
f(10, 100, 3) = 1135.
f(10, 100, 1000) mod 10^14 = 3286053.
Find f(10^6, 10^12, 10^18) mod 10^14.
""" | [
"pythonsnake98@gmail.com"
] | pythonsnake98@gmail.com |
21a873fd59c00a1d52720a6c3195a0abdef5efd0 | faf2852a357a2e077d0e7f0a28055c250f5edcd0 | /myshop/authapp/migrations/0002_authapp_user.py | 7f5e3935c03ee2acb2f53026a1448723437e08b6 | [] | no_license | Pegorino82/GUDjangoProject_2 | dc68ff9d21afb56ce29f3c5fe4672c5f79633196 | 579189b576af3bdcd98927d054030b7a2ebc9f46 | refs/heads/master | 2020-04-07T20:13:25.701650 | 2019-01-17T12:01:21 | 2019-01-17T12:01:21 | 158,679,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # Generated by Django 2.1.4 on 2018-12-19 20:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a 'user' foreign key from the authapp model to the project's
    # configured (swappable) user model.

    initial = True

    dependencies = [
        ('authapp', '0001_initial'),
        # Depend on whatever model AUTH_USER_MODEL resolves to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='authapp',
            name='user',
            # Deleting the user cascades to the related authapp row.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"eshkryabin@yandex.ru"
] | eshkryabin@yandex.ru |
fbccffcd8524d8b244b026b2bed39e87748354d3 | 1dead366d7d1152a2dd1c7dd5f2ab91a4119310a | /GMOOC/GMOOC/settings.py | 602b234767d99af5cf8c0179a8ebc850bd52fd7b | [] | no_license | SmallSir/Python-and-Xadmin | 6b95bcb401c1f4ca232d3c77bb6d883467e5ea7f | fe29cb24d3a047f9a97319d4e33f07ad0e1562e3 | refs/heads/master | 2020-04-09T07:42:29.380396 | 2018-12-03T09:45:13 | 2018-12-03T09:45:13 | 160,167,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | """
Django settings for GMOOC project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make the local 'apps' directory importable as top-level packages.
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to source control -
# rotate it and load it from the environment before any deployment.
SECRET_KEY = '9=k8^e+l@w&1w#s!_9be1zjf41_sty25p_sll#%tyxau$%sqi+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Authenticate through the project's custom backend in users.views.
AUTHENTICATION_BACKENDS = (
    'users.views.CustomBackend',
)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'users',
    'courses',
    'organization',
    'operation',
    'xadmin',
    'crispy_forms',
    'captcha'
]

# Use the custom user model from the 'users' app instead of auth.User.
AUTH_USER_MODEL = 'users.UserFile'

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'GMOOC.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'GMOOC.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): plaintext database password in source control - move the
# credentials to environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'PASSWORD':'qiu961030.',
        'HOST':'localhost',
        'USER':'root',
        'NAME':'gmooc',
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'zh-hans'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = True

USE_L10N = True

# Naive local datetimes are stored (no timezone-aware datetime support).
USE_TZ = False


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
) | [
"280690956@qq.com"
] | 280690956@qq.com |
993e9beb0901f600bbe3c35feaa5c1c4eaacb5cc | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/cloud/amazon/cloudtrail.py | 941233d6df89f0731513aa74d67f407526dbc35d | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 23,719 | py | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudtrail
short_description: manage CloudTrail create, delete, update
description:
- Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- "Ansible Core Team"
- "Ted Timmons"
- "Daniel Shepherd (@shepdelacreme)"
requirements:
- boto3
- botocore
options:
state:
description:
- Add or remove CloudTrail configuration.
- The following states have been preserved for backwards compatibility. C(state=enabled) and C(state=disabled).
- enabled=present and disabled=absent.
required: true
choices: ['present', 'absent', 'enabled', 'disabled']
name:
description:
- Name for the CloudTrail.
- Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
required: true
enable_logging:
description:
- Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
default: true
version_added: "2.4"
s3_bucket_name:
description:
- An existing S3 bucket where CloudTrail will deliver log files.
- This bucket should exist and have the proper policy.
- See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- Required when C(state=present)
version_added: "2.4"
s3_key_prefix:
description:
- S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
is_multi_region_trail:
description:
- Specify whether the trail belongs only to one region or exists in all regions.
default: false
version_added: "2.4"
enable_log_file_validation:
description:
- Specifies whether log file integrity validation is enabled.
- CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered.
version_added: "2.4"
aliases: [ "log_file_validation_enabled" ]
include_global_events:
description:
- Record API calls from global services such as IAM and STS.
default: true
aliases: [ "include_global_service_events" ]
sns_topic_name:
description:
- SNS Topic name to send notifications to when a log file is delivered
version_added: "2.4"
cloudwatch_logs_role_arn:
description:
- Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html)
- Required when C(cloudwatch_logs_log_group_arn)
version_added: "2.4"
cloudwatch_logs_log_group_arn:
description:
- A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html)
- Required when C(cloudwatch_logs_role_arn)
version_added: "2.4"
kms_key_id:
description:
- Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
- The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html)
version_added: "2.4"
tags:
description:
- A hash/dictionary of tags to be applied to the CloudTrail resource.
- Remove completely or specify an empty dictionary to remove all tags.
default: {}
version_added: "2.4"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: create single region cloudtrail
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: create multi-region trail with validation and tags
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
kms_key_id: "alias/MyAliasName"
tags:
environment: dev
Name: default
- name: show another valid kms_key_id
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
# simply "12345678-1234-1234-1234-123456789012" would be valid too.
- name: pause logging the trail we just created
cloudtrail:
state: present
name: default
enable_logging: false
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: delete a trail
cloudtrail:
state: absent
name: default
'''
RETURN = '''
exists:
description: whether the resource exists
returned: always
type: bool
sample: true
trail:
description: CloudTrail resource details
returned: always
type: complex
sample: hash/dictionary of values
contains:
trail_arn:
description: Full ARN of the CloudTrail resource
returned: success
type: string
sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
name:
description: Name of the CloudTrail resource
returned: success
type: string
sample: default
is_logging:
description: Whether logging is turned on or paused for the Trail
returned: success
type: bool
sample: True
s3_bucket_name:
description: S3 bucket name where log files are delivered
returned: success
type: string
sample: myBucket
s3_key_prefix:
description: Key prefix in bucket where log files are delivered (if any)
returned: success when present
type: string
sample: myKeyPrefix
log_file_validation_enabled:
description: Whether log file validation is enabled on the trail
returned: success
type: bool
sample: true
include_global_service_events:
description: Whether global services (IAM, STS) are logged with this trail
returned: success
type: bool
sample: true
is_multi_region_trail:
description: Whether the trail applies to all regions or just one
returned: success
type: bool
sample: true
has_custom_event_selectors:
description: Whether any custom event selectors are used for this trail.
returned: success
type: bool
sample: False
home_region:
description: The home region where the trail was originally created and must be edited.
returned: success
type: string
sample: us-east-1
sns_topic_name:
description: The SNS topic name where log delivery notifications are sent.
returned: success when present
type: string
sample: myTopic
sns_topic_arn:
description: Full ARN of the SNS topic where log delivery notifications are sent.
returned: success when present
type: string
sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
cloud_watch_logs_log_group_arn:
description: Full ARN of the CloudWatch Logs log group where events are delivered.
returned: success when present
type: string
sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
cloud_watch_logs_role_arn:
description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
returned: success when present
type: string
sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
kms_key_id:
description: Full ARN of the KMS Key used to encrypt log files.
returned: success when present
type: string
sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
tags:
description: hash/dictionary of tags applied to this resource
returned: success
type: dict
sample: {'environment': 'dev', 'Name': 'default'}
'''
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
# Handled in main() by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info,
HAS_BOTO3, ansible_dict_to_boto3_tag_list,
boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict)
def create_trail(module, client, ct_params):
    """
    Create a CloudTrail trail and return the API response.

    module : AnsibleModule object
    client : boto3 CloudTrail client connection object
    ct_params : dict of keyword arguments for CloudTrail.Client.create_trail

    Returns the boto3 response dict describing the new trail (empty dict is
    never returned in practice because fail_json exits on error).
    """
    resp = {}
    try:
        resp = client.create_trail(**ct_params)
    except ClientError as err:
        # str(err) instead of err.message: botocore exceptions have no
        # .message attribute on Python 3, so the old code raised
        # AttributeError and masked the real API error.
        module.fail_json(msg=str(err), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))
    return resp
def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
    """
    Reconcile the tags on a CloudTrail resource with the desired set.

    module : AnsibleModule object
    client : boto3 CloudTrail client connection object
    tags : dict of desired tags
    trail_arn : ARN of the CloudTrail to operate on
    curr_tags : dict of the tags currently on the resource, if any
    dry_run : when True, report whether changes are needed without making them

    Returns True if tags were (or, in dry_run, would be) added, updated or
    removed.
    """
    adds = []
    removes = []
    updates = []
    changed = False

    if curr_tags is None:
        # No current tags, so everything requested is an addition.
        adds = ansible_dict_to_boto3_tag_list(tags)
    else:
        curr_keys = set(curr_tags.keys())
        new_keys = set(tags.keys())
        add_keys = new_keys - curr_keys
        remove_keys = curr_keys - new_keys
        update_keys = dict()
        for k in curr_keys.intersection(new_keys):
            if curr_tags[k] != tags[k]:
                update_keys.update({k: tags[k]})
        adds = get_tag_list(add_keys, tags)
        removes = get_tag_list(remove_keys, curr_tags)
        updates = get_tag_list(update_keys, tags)

    if removes or updates:
        changed = True
        if not dry_run:
            try:
                # Updated tags are removed here and re-added with their new
                # values in the add_tags call below.
                client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
            except ClientError as err:
                # str(err): botocore exceptions have no .message attribute on
                # Python 3 (err.message raised AttributeError).
                module.fail_json(msg=str(err), exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(err.response))

    if updates or adds:
        changed = True
        if not dry_run:
            try:
                client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
            except ClientError as err:
                module.fail_json(msg=str(err), exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(err.response))

    return changed
def get_tag_list(keys, tags):
    """
    Build a boto3-style tag list for the given keys.

    keys : iterable of tag keys to include
    tags : dict mapping tag keys to their values

    Returns a list of {'Key': ..., 'Value': ...} dicts.
    """
    return [{'Key': key, 'Value': tags[key]} for key in keys]
def set_logging(module, client, name, action):
    """
    Start or stop CloudTrail logging and return the resulting trail status.

    module : AnsibleModule object
    client : boto3 CloudTrail client connection object
    name : name or ARN of the CloudTrail to operate on
    action : 'start' or 'stop'

    Returns the CloudTrail.Client.get_trail_status response dict; any other
    action value fails the module.
    """
    if action == 'start':
        try:
            client.start_logging(Name=name)
            return client.get_trail_status(Name=name)
        except ClientError as err:
            # str(err): botocore exceptions have no .message attribute on
            # Python 3 (err.message raised AttributeError).
            module.fail_json(msg=str(err), exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(err.response))
    elif action == 'stop':
        try:
            client.stop_logging(Name=name)
            return client.get_trail_status(Name=name)
        except ClientError as err:
            module.fail_json(msg=str(err), exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(err.response))
    else:
        module.fail_json(msg="Unsupported logging action")
def get_trail_facts(module, client, name):
    """
    Describe an existing trail, including logging status and tags.

    module : AnsibleModule object
    client : boto3 CloudTrail client connection object
    name : name of the trail

    Returns a dict of trail facts, or None when no trail with that name
    exists.
    """
    try:
        trail_resp = client.describe_trails(trailNameList=[name])
    except ClientError as err:
        # str(err): botocore exceptions have no .message attribute on
        # Python 3 (err.message raised AttributeError).
        module.fail_json(msg=str(err), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))

    if not len(trail_resp['trailList']):
        # Trail doesn't exist.
        return None

    trail = trail_resp['trailList'][0]
    try:
        status_resp = client.get_trail_status(Name=trail['Name'])
        tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
    except ClientError as err:
        module.fail_json(msg=str(err), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))

    trail['IsLogging'] = status_resp['IsLogging']
    trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
    # describe_trails omits optional fields that are unset; normalize them to
    # None so main() can compare desired vs. actual values uniformly.
    optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN',
                         'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
    for v in optional_vals - set(trail.keys()):
        trail[v] = None
    return trail
def delete_trail(module, client, trail_arn):
    """
    Delete a CloudTrail trail.

    module : AnsibleModule object
    client : boto3 CloudTrail client connection object
    trail_arn : full CloudTrail ARN
    """
    try:
        client.delete_trail(Name=trail_arn)
    except ClientError as err:
        # str(err): botocore exceptions have no .message attribute on
        # Python 3 (err.message raised AttributeError).
        module.fail_json(msg=str(err), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))
def update_trail(module, client, ct_params):
    """
    Update an existing CloudTrail trail.

    (The previous docstring said "Delete a CloudTrail" -- copy/paste error.)

    module : AnsibleModule object
    client : boto3 CloudTrail client connection object
    ct_params : dict of keyword arguments for CloudTrail.Client.update_trail
    """
    try:
        client.update_trail(**ct_params)
    except ClientError as err:
        # str(err): botocore exceptions have no .message attribute on
        # Python 3 (err.message raised AttributeError).
        module.fail_json(msg=str(err), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))
def main():
    """Entry point: create, update or delete a CloudTrail and manage its
    logging state and tags, honoring Ansible check mode throughout."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
        name=dict(default='default'),
        enable_logging=dict(default=True, type='bool'),
        s3_bucket_name=dict(),
        s3_key_prefix=dict(),
        sns_topic_name=dict(),
        is_multi_region_trail=dict(default=False, type='bool'),
        enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
        include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
        cloudwatch_logs_role_arn=dict(),
        cloudwatch_logs_log_group_arn=dict(),
        kms_key_id=dict(),
        tags=dict(default={}, type='dict'),
    ))

    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,
                           required_together=required_together, required_if=required_if)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    # Collect parameters, mapping the legacy enabled/disabled states onto
    # present/absent (choices= above guarantees one branch matches).
    if module.params['state'] in ('present', 'enabled'):
        state = 'present'
    elif module.params['state'] in ('absent', 'disabled'):
        state = 'absent'
    tags = module.params['tags']
    enable_logging = module.params['enable_logging']
    ct_params = dict(
        Name=module.params['name'],
        S3BucketName=module.params['s3_bucket_name'],
        IncludeGlobalServiceEvents=module.params['include_global_events'],
        IsMultiRegionTrail=module.params['is_multi_region_trail'],
    )

    # Optional API parameters are only sent when the user supplied them.
    if module.params['s3_key_prefix']:
        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')

    if module.params['sns_topic_name']:
        ct_params['SnsTopicName'] = module.params['sns_topic_name']

    if module.params['cloudwatch_logs_role_arn']:
        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']

    if module.params['cloudwatch_logs_log_group_arn']:
        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']

    if module.params['enable_log_file_validation'] is not None:
        ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']

    if module.params['kms_key_id']:
        ct_params['KmsKeyId'] = module.params['kms_key_id']

    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='cloudtrail', region=region,
                            endpoint=ec2_url, **aws_connect_params)
    except ClientError as err:
        # str(err): botocore exceptions have no .message attribute on
        # Python 3 (err.message raised AttributeError).
        module.fail_json(msg=str(err), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))

    results = dict(
        changed=False,
        exists=False
    )

    # Get existing trail facts
    trail = get_trail_facts(module, client, ct_params['Name'])

    # If the trail exists set the result exists variable
    if trail is not None:
        results['exists'] = True

    if state == 'absent' and results['exists']:
        # If Trail exists go ahead and delete
        results['changed'] = True
        results['exists'] = False
        results['trail'] = dict()
        if not module.check_mode:
            delete_trail(module, client, trail['TrailARN'])

    elif state == 'present' and results['exists']:
        # If Trail exists see if we need to update it
        do_update = False
        for key in ct_params:
            tkey = str(key)
            # boto3 has inconsistent parameter naming so we handle it here:
            # the request uses EnableLogFileValidation, describe_trails
            # reports LogFileValidationEnabled.
            if key == 'EnableLogFileValidation':
                tkey = 'LogFileValidationEnabled'
            # We need to make an empty string equal None
            if ct_params.get(key) == '':
                val = None
            else:
                val = ct_params.get(key)
            if val != trail.get(tkey):
                do_update = True
                results['changed'] = True
                # If we are in check mode copy the changed values to the
                # trail facts in result output to show what would change.
                if module.check_mode:
                    trail.update({tkey: ct_params.get(key)})

        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
            trail = get_trail_facts(module, client, ct_params['Name'])

        # Check if we need to start/stop logging
        if enable_logging and not trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = True
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='start')
        if not enable_logging and trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = False
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='stop')

        # Check if we need to update tags on resource
        tag_dry_run = False
        if module.check_mode:
            tag_dry_run = True
        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'],
                                 curr_tags=trail['tags'], dry_run=tag_dry_run)
        if tags_changed:
            results['changed'] = True
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)

    elif state == 'present' and not results['exists']:
        # Trail doesn't exist just go create it
        results['changed'] = True
        if not module.check_mode:
            # If we aren't in check_mode then actually create it
            created_trail = create_trail(module, client, ct_params)
            # Apply tags
            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
            # Get the trail status
            try:
                status_resp = client.get_trail_status(Name=created_trail['Name'])
            except ClientError as err:
                module.fail_json(msg=str(err), exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(err.response))
            # Set the logging state for the trail to desired value
            if enable_logging and not status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='start')
            if not enable_logging and status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='stop')
            # Get facts for newly created Trail
            trail = get_trail_facts(module, client, ct_params['Name'])

        # If we are in check mode create a fake return structure for the
        # newly minted trail
        if module.check_mode:
            acct_id = '123456789012'
            try:
                sts_client = boto3_conn(module, conn_type='client', resource='sts', region=region,
                                        endpoint=ec2_url, **aws_connect_params)
                acct_id = sts_client.get_caller_identity()['Account']
            except ClientError:
                # Fall back to the placeholder account id above.
                pass
            trail = dict()
            if 'EnableLogFileValidation' not in ct_params:
                ct_params['EnableLogFileValidation'] = False
            trail.update(ct_params)
            # The API reports validation under LogFileValidationEnabled (see
            # get_trail_facts), so mirror the request parameter under that
            # key and drop the request-style one.  The old code set and then
            # immediately popped the same key, losing the fact entirely.
            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation')
            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
            trail['HasCustomEventSelectors'] = False
            trail['HomeRegion'] = region
            trail['TrailARN'] = fake_arn
            trail['IsLogging'] = enable_logging
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)

    module.exit_json(**results)
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
b9f99c6b7e9712d01382336f8494ecf25633cb41 | 53b529e8edf046971db0ef4a740520e0b3e60699 | /.history/recipebox/models_20200202160352.py | fafdd926b9e070ad125ea70f80232e6b1b526195 | [] | no_license | EnriqueGalindo/recipebox | 6b6662e517ac045a23cd43aaf296c83cf61608b2 | ace7b2699db8be20568ced7062dc85ae92aa2eee | refs/heads/master | 2020-12-28T00:46:09.079202 | 2020-02-04T04:44:35 | 2020-02-04T04:44:35 | 238,124,659 | 0 | 1 | null | 2020-04-04T21:40:24 | 2020-02-04T04:42:05 | Python | UTF-8 | Python | false | false | 300 | py | from django.db import models
class Author(models.Model):
    """A recipe author with a display name and free-form biography.

    Fixes in this revision: the class statement was missing its colon
    (SyntaxError) and ``bio`` assigned the ``TextField`` class itself
    instead of a field instance.
    """
    name = models.CharField(max_length=30)
    bio = models.TextField()
class Recipe(models.Model):
    """A recipe written by an Author.

    Fixes in this revision: the class statement was missing its colon
    (SyntaxError) and ``description`` assigned the ``TextField`` class
    itself instead of a field instance.
    """
    title = models.CharField(max_length=30)
    # Deleting an Author cascades to their recipes.
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    description = models.TextField()
"egalindo@protonmail.com"
] | egalindo@protonmail.com |
9c91b7b8e91ddc4fc9938f0c66b1f9450885d3b0 | 82090f948cce1bf26c0cc25da58e7739d7e9c624 | /core/models.py | 0490988914eb2b8e0dcbc3b8b2dbc82c7ccf8d9b | [] | no_license | Ehsan-63/django-qa | 4e98e9ee2bcc96238829e6a515cfdd1ed2733d61 | e2f467aeba21e082d0c2761e05e0cdc31bd7bf22 | refs/heads/master | 2022-11-27T21:42:03.531635 | 2020-07-29T15:57:32 | 2020-07-29T15:57:32 | 284,274,854 | 0 | 0 | null | 2020-08-01T14:23:12 | 2020-08-01T14:23:12 | null | UTF-8 | Python | false | false | 935 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Question(models.Model):
    """A question posted by a user; answers link back via Answer.question."""
    # Asker; deleting the user cascades to their questions.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='questions')
    title = models.CharField(max_length=200)
    # URL-friendly form of the title, used in get_absolute_url().
    slug = models.SlugField(max_length=200)
    body = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    # Presumably flags a question closed to new answers -- not enforced in
    # this file; confirm against the views before relying on it.
    closed = models.BooleanField(default=False)

    def __str__(self):
        # Truncate long titles so admin/list displays stay compact.
        return f'{self.user} - {self.title[:20]}'

    def get_absolute_url(self):
        """Canonical detail URL for this question (core:detail takes id and slug)."""
        return reverse('core:detail', args=[self.id, self.slug])
class Answer(models.Model):
    """An answer posted by a user to a Question."""
    # Both FKs cascade: deleting the user or the question deletes the answer.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='answers')
    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='answers')
    body = models.TextField()
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Show the answerer plus a truncated question title for compact display.
        return f'{self.user} - {self.question.title[:20]}'
| [
"amirbig44@gmail.com"
] | amirbig44@gmail.com |
2dea73bc2e8fdb0faa848365e1edfb79b68c3fc1 | ebdd2dca7e1e0a9a5617e5231d72db634f1f32e1 | /myproject/settings.py | 0016fee7b854e2fb22ccd63126151dfa5d767081 | [] | no_license | craigds/django_25454 | adf3d4903a400a980066ccc1e39145a587045ea9 | 046603d9a24d1cc87a02af3712332560adf57f91 | refs/heads/master | 2021-01-10T13:17:38.884649 | 2019-01-06T20:40:57 | 2019-01-06T20:40:57 | 46,089,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,763 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; regenerate it for
# any non-development deployment.
SECRET_KEY = 'qiw64(b7pq7xccl&ej_15k#yy0)^w@-(brkp)bkaii9%(gjb!x'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # PostgreSQL-specific fields/lookups; pairs with the postgresql backend
    # configured in DATABASES below.
    'django.contrib.postgres',
    'myapp',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'myproject.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'myproject.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Empty USER/PASSWORD/HOST/PORT fall back to the local default connection.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'mydatabase',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| [
"craig.destigter@koordinates.com"
] | craig.destigter@koordinates.com |
76f66d757b47e9b8fd951a8d9ffb16d55d1a1c70 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /n4JA3je7FEFfZKaWp_22.py | 97d9d5e1b036a363bf62e5a79d65b41da93f716f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
def million_in_month(first_month, multiplier):
    """Return the month number in which cumulative earnings reach 1,000,000.

    Each month's payment is the previous payment times ``multiplier``
    (the first payment credited is ``first_month * multiplier``), and the
    month count starts at 1.
    """
    months_elapsed = 0
    running_total = 0
    payment = first_month
    while running_total < 1_000_000:
        payment *= multiplier
        running_total += payment
        months_elapsed += 1
    return months_elapsed + 1
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4e14cfd9cbd068bcaa900c144725772e638d912d | 3be8da1d39bef1e09e4c8e7a6b736d7fc74a3c0f | /webserver/opentrain/common/static/common/ot_i18n.py | 220f6e9b537c255d90f967abf1f980bd02e07611 | [
"BSD-3-Clause"
] | permissive | amitzini/OpenTrain | bbe5b2fc1b1b118931f7aac94667083c1b5cf4da | 25ff81df668a9eba1c4369f9a789e34c60b44096 | refs/heads/master | 2020-04-01T22:36:01.131143 | 2014-10-27T22:07:40 | 2014-10-27T22:07:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | from django.utils.translation import ugettext_noop
# Translation-catalog markers for the OpenTrain UI.  ugettext_noop() only
# marks each literal for extraction into the .po files (no runtime
# translation), so every string below is a msgid and must stay byte-identical
# to the translation catalogs -- do not "fix" spelling here without updating
# the catalogs in the same change.

# UI sections / screens
ugettext_noop("Search In")
ugettext_noop("Device Reports")
ugettext_noop("Live Trains")
ugettext_noop("Report Details")
ugettext_noop("Distances")
# Apparently railway station names (they recur in the route names below) --
# confirm against the stops data.
ugettext_noop("Akko")
ugettext_noop("Modiin")
ugettext_noop("Modiin Center")
ugettext_noop("Kiryat Hayyim")
ugettext_noop("Kiryat Motzkin")
ugettext_noop("Leb Hmifratz")
ugettext_noop("Hutsot HaMifrats")
# NOTE(review): "Akko" is marked twice (see a few lines above) -- harmless
# but redundant.
ugettext_noop("Akko")
ugettext_noop("Nahariyya")
ugettext_noop("Haifa Center HaShmona")
ugettext_noop("Haifa Bat Gallim")
ugettext_noop("Haifa Hof HaKarmel (Razi'el)")
ugettext_noop("Atlit")
ugettext_noop("Binyamina")
ugettext_noop("Kesariyya - Pardes Hanna")
ugettext_noop("Hadera West")
ugettext_noop("Natanya")
ugettext_noop("Bet Yehoshua")
ugettext_noop("Herzliyya")
ugettext_noop("Tel Aviv - University")
ugettext_noop("Tel Aviv Center - Savidor")
ugettext_noop("Bne Brak")
ugettext_noop("Petah Tikva Kiryat Arye")
ugettext_noop("Petah Tikva Sgulla")
ugettext_noop("Tel Aviv HaShalom")
ugettext_noop("Holon Junction")
ugettext_noop("Holon - Wolfson")
ugettext_noop("Bat Yam - Yoseftal")
ugettext_noop("Bat Yam - Komemiyyut")
ugettext_noop("Kfar Habbad")
ugettext_noop("Tel Aviv HaHagana")
ugettext_noop("Lod")
ugettext_noop("Ramla")
ugettext_noop("Ganey Aviv")
ugettext_noop("Rehovot E. Hadar")
ugettext_noop("Be'er Ya'akov")
ugettext_noop("Yavne")
ugettext_noop("Ashdod Ad Halom")
ugettext_noop("Ashkelon")
ugettext_noop("Bet Shemesh")
ugettext_noop("Jerusalem Biblical Zoo")
ugettext_noop("Jerusalem Malha")
ugettext_noop("Kiryat Gat")
ugettext_noop("Be'er Sheva North University")
ugettext_noop("Be'er Sheva Center")
ugettext_noop("Dimona")
ugettext_noop("Lehavim - Rahat")
ugettext_noop("Ben Gurion Airport")
ugettext_noop("Kfar Sava")
ugettext_noop("Rosh Ha'Ayin North")
ugettext_noop("Yavne - West")
ugettext_noop("Rishon LeTsiyyon HaRishonim")
ugettext_noop("Hod HaSharon")
ugettext_noop("Sderot")
ugettext_noop("Rishon LeTsiyyon - Moshe Dayan")
# routes
ugettext_noop("Tel Aviv Center - Rishon LeTsiyyon HaRishonim")
ugettext_noop("Nahariyya - Modiin Center")
ugettext_noop("Nahariyya - Be'er Sheva Center")
ugettext_noop("Binyamina - Ashkelon")
ugettext_noop("Nahariyya - Ben Gurion Airport -Be'er Sheva Center")
ugettext_noop("Kiryat Motzkin - Haifa Hof HaKarmel (Razi'el)")
ugettext_noop("Tel Aviv Center - Savidor - Jerusalem Malha")
ugettext_noop("Be'er Sheva North University - Dimona")
ugettext_noop("Hod HaSharon - Ashkelon")
ugettext_noop("Hertsliyya - Be'er Sheva Center")
# days
ugettext_noop("Sunday")
ugettext_noop("Monday")
ugettext_noop("Tuesday")
# NOTE(review): "Wendesay" looks like a typo for "Wednesday", but the msgid
# must match the .po catalogs -- fix both together.
ugettext_noop("Wendesay")
ugettext_noop("Thursday")
ugettext_noop("Friday")
ugettext_noop("Saturday")
# timetable / map labels
ugettext_noop("Stop")
ugettext_noop("Arrival")
ugettext_noop("Departure")
ugettext_noop("Live")
ugettext_noop('Live Trains');
ugettext_noop('Simulated');
ugettext_noop('WIP');
ugettext_noop('No Trips Now');
ugettext_noop('Current Trains List')
ugettext_noop("Total # of reports (with loc)")
ugettext_noop("to")
ugettext_noop("on")
ugettext_noop("Search Reports")
ugettext_noop("Go Live")
ugettext_noop("Stop Live")
ugettext_noop("auto zoom")
ugettext_noop("Stops Only")
ugettext_noop("All Reports")
ugettext_noop("Please wait. Loading Reports, will take some time...")
ugettext_noop("Map for device id")
ugettext_noop("Total # of reports (with loc)")
ugettext_noop('cur')
ugettext_noop('exp')
| [
"ekeydar@gmail.com"
] | ekeydar@gmail.com |
32d121133bb107b2caf9971092b1b2fbaff65cc6 | 38ee4430af92b52230a79a6965c03ae3f2375bf4 | /setup.py | 357fc8235e22e0ef1714b73e14cf517462aff641 | [
"MIT"
] | permissive | dannguyen/poppler_wrap | 78df3dc391411b8770c6d37e647582e49a7a2343 | abd23dbd273e10cb34ceeb4b6a078faa525c3bb4 | refs/heads/master | 2020-03-17T00:06:03.328034 | 2018-05-12T04:16:13 | 2018-05-12T04:16:13 | 133,103,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

# The PyPI long description is the README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependency: Click powers the console entry point below.
requirements = ['Click>=6.0', ]

setup_requirements = ['pytest-runner', ]

test_requirements = ['pytest', ]

setup(
    author="Dan Nguyen",
    author_email='dansonguyen@gmail.com',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    description="Python wrapper around my favorite poppler functions for working with PDFs",
    # Installs a `poppler_wrap` CLI that dispatches to cli.main.
    entry_points={
        'console_scripts': [
            'poppler_wrap=poppler_wrap.cli:main',
        ],
    },
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='poppler_wrap',
    name='poppler_wrap',
    packages=find_packages(include=['poppler_wrap']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/dannguyen/poppler_wrap',
    version='0.1.0',
    zip_safe=False,
)
| [
"dansonguyen@gmail.com"
] | dansonguyen@gmail.com |
35915cf212498c6065c0b5e3a79edb5030459d37 | b09a8df80c35e3ccca43cd74cec6e1a14db76ad7 | /user_import/views.py | 2282c2db1b9624c1c3db81b4feb4e259794dff40 | [
"MIT"
] | permissive | ofa/everyvoter | 79fd6cecb78759f5e9c35ba660c3a5be99336556 | 3af6bc9f3ff4e5dfdbb118209e877379428bc06c | refs/heads/master | 2021-06-24T19:38:25.256578 | 2019-07-02T10:40:57 | 2019-07-02T10:40:57 | 86,486,195 | 7 | 3 | MIT | 2018-12-03T19:52:20 | 2017-03-28T17:07:15 | Python | UTF-8 | Python | false | false | 3,269 | py | """Views for Import App"""
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponse
from django.views.generic import CreateView, DetailView
from django.urls import reverse_lazy
from django_filters.views import FilterView
import unicodecsv
from manage.mixins import ManageViewMixin
from branding.mixins import OrganizationViewMixin, OrganizationCreateViewMixin
from everyvoter_common.utils.slug import slugify_header
from everyvoter_common.utils.uuid_slug_mixin import UUIDSlugMixin
from user_import.models import UserImport
from user_import.tasks import ingest_import
from user_import.forms import UserImportForm
from user_import.filters import UserImportFilter
class ImportListView(OrganizationViewMixin, ManageViewMixin, FilterView):
    """List all imports for the current organization, filterable and paginated."""
    model = UserImport
    template_name = "user_import/list_imports.html"
    paginate_by = 15
    # Name of the template context variable holding the page of imports.
    context_object_name = 'imports'
    filterset_class = UserImportFilter
class ImportCreateView(OrganizationViewMixin, ManageViewMixin,
                       SuccessMessageMixin, OrganizationCreateViewMixin,
                       CreateView):
    """Create a new import and kick off its asynchronous ingestion."""
    model = UserImport
    form_class = UserImportForm
    template_name = "user_import/create_import.html"
    success_url = reverse_lazy('manage:user_import:list_imports')
    success_message = "Import %(name)s was started"

    def form_valid(self, form):
        """Handle a valid form: stamp uploader/status, save, then queue ingestion."""
        form.instance.uploader = self.request.user
        form.instance.status = 'pending'
        # super().form_valid() saves the object, so self.object has a pk below.
        response = super(ImportCreateView, self).form_valid(form)
        ingest_import.delay(self.object.pk)
        return response
class ImportErrorCSVView(OrganizationViewMixin, ManageViewMixin,
                         UUIDSlugMixin, DetailView):
    """Download errors from a specific import as a CSV attachment."""
    model = UserImport
    # Imports are looked up by their UUID rather than the integer pk.
    slug_field = 'uuid'

    def render_to_response(self, context, **response_kwargs):
        """Stream a CSV of every failed record status for this import."""
        response = HttpResponse(content_type='text/csv')
        # pylint: disable=line-too-long
        response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(
            slugify_header(self.object.name))
        # Only failed rows are exported; select_related avoids one query
        # per row when reading the related import_record below.
        import_record_statuses = self.object.importrecordstatus_set.filter(
            status='failed').select_related('import_record')
        field_names = [
            'status',
            'error_type',
            'first_name',
            'last_name',
            'email',
            'address',
            'note'
        ]
        writer = unicodecsv.DictWriter(response, fieldnames=field_names)
        writer.writeheader()
        for import_record_status in import_record_statuses:
            import_record = import_record_status.import_record
            # Merge status fields with the original record's identity fields.
            row = {
                'status': import_record_status.status,
                'error_type': import_record_status.error_type,
                'note': import_record_status.note,
                'first_name': import_record.first_name,
                'last_name': import_record.last_name,
                'email': import_record.email,
                'address': import_record.address
            }
            writer.writerow(row)
        return response
| [
"nickcatal@gmail.com"
] | nickcatal@gmail.com |
31b0b4a0f36dc1c645ae3945bbe72e3193013840 | d7ad696cd1b550bb41d20f87b83c984ec7f19aa7 | /atcoder/python/_old/educational_dp/01/f_lcs.py | daed11e02ec4f5622ab55e37e0c7107af0ef7faf | [] | no_license | mida-hub/hobby | 2947d10da7964d945e63d57b549c1dcb90ef7305 | 6e6f381e59fc2b0429fab36474d867aa3855af77 | refs/heads/master | 2022-12-21T23:33:14.857931 | 2022-12-19T16:30:34 | 2022-12-19T16:30:34 | 147,890,434 | 0 | 0 | null | 2021-03-20T04:31:58 | 2018-09-08T01:31:59 | Jupyter Notebook | UTF-8 | Python | false | false | 861 | py | s = input()
t = input()

# dp[i][j] = length of the longest common subsequence of s[:i] and t[:j].
len_s = len(s)
len_t = len(t)
dp = [[0] * (len_t + 1) for x in range(len_s + 1)]
# print(dp)
dp[0][0] = 0
for i in range(0, len_s + 1):
    for j in range(0, len_t + 1):
        if i > 0 and j > 0:
            if s[i-1] == t[j-1]:
                # Matching characters extend the LCS of the two prefixes.
                dp[i][j] = dp[i-1][j-1] + 1
            else:
                dp[i][j] = max([dp[i-1][j], dp[i][j-1]])
# NOTE(review): this prints the entire DP table -- looks like leftover
# debug output; a judge would reject it alongside the answer below.
for j, d in enumerate(dp):
    print(d[:][:])
# print(dp[len_s][len_t])

# Backtrack through the table to reconstruct one LCS string.
# len_s / len_t are reused from here on as 0-based cursors into s and t,
# and dp_len counts how many LCS characters are still to be emitted.
dp_len = dp[len_s][len_t]
ans = ''
dp_len -= 1
len_s -= 1
len_t -= 1
while dp_len >= 0:
    # print(f'dp_len:{dp_len}')
    # print(f'len_s:{len_s}')
    # print(f'len_t:{len_t}')
    if s[len_s] == t[len_t]:
        # The matching character is part of the LCS; step diagonally.
        ans = s[len_s] + ans
        dp_len -= 1
        len_s -= 1
        len_t -= 1
    elif dp[len_s][len_t] == dp[len_s-1][len_t]:
        len_s -= 1
    else:
        len_t -= 1
print(ans)
| [
"rusuden0106@gmail.com"
] | rusuden0106@gmail.com |
a66f5a08fb150733ba6bca0867715ea027d57c3c | 4cb9b7ddc5df9e528ce6b36ab13f8c842d8c0cfa | /vistrails/packages/URL/https.py | ce53bcfbb2b4be9f8b97751bc4c68bcc2bd50dbc | [
"BSD-3-Clause"
] | permissive | anukat2015/VisTrails | bca4f812ffe9e69a1aa4174267a4225f1245638f | c24e310bf62cc0151e084aa4f9e50026e788afbd | refs/heads/master | 2021-01-18T15:42:35.910689 | 2015-11-04T16:15:19 | 2015-11-05T18:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,378 | py | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Python's handling of certificate verification is irresponsible and wrong.
# Having to include the code below to get what should be the only acceptable
# default behavior is a shame
# Code from https://gist.github.com/schlamar/2993700
from __future__ import division
import httplib
import urllib2
import ssl
import certifi
from backports.ssl_match_hostname import match_hostname
__all__ = ['VerifiedHTTPSHandler', 'https_handler', 'build_opener']
class CertValidatingHTTPSConnection(httplib.HTTPConnection):
    """HTTPS connection that actually verifies the server certificate.

    After the plain TCP connect the socket is wrapped in SSL; when a CA
    bundle is supplied the peer's certificate chain is required and the
    certificate is matched against the host name.
    """
    default_port = httplib.HTTPS_PORT

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 ca_certs=None, strict=None, **kwargs):
        httplib.HTTPConnection.__init__(self, host, port, strict, **kwargs)
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_certs = ca_certs
        # Verification is only possible when a CA bundle was provided.
        if self.ca_certs:
            self.cert_reqs = ssl.CERT_REQUIRED
        else:
            self.cert_reqs = ssl.CERT_NONE

    def connect(self):
        """Connect, wrap the socket in SSL and (optionally) verify the peer."""
        httplib.HTTPConnection.connect(self)
        self.sock = ssl.wrap_socket(self.sock, keyfile=self.key_file,
                                    certfile=self.cert_file,
                                    cert_reqs=self.cert_reqs,
                                    ca_certs=self.ca_certs)
        if self.cert_reqs & ssl.CERT_REQUIRED:
            cert = self.sock.getpeercert()
            # Bug fix: the original used split(':', 0) -- a maxsplit of 0
            # performs no split at all, so a "name:port" host would never
            # have its port stripped.  maxsplit=1 restores the intent.
            # NOTE(review): httplib usually stores self.host without the
            # port already, and an IPv6 literal host would need different
            # handling -- confirm against callers.
            hostname = self.host.split(':', 1)[0]
            match_hostname(cert, hostname)
class VerifiedHTTPSHandler(urllib2.HTTPSHandler):
    """urllib2 HTTPS handler whose connections validate server certificates."""

    def __init__(self, **kwargs):
        urllib2.HTTPSHandler.__init__(self)
        # Connection keyword arguments (e.g. ca_certs) forwarded to every
        # CertValidatingHTTPSConnection this handler opens.
        self._connection_args = kwargs

    def https_open(self, req):
        def _make_connection(host, **overrides):
            # Per-call overrides win over the handler-level defaults.
            merged = dict(self._connection_args, **overrides)
            return CertValidatingHTTPSConnection(host, **merged)
        return self.do_open(_make_connection, req)
https_handler = VerifiedHTTPSHandler(ca_certs=certifi.where())
def build_opener(*handlers, **kwargs):
    """Build a urllib2 opener that validates HTTPS certificates by default.

    Accepts the keyword-only flag ``insecure`` (default False); any other
    keyword raises TypeError.  Unless insecure, the verifying HTTPS handler
    is appended before the proxy handler.
    """
    insecure = kwargs.pop('insecure', False)
    if kwargs:
        raise TypeError("build_opener() got unexpected keyword argument %r" %
                        next(iter(kwargs)))
    chain = list(handlers)
    if not insecure:
        chain.append(https_handler)
    chain.append(urllib2.ProxyHandler())
    return urllib2.build_opener(*chain)
| [
"remirampin@gmail.com"
] | remirampin@gmail.com |
ae779f9bd683ed5f93e9a47dcc1000e53c0dc0b7 | ea522b496372174216fba2aad29bf231a28cc819 | /QuikLab/trunk/.metadata/.plugins/org.eclipse.core.resources/.history/69/a0b8d3f1e48f00181164c0c8bfbc6595 | 0f9acdf33db2a7fb37006af4de7657f023ec3da1 | [] | no_license | gatdotZF/svn192.168.1.5 | 15fdf074042b0a1e5e8c795d3c340080c04ba496 | 196bd4c83b12e3476c269057a4ef3b730a967d9e | refs/heads/master | 2020-07-26T17:45:16.259037 | 2019-09-16T06:36:35 | 2019-09-16T06:36:35 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 7,738 | #! /usr/bin/env python
#coding=GB18030
from pywinauto import application
import pywinauto.base_wrapper as ba
import SendKeys
import time
import os
import pywinauto.mouse as mouse
class Pywin(object):
    """Thin convenience wrapper around pywinauto's UIA backend."""
    def __init__(self):
        self.app = application.Application(backend='uia')
    def start(self,tl_dir,tl_name):
        """chdir into tl_dir and launch the executable tl_name."""
        os.chdir(tl_dir)
        self.app.start(tl_name)
    def connect(self, window_name):
        """Attach to an already-running application by window title."""
        self.app.connect(title = window_name)
        time.sleep(1)
    def pr(self, window_name):
        """Dump the window's control identifiers (debugging helper)."""
        self.app[window_name].print_control_identifiers()
    def close(self, window_name):
        self.app[window_name].Close()
        time.sleep(1)
    def max_window(self, window_name):
        self.app[window_name].Maximize()
        time.sleep(1)
    def menu_click(self, window_name, menulist):
        """Select a menu path (e.g. 'File->Open') in the given window."""
        self.app[window_name].MenuSelect(menulist)
        time.sleep(1)
    def input(self, window_name, controller, content):
        """Type content into the named control."""
        self.app[window_name][controller].type_keys(content)
        time.sleep(1)
    def click(self, window_name, controller):
        self.app[window_name][controller].click_input()
        time.sleep(1)
    def right_click(self, window_name, controller):
        self.app[window_name][controller].right_click_input()
    def double_click(self, window_name, controller, x ,y):
        """Left-double-click the control at control-relative coords (x, y)."""
        self.app[window_name][controller].double_click_input(button = "left",coords = (x, y))
        time.sleep(1)
    def focus(self,window_name,controller):
        self.app[window_name][controller].set_focus()
    def drag(self,window_name,controller,dx,dy,sx,sy):
        """Left-drag from (sx, sy) to (dx, dy) in absolute screen coordinates."""
        self.app[window_name][controller].drag_mouse_input(dst=(dx,dy),src=(sx,sy),button='left',pressed='',absolute=True)
    def Sendk(self,key_name,times):
        """Send key_name repeated `times` times using SendKeys '{KEY n}' syntax."""
        SendKeys.SendKeys('{%s %d}'%(key_name,times))
if __name__ == "__main__":
    # Ad-hoc interactive scratchpad for driving the "QuiKLab V3.0" UI.
    # Most lines are commented-out experiments; the large triple-quoted
    # string at the bottom is a disabled recording of a full setup run.
    app=Pywin()
    # # tl_dir = r'D:\Program Files\QuiKLab3.0'
    # # tl_name = r'D:\Program Files\QuiKLab3.0\MainApp.exe'
    #
    # # app.start(tl_dir,tl_name)
    # # time.sleep(2)
    # # window_name =u'登录--试验自动测试管理系统'
    window_name = r'QuiKLab V3.0'
    time.sleep(2)
    # # window_name = u'图形监控'
    dlg=app.connect(window_name)  # NOTE(review): Pywin.connect returns None, so dlg is unused
    # time.sleep(2)
    # controller="Button17"
    #app.focus(window_name, controller)
    # app.Sendk('TAB')
    # time.sleep(2)
    # app.input(window_name,controller,123)
    # #
    # app.click(window_name,'CheckBox')
    #
    app.pr(window_name)
    #
    # Rebinds `app` to a raw pywinauto Application (shadows the Pywin above).
    app=application.Application(backend='uia')
    window_name = r'QuiKLab V3.0'
    # app.connect(title = window_name)
    # print "11111"
    # app[window_name]['Button10'].click_input()
    # app[window_name][u'确定'].click_input()
    # app[window_name]['ComBox1'].select(1)
    # app[window_name].Maximize()
    app[window_name][u'编辑__信号__signal'].click_input()
    # print app[window_name]['Static8'].texts()
    # if 'name' in app[window_name]['statics2'].texts()[0]:
    #     print "get"
    # app.click(window_name, 'Edit8')
    # test=ctr.UIAWrapper()
    # test = ba.BaseWrapper()
    # test.get_properties()
    # print "finish!!!!"
    # time.sleep(100000)
    # app.right_click(window_name, 'TreeView2')
    # app.Sendk('DOWN', 2)
    # app.Sendk('ENTER',1)
    # app.click(window_name, 'Button4')
    # app.right_click(window_name,'TreeItem16')  # test-case tree root
    # time.sleep(1)
    # app.Sendk('DOWN',4)
    # app.Sendk('ENTER',1)
    # Dead code below: a bare string literal (never executed), kept verbatim.
    '''
    #添加总线
    app.click(window_name,'TreeItem10') #进入环境配置
    mouse.right_click(coords=(1577, 492))
    app.Sendk('DOWN', 1)
    app.Sendk('ENTER',1)
    app.click(window_name, 'COmboBox1')
    app.input(window_name, 'ComboBox1', 'tcp')#添加 TCP/IP协议
    app.Sendk('ENTER',1)
    app.click(window_name, 'Button5') #确定
    #添加设备
    app.right_click(window_name,'Pane2')
    app.Sendk('DOWN', 3)
    app.Sendk('ENTER',1) #选择添加设备
    #添加目标机
    app.click(window_name, 'ComboBox1')
    app.Sendk('UP', 1)
    app.Sendk('ENTER', 1)
    #添加IP
    app.click(window_name, 'Edit2')
    app.Sendk('RIGHT', 1)
    app.input(window_name, 'Edit2', '192')
    app.Sendk('.', 1)
    app.input(window_name, 'Edit3', '168')
    app.Sendk('.', 1)
    app.input(window_name, 'Edit4', '1')
    app.Sendk('.', 1)
    app.input(window_name, 'Edit5', '5')
    app.click(window_name, 'Button5')#确定
    #添加客户端接口
    app.click(window_name, 'ComboBox1')
    app.Sendk('DOWN', 1)
    app.Sendk('ENTER', 1)
    #添加IP
    app.click(window_name, 'Edit9')
    app.Sendk('RIGHT', 1)
    app.input(window_name, 'Edit9', '192')
    app.Sendk('.', 1)
    app.input(window_name, 'Edit10', '168')
    app.Sendk('.', 1)
    app.input(window_name, 'Edit11', '1')
    app.Sendk('.', 1)
    app.input(window_name, 'Edit12', '5')
    #设置端口
    app.click(window_name, 'Edit13')
    app.Sendk('BACKSPACE', 1)
    app.input(window_name, 'Edit13', '6060')
    app.click(window_name, 'Button7')#确定
    #添加服务端接口
    pywinauto.mouse.right_click(coords=(923,510))
    app.Sendk('DOWN', 2)
    app.Sendk('ENTER', 1)
    app.click(window_name, 'ComboBox1')
    app.Sendk('DOWN', 2)
    app.Sendk('ENTER', 1)
    app.click(window_name, 'Button7')#确定
    #添加信号
    pywinauto.mouse.press(button='left', coords=(902, 456))
    pywinauto.mouse.move(coords=(940, 456))
    pywinauto.mouse.release(button='left', coords=(940, 456))
    app.click(window_name, 'ComboBox5')
    app.input(window_name, 'ComboBox5', 'i_block')#添加数据结构
    app.click(window_name, 'Button11')#确定
    #新建测试用例
    app.click(window_name, 'TreeItem11')
    app.right_click(window_name, 'TreeView2')
    app.Sendk('DOWN', 2)
    app.Sendk('ENTER',1)
    app.click(window_name, 'Button4')
    app.right_click(window_name,'TreeItem16') #测试用例root
    time.sleep(1)
    app.Sendk('DOWN',4)
    app.Sendk('ENTER',1)
    app.Sendk('TAB',2)
    app.input(window_name, 'Edit1', 'content2')#输入用例名
    app.click(window_name,'Button4') #确定
    app.click(window_name,'TreeItem16')
    app.Sendk('RIGHT',1)
    app.Sendk('DOWN',1)
    #添加信号
    app.click(window_name, 'TabItem2')
    app.click(window_name, 'Button16')
    app.click(window_name, 'Table1')
    pywinauto.mouse.click(coords=(602,450))#复选信号
    app.click(window_name, 'Button14')#确定
    #测试用例编辑
    app.click(window_name, 'TabItem1')
    app.right_click(window_name, 'TreeItem16')
    app.Sendk('UP', 1)
    app.Sendk('ENTER', 1)
    pywinauto.mouse.press(button='left',coords=(600,229))
    pywinauto.mouse.move(coords=(661, 646))
    pywinauto.mouse.release(button='left', coords=(661, 646))
    app.click(window_name, 'Button10')
    #添加UUT
    app.right_click(window_name,'Pane2')
    app.click(window_name, 'ComboBox1')
    app.Sendk('UP', 1)
    app.Sendk('DOWN', 1)
    app.Sendk('ENTER', 1)
    app.click(window_name, 'Button5')
    # app.click(window_name,'TreeItem12') #测试用例
    # SendKeys.SendKeys('{LEFT}')
    # app.right_click(window_name, 'TreeItem16')
    # SendKeys.SendKeys('{DOWN}')
    # app.Sendk('ENTER',1)
    app.click(window_name, 'TabItem5')#图形监控
    controller='Button4'
    app.drag(window_name, controller, 810, 484, 1298, 406)#发送按钮控件
    app.drag(window_name, controller, 849, 280, 1302, 307)#发送旋钮控件
    window_name = 'QuiKLab V3.0'
    app.connect(window_name)
    app.click(window_name, "TabItem1") #测试用例编辑
    # app.click(window_name, "TabItem2") #测试用例变量
    # app.double_click(window_name, 'Pane0',10,10)
    # app.drag(window_name,'Pane0',960,540,960,300)
    app.drag(window_name,'Pane0',960,300,960,540)
    '''
| [
"994430058@qq.com"
] | 994430058@qq.com | |
4a6fb5bb1f0a3073cf9fe3811f9b0bbeccd8fe8f | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /pychron/furnace/tasks/thermo/furnace_plugin.py | 10d2f425a6afc473edc838188bb86750b9aa9268 | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 2,352 | py | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.furnace.ifurnace_manager import IFurnaceManager
from pychron.furnace.tasks.furnace_plugin import BaseFurnacePlugin
from pychron.furnace.tasks.thermo.preferences import ThermoFurnacePreferencesPane
from pychron.furnace.tasks.thermo.task import ThermoFurnaceTask
class ThermoFurnacePlugin(BaseFurnacePlugin):
    """Plugin wiring the Thermo furnace manager, task and UI panes."""
    name = "ThermoFurnace"
    id = "pychron.furnace.thermo.plugin"
    # (module path, class name) pair the base plugin uses to build the manager.
    klass = ("pychron.furnace.thermo.furnace_manager", "ThermoFurnaceManager")
    task_klass = ThermoFurnaceTask

    # def _help_tips_default(self):
    #     return ['']

    def _deactivations_default(self):
        """Return deactivation callables: stop updates when no furnace task window remains."""
        application = self.application

        def func():
            manager = application.get_service(IFurnaceManager)
            if manager:
                # for/else: stop_update() runs only if NO open window hosts
                # a furnace task (i.e. the loop never hit `break`).
                for window in application.windows:
                    if "furnace" in window.active_task.id:
                        break
                else:
                    manager.stop_update()

        return [func]

    def _activations_default(self):
        """Start the manager's update loop on plugin activation."""
        man = self._get_manager()
        return [man.start_update]

    def _panes_default(self):
        def f():
            # Local import -- presumably to defer loading UI modules; confirm.
            from pychron.furnace.tasks.thermo.panes import ExperimentFurnacePane

            manager = self._get_manager()
            fpane = ExperimentFurnacePane(model=manager)
            return fpane

        return [f]

    def test_furnace_api(self):
        """Delegate the furnace API self-test to the manager."""
        man = self._get_manager()
        return man.test_furnace_api()

    def _preferences_panes_default(self):
        return [ThermoFurnacePreferencesPane]
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
b8a0cc6a7e1290fac4b5b319a556b86a8c26c97c | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Practical Exercise/C201/__init__.py | cb700ef657ab253b573b6d0d6253c8c0b590d557 | [] | no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 77 | py | #! python3
from r_DailyProgrammer.Practical Exercise.C201.main import main
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
c4335347dc7e11719221f609202691493973425b | ef7eabdd5f9573050ef11d8c68055ab6cdb5da44 | /codeEval/hard/closest_pair.py | 1388ae98fc85075f1ac1d62410aeba96440d15da | [
"WTFPL"
] | permissive | gauravsingh58/algo | cdbf68e28019ba7c3e4832e373d32c71902c9c0d | 397859a53429e7a585e5f6964ad24146c6261326 | refs/heads/master | 2022-12-28T01:08:32.333111 | 2020-09-30T19:37:53 | 2020-09-30T19:37:53 | 300,037,652 | 1 | 1 | WTFPL | 2020-10-15T09:26:32 | 2020-09-30T19:29:29 | Java | UTF-8 | Python | false | false | 654 | py | import sys
from operator import itemgetter
def get_distance(p, q):
    """Return the Euclidean distance between 2-D points p and q."""
    return ((p[0]-q[0])**2 + (p[1]-q[1])**2)**0.5

def find_closest_pair(ls):
    """Return the closest-pair distance in ls formatted as "%.4f".

    Returns "INFINITY" when the minimum distance exceeds 10000 (the
    problem's sentinel), which also covers inputs with fewer than two
    points.

    Bug fix: the original sorted the points and compared only adjacent
    pairs of that order, which does not find the closest pair in 2-D
    (counterexample: [(0,0), (1,100), (2,0)] -> true answer 2, adjacent
    pairs ~100).  All pairs are compared instead; O(n^2), which is fine
    for the input sizes this script handles.
    """
    best = 40000  # sentinel, larger than any reportable distance
    for i in range(len(ls) - 1):
        for j in range(i + 1, len(ls)):
            d = get_distance(ls[i], ls[j])
            if d < best:
                best = d
    if best > 10000:
        return "INFINITY"
    else:
        return "%.4f" % best
# Driver: reads the input file named on the command line.  Each test case
# is a count line N followed by N "x y" point lines; a count of 0
# terminates the input.  NOTE(review): Python 2 source (print statement,
# list-returning map) -- will not run under Python 3 as-is.
points, n = [], 0
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
    if n == 0:
        # Previous case complete: report it before starting the next one.
        if len(points) > 0:
            print find_closest_pair(points)
        points = []
        n = int(test.strip())
        if n == 0:
            break
    else:
        points.append(map(int, test.split()))
        n -= 1
test_cases.close() | [
"elmas.ferhat@gmail.com"
] | elmas.ferhat@gmail.com |
94d1ce9d86c7f94f8caecce50f1de144d6242cd6 | 2aa0cf7a10120fe87f1c38dff205a5099f04846f | /main.py | bf98a1d12228dce8cd72fc2caeaab523335a48a0 | [] | no_license | bullitserg/certificate_checker | 749a3a33c8004fad86d291b52593e6b2650ab287 | b7e3ddf15afb7fdc09285bb0822b738daa10a67e | refs/heads/master | 2021-05-03T04:24:11.877319 | 2018-02-08T12:45:24 | 2018-02-08T12:45:24 | 120,614,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,197 | py | import ets.ets_certmanager_logs_parser as cert_mngr
import ets.ets_certificate_lib as cert_lib
from ets.ets_ssh_connector import SSHConnection as Ssh
from datetime import datetime
from os.path import join, normpath
from config_parser import *
import argparse
import logger_module
PROGNAME = 'Certificate checker'
DESCRIPTION = '''Скрипт для проверки сертификатов'''
VERSION = '1.0'
AUTHOR = 'Belim S.'
RELEASE_DATE = '2018-02-07'
CERTIFICATE_VERSION = 0
NOW = datetime.now()
crl_file_local = normpath(join(local_dir, crl_file))
mca_file_local = normpath(join(local_dir, mca_file))
mroot_file_local = normpath(join(local_dir, mroot_file))
crl_file_remote = normpath(join(remote_dir, crl_file))
mca_file_remote = normpath(join(remote_dir, mca_file))
mroot_file_remote = normpath(join(remote_dir, mroot_file))
connections = {1: Ssh.CONNECT_CRYPTO_1,
2: Ssh.CONNECT_CRYPTO_2,
3: Ssh.CONNECT_CRYPTO_3,
4: Ssh.CONNECT_CRYPTO_4,
5: Ssh.CONNECT_CRYPTO_5}
def show_version():
    """Print the program banner: name, version, description, author, release date."""
    print('%s %s \n %s \nAuthor: %s \nRelease date: %s'
          % (PROGNAME, VERSION, DESCRIPTION, AUTHOR, RELEASE_DATE))
# Command-line argument handling.
def create_parser():
    """Build the argparse parser for the script's CLI options."""
    arg_parser = argparse.ArgumentParser(description=DESCRIPTION)
    arg_parser.add_argument('-v', '--version', action='store_true',
                            help="Show version")
    arg_parser.add_argument('-s', '--server', type=int,
                            help="Set server number")
    arg_parser.add_argument('-f', '--file', type=str,
                            help="Set certificate file")
    return arg_parser
def update_files(server_num):
    """Refresh the local certmgr data files from crypto server *server_num*.

    Runs certmgr on the remote host to dump the mRoot and mCA stores and
    the mCA CRL list into files, then copies those files into the local
    data directory.  server_num must be a key of `connections`.
    """
    print('Обновление файлов данных с криптосервера %s...' % server_num)
    server_connection = connections[server_num]
    connect = Ssh(connection=server_connection, connection_type='key')
    with connect.open_ssh():
        # Dump the stores / CRL list on the remote side...
        connect.exec_command('/opt/cprocsp/bin/amd64/certmgr -list -store mRoot > %s' % mroot_file_remote)
        connect.exec_command('/opt/cprocsp/bin/amd64/certmgr -list -store mCA > %s' % mca_file_remote)
        connect.exec_command('/opt/cprocsp/bin/amd64/certmgr -list -store mCA -crl > %s' % crl_file_remote)
        # ...then pull the dumps down next to this script.
        connect.get_file(mroot_file_remote, mroot_file_local)
        connect.get_file(mca_file_remote, mca_file_local)
        connect.get_file(crl_file_remote, crl_file_local)
    print('Файлы данных обновлены')
def check_cert(certificate):
    """Check one certificate and recurse into its issuing (root) certificate.

    Prints, for each certificate in the chain: serial and subject key,
    presence in the local mCA/mRoot stores (chain certs only), revocation
    status per web CRL distribution point, and local CRL validity dates.
    Uses the module-level CERTIFICATE_VERSION counter to distinguish the
    user certificate (1) from chain certificates (>1).
    """
    global CERTIFICATE_VERSION
    CERTIFICATE_VERSION += 1
    # Parse the certificate under test.
    user_certificate = cert_lib.Certificate(certificate)
    # Subject key identifier and serial number.
    user_certificate_subject_key = user_certificate.get_subject_key_identifier()
    user_certificate_serial = user_certificate.get_sertificate_serial()
    print('\n=================== Checking certificate %s ===================' % CERTIFICATE_VERSION)
    print('SERIAL_N:', user_certificate_serial)
    if not user_certificate_subject_key:
        user_certificate_subject_key = 'UNKNOWN'
    print('SUBJ_KEY:', user_certificate_subject_key)
    # Revocation status from every CRL distribution point.
    web_revoke_status = user_certificate.check_web_revoke_status(user_certificate_serial, info=True)
    # Last-modified date of each CRL on its web server.
    web_crl_last_modified = user_certificate.check_web_crl_last_modified(user_certificate_serial)
    # Link to the issuing (root) certificate, if the cert names one.
    root_cert_link, error = user_certificate.get_root_certificate_file()
    # Store-membership checks apply to everything EXCEPT the user cert (the first one).
    if CERTIFICATE_VERSION > 1:
        mca_certificate_text_info = mca_certificate_mngr_file.get_text_info(user_certificate_subject_key,
                                                                            key='SubjKeyID')
        if mca_certificate_text_info:
            print("mCA: установлен")
            print(mca_certificate_text_info)
        else:
            print("mCA: отсутствует")
        mroot_certificate_text_info = mroot_certificate_mngr_file.get_text_info(user_certificate_subject_key,
                                                                                key='SubjKeyID')
        if mroot_certificate_text_info:
            print("mRoot: установлен\n")
            print(mroot_certificate_text_info)
        else:
            print("mRoot: отсутствует\n")
    # Report revocation status for each distribution point in turn.
    for web_crl_num in range(len(user_certificate.get_crl_distribution_points())):
        user_web_crl_num = web_crl_num + 1
        last_modified_info, last_modified_error = web_crl_last_modified[web_crl_num]
        if not last_modified_error:
            last_modified_date = last_modified_info
        else:
            last_modified_date = 'дата не определена'
        status_info_dict, error_info_dict = web_revoke_status[web_crl_num]
        if not error_info_dict:
            if status_info_dict:
                print(
                    'CRL (WEB_%s от %s): cертификат %s отозван %s (%s)' % (user_web_crl_num,
                                                                           last_modified_date,
                                                                           user_certificate_serial,
                                                                           status_info_dict['revoke_date'],
                                                                           status_info_dict['reason']))
            else:
                print('CRL (WEB_%s от %s): не числится в списке отозванных' % (user_web_crl_num, last_modified_date))
        else:
            print('CRL (WEB_%s): невозможно проверить наличие в списке отозванных' % user_web_crl_num)
    # Local-CRL validity check also applies only to chain certificates.
    if CERTIFICATE_VERSION > 1:
        # Check the locally-installed CRL by its validity window.
        crl_certificate_data = crl_certificate_mngr_file.get_info(user_certificate_subject_key, key='AuthKeyID')
        if crl_certificate_data:
            this_update = crl_certificate_data['ThisUpdate']
            next_update = crl_certificate_data['NextUpdate']
            if not this_update <= NOW <= next_update:
                print('CRL (mCA): установлен, действует с %s по %s (НЕ АКТУАЛЕН)' % (this_update, next_update))
            else:
                print('CRL (mCA): установлен, действует с %s по %s' % (this_update, next_update))
        else:
            print('CRL (mCA): не установлен')
    # Recurse up the chain while an issuing certificate is available.
    if root_cert_link:
        print("Проверка корневого сертификата...")
        check_cert(root_cert_link)
    else:
        print("Корневой не требуется (не указан)")
# ОСНОВНОЙ КОД
if __name__ == '__main__':
    logger = logger_module.logger()
    try:
        # Parse the command-line arguments.
        my_parser = create_parser()
        namespace = my_parser.parse_args()
        if namespace.version:
            show_version()
            exit(0)
        if namespace.server:
            # Refresh the data files from the chosen crypto server first.
            if namespace.server not in connections.keys():
                print('Параметр server должен быть одним из значений: %s' % connections.keys())
            update_files(namespace.server)
        if namespace.file:
            # Load the three local certmgr dumps, then run the check.
            mca_certificate_mngr_file = cert_mngr.CertmanagerFile(mca_file_local, timezone=timezone)
            mroot_certificate_mngr_file = cert_mngr.CertmanagerFile(mroot_file_local, timezone=timezone)
            crl_certificate_mngr_file = cert_mngr.CertmanagerFile(crl_file_local, timezone=timezone)
            print('Checking started %s' % NOW)
            check_cert(namespace.file)
            print('--------------------------------------------\nChecking finished %s' % datetime.now())
        else:
            show_version()
            print('For more information run use --help')
    # On any exception: short message to the terminal, details to the log.
    except Exception as e:
        logger.fatal('Fatal error! Exit', exc_info=True)
        print('Critical error: %s' % e)
        print('More information in log file')
        exit(1)
    exit(0)
| [
"bullit88@mail.ru"
] | bullit88@mail.ru |
70156d945934f471e9dfe882949c342b2e29c2f7 | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /apps/cloud_api_generator/generatedServer/tasklets/lan/getNextMacRange/lan_getNextMacRange.py | 3928da8aa16225a7fb32607614557e6d5b8b9246 | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 182 | py | __author__ = 'aserver'
__tags__ = 'lan', 'getNextMacRange'
__priority__= 3
def main(q, i, params, tags):
    """Tasklet entry point: initialise the result to an empty string."""
    params.update(result='')

def match(q, i, params, tags):
    """This tasklet applies unconditionally."""
    return True
| [
"devnull@localhost"
] | devnull@localhost |
7849122eb3fe8c33686282b0d31947253548e51b | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L31/31-35_MD_NVT_rerun/set_6.py | 8441267cd7f30a30dc5e9f08e15d3a37e6877812 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
# Per-lambda-window setup for a thermodynamic-integration batch: for each
# lambda value, instantiate the production input and PBS job script from
# the shared templates, substituting the XXX placeholder.
# NOTE(review): `dir` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/mcl1/L31/MD_NVT_rerun/ti_one-step/31_35/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_6.in'
temp_pbs = filesdir + 'temp_6.pbs'
# Lambda values; each has a subdirectory named with "%6.5f" formatting.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    # Production input: copy the template, then substitute this window's lambda.
    prodin = workdir + "%6.5f_prod_6.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    # PBS job script: same copy-and-substitute treatment.
    pbs = workdir + "%6.5f_6.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Submission is disabled; uncomment to queue the job.
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
02dda9d71cdceea323cc189f06c4e34e849415aa | f0aa307e12bf7ea74c7bee23830f016aeaf45dd8 | /tensor2tensor/data_generators/timeseries_test.py | 9daabe80b1a68bdd9c361272296b30955bbae4de | [
"Apache-2.0"
] | permissive | ShahNewazKhan/tensor2tensor | d94aaa0eea23e20fe1e483d27890939a7243d3b9 | ef91df0197d3f6bfd1a91181ea10e97d4d0e5393 | refs/heads/master | 2020-03-19T17:42:27.293415 | 2018-06-10T02:41:33 | 2018-06-10T02:41:33 | 136,773,457 | 0 | 0 | null | 2018-06-10T02:34:05 | 2018-06-10T02:34:05 | null | UTF-8 | Python | false | false | 1,873 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timeseries generators tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensor2tensor.data_generators import timeseries
import tensorflow as tf
class TimeseriesTest(tf.test.TestCase):
  """Exercises the toy timeseries problem's data generation pipeline."""

  @classmethod
  def setUpClass(cls):
    # Start every run from an empty scratch directory.
    cls.tmp_dir = tf.test.get_temp_dir()
    shutil.rmtree(cls.tmp_dir)
    os.mkdir(cls.tmp_dir)

  def testTimeseriesToyProblem(self):
    """Generates the toy dataset and checks it yields exactly four examples."""
    problem = timeseries.TimeseriesToyProblem()
    problem.generate_data(self.tmp_dir, self.tmp_dir)
    dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, self.tmp_dir)
    features = dataset.make_one_shot_iterator().get_next()
    examples = []
    exhausted = False
    with self.test_session() as sess:
      # Drain the dataset: four examples, then OutOfRange on the fifth.
      examples.append(sess.run(features))
      examples.append(sess.run(features))
      examples.append(sess.run(features))
      examples.append(sess.run(features))
      try:
        sess.run(features)
      except tf.errors.OutOfRangeError:
        exhausted = True
      self.assertTrue(exhausted)
      self.assertEqual(4, len(examples))
      # The first two examples should differ in their "inputs" feature.
      self.assertNotEqual(
          list(examples[0]["inputs"][0]), list(examples[1]["inputs"][0]))


if __name__ == "__main__":
  tf.test.main()
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
a2169451d8f1b8e4c3a35108c6208337b798af78 | cca3f6a0accb18760bb134558fea98bb87a74806 | /abc175/F/main.py | a737fd1cdca034d435b425811277d4674b8061de | [] | no_license | Hashizu/atcoder_work | 5ec48cc1147535f8b9d0f0455fd110536d9f27ea | cda1d9ac0fcd56697ee5db93d26602dd8ccee9df | refs/heads/master | 2023-07-15T02:22:31.995451 | 2021-09-03T12:10:57 | 2021-09-03T12:10:57 | 382,987,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | #!/usr/bin/env python3
import sys
def solve(N: int, S: "List[str]", C: "List[int]"):
    # atcoder-tools template stub: the contest solution was never filled in,
    # so this intentionally does nothing and returns None.
    return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read the ABC175-F style input (N, then N pairs of S_i C_i) from stdin
    and hand it to solve()."""
    def iterate_tokens():
        # Lazily yield whitespace-separated tokens across all input lines.
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    N = int(next(tokens))  # type: int
    S = [str()] * (N)  # type: "List[str]"
    C = [int()] * (N)  # type: "List[int]"
    for i in range(N):
        S[i] = next(tokens)
        C[i] = int(next(tokens))
    solve(N, S, C)
if __name__ == '__main__':
    main()
| [
"athenenoctus@gmail.com"
] | athenenoctus@gmail.com |
db042864a1223f46e3ce2426fa6200fe5f845b09 | 4910ef5677b0af1d5ee88dd422a1a65f8ad81413 | /whyis/autonomic/global_change_service.py | 2bbd467310b2daad084e27918b861e99a88b4ca3 | [
"Apache-2.0"
] | permissive | mobilemadman2/whyis | a0d3b0d88873955d7f50471ecb928f6cdb47ffb1 | 009fdfefc0962dbf1dd629c47d763720c6f20ba0 | refs/heads/master | 2020-06-27T00:43:01.894275 | 2019-07-30T23:22:42 | 2019-07-30T23:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | from __future__ import print_function
from builtins import str
import sadi
import rdflib
import setlr
from datetime import datetime
from .service import Service
from nanopub import Nanopublication
from datastore import create_id
import flask
from flask import render_template
from flask import render_template_string
import logging
import sys, traceback
import database
import tempfile
from depot.io.interfaces import StoredFile
from whyis.namespace import whyis
class GlobalChangeService(Service):
    """Service variant whose trigger query is a "global change" query.

    NOTE(review): presumably the whyis framework re-evaluates such queries
    against the whole graph whenever it changes, rather than per
    nanopublication — confirm against the Service dispatch code.
    """
    @property
    def query_predicate(self):
        # Predicate the framework uses to look up this service's query.
        return whyis.globalChangeQuery
| [
"gordom6@rpi.edu"
] | gordom6@rpi.edu |
1b30005c650309cac9e0140834da3c81572bba0f | e96deed00dd14a1f6d1ed7825991f12ea8c6a384 | /093. Restore IP Addresses.py | 3bbe1bad78b5fc89c3478ebae0c08eaec03f3737 | [] | no_license | borisachen/leetcode | 70b5c320abea8ddfa299b2e81f886cfeb39345c1 | 15e36b472a5067d17482dbd0d357336d31b35ff4 | refs/heads/master | 2021-01-19T17:07:46.726320 | 2020-11-16T04:30:52 | 2020-11-16T04:30:52 | 88,306,634 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | 93. Restore IP Addresses
Given a string containing only digits, restore it by returning all possible valid IP address combinations.
For example:
Given "25525511135",
return ["255.255.11.135", "255.255.111.35"]. (Order does not matter)
class Solution(object):
    def restoreIpAddresses(self, s):
        """Return every valid dotted-quad IP address obtainable by inserting
        three dots into the digit string ``s``.

        :type s: str
        :rtype: List[str]
        """
        res = []
        self.backtrack(s, res, 0, '', 0)
        return res

    def backtrack(self, ip, res, start, temp, count):
        """Depth-first search placing the next 1-3 digit octet.

        ip    -- original digit string
        res   -- accumulator for completed addresses
        start -- index in ``ip`` where the next octet begins
        temp  -- address text built so far
        count -- octets already placed (0..4)
        """
        if count > 4:
            return
        # A full address: four octets that consume the entire string.
        if count == 4 and start == len(ip):
            res.append(temp)
        for i in range(1, 4):
            if start + i > len(ip):
                break
            next_block = ip[start:(start + i)]
            # Reject octets with a leading zero (e.g. '01') and 3-digit
            # octets above 255.  The lexicographic compare is numeric-safe
            # here because both strings have length 3.
            if (next_block[0] == '0' and len(next_block) > 1) or (i == 3 and next_block > '255'):
                continue
            # Fixed: the original also computed an unused 'period' variable
            # shadowing this separator logic; only the separator below was
            # ever used.  Octets are joined by '.', none after the fourth.
            sep = "" if count == 3 else "."
            self.backtrack(ip, res, start + i, temp + next_block + sep, count + 1)
class Solution(object):
    def restoreIpAddresses(self, s):
        """
        :type s: str
        :rtype: List[str]
        """
        # Collect complete addresses via depth-first octet placement.
        collected = []
        self.restoreIp(s, collected, 0, "", 0)
        return collected

    def restoreIp(self, ip, solutions, idx, restored, count):
        # A valid address has exactly four octets and consumes the whole
        # digit string.
        if count > 4:
            return
        if count == 4 and idx == len(ip):
            solutions.append(restored)
        for width in (1, 2, 3):
            end = idx + width
            if end > len(ip):
                break
            octet = ip[idx:end]
            # No multi-digit octet may start with '0'.
            if octet.startswith('0') and len(octet) > 1:
                continue
            # Three-digit octets must be < 256; equal-length digit strings
            # compare numerically under lexicographic order.
            if width == 3 and octet >= '256':
                continue
            glue = "" if count == 3 else "."
            self.restoreIp(ip, solutions, end, restored + octet + glue, count + 1)
// Java version of the same LeetCode 93 backtracking solution.
public List<String> restoreIpAddresses(String s) {
    List<String> solutions = new ArrayList<String>();
    restoreIp(s, solutions, 0, "", 0);
    return solutions;
}
// Places octets of length 1-3; records an address once exactly four
// octets consume the entire input string.
private void restoreIp(String ip, List<String> solutions, int idx, String restored, int count) {
    if (count > 4) return;
    if (count == 4 && idx == ip.length()) solutions.add(restored);
    for (int i=1; i<4; i++) {
        if (idx+i > ip.length()) break;
        String s = ip.substring(idx,idx+i);
        // Reject multi-digit octets with a leading zero and values >= 256.
        if ((s.startsWith("0") && s.length()>1) || (i==3 && Integer.parseInt(s) >= 256)) continue;
        // Join octets with '.', omitting the separator after the fourth.
        restoreIp(ip, solutions, idx+i, restored+s+(count==3?"" : "."), count+1);
    }
}
| [
"boris.chen@gmail.com"
] | boris.chen@gmail.com |
9e94c807331ed9006528a0c3f7fc2460f9616c60 | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210716141910.py | 2911fb930aed362ba612341069ea19f4c50d8e31 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,159 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from icecream import ic
import math
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
    """This is a design point select strategy from constrains analysis"""
    def __init__(self, altitude, velocity, beta, method=2, strategy_apply=0, propulsion_constrains=0, n=12):
        """
        :param altitude: m x 1 matrix
        :param velocity: m x 1 matrix
        :param beta: P_motor/P_total m x 1 matrix
        :param p_turbofan_max: maximum propulsion power for turbofan (threshold value)
        :param p_motorfun_max: maximum propulsion power for motorfun (threshold value)
        :param n: number of motor
        :param method: if method = 1, it is Mattingly Method, otherwise is Gudmundsson Method
        :param strategy_apply: if strategy_apply = 0, no strategy apply
        :param propulsion_constrains: if propulsion_constrains = 0, no propulsion_constrains apply
        the first group of condition is for stall speed
        the stall speed condition have to use motor, therefore with PD
        :return:
        power load: design point p/w and w/s
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.n_motor = n
        self.propulsion_constrains = propulsion_constrains
        self.strategy_apply = strategy_apply
        # initialize the p_w, w_s, hp, n, m
        # n = number of hybridization steps (hp grid 0..1), m = number of
        # flight conditions supplied.
        self.n = 100
        self.m = altitude.size
        self.hp = np.linspace(0, 1, self.n+1)
        self.hp_threshold = 0.5
        # method = 1 = Mattingly_Method, method = 2 = Gudmundsson_Method
        if method == 1:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
        else:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
        # Wing loading comes from the stall-speed constraint (allFuncs[0])
        # evaluated at the first flight condition.
        problem = self.method1(
            self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
        self.w_s = problem.allFuncs[0](problem)
    def p_w_compute(self, p_w_turbofan_max, p_w_motorfun_max, pc):
        # Evaluate required power loading for flight conditions 1..7 over
        # the whole hp grid; condition 0 (stall speed) stays zero here.
        p_w = np.zeros([self.m, len(self.hp)]) # m x (n+1) matrix
        p_w_1 = np.zeros([self.m, len(self.hp)]) # m x (n+1) matrix
        p_w_2 = np.zeros([self.m, len(self.hp)]) # m x (n+1) matrix
        for i in range(1, 8):
            for j in range(len(self.hp)):
                problem1 = self.method1(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                problem2 = self.method2(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                # Conditions 5..7 are climb cases with a rate-of-climb
                # argument (15, 10, 5 m/s respectively).
                if i >= 5:
                    p_w_1[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
                    p_w_2[i, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
                else:
                    p_w_1[i, j] = problem1.allFuncs[i](problem1)
                    p_w_2[i, j] = problem2.allFuncs[i](problem2)
                # Penalize hp values whose component power exceeds the
                # propulsion threshold so they never win the minimum.
                if self.propulsion_constrains != 0 and pc != 0:
                    if p_w_1[i, j] > p_w_turbofan_max:
                        p_w_1[i, j] = 100000
                    elif p_w_2[i, j] > p_w_motorfun_max:
                        p_w_2[i, j] = 100000
                p_w[i, j] = p_w_1[i, j] + p_w_2[i, j]
        return p_w, p_w_1, p_w_2
    def p_w_min(self, p_w):
        #find the min p_w for difference hp for each flight condition:
        p_w_min = np.amin(p_w, axis=1)
        #find the index of p_w_min which is the hp
        hp_p_w_min = np.zeros(8)
        for i in range(1, 8):
            for j in range(len(self.hp)):
                if p_w[i, j] - p_w_min[i] < 0.001:
                    hp_p_w_min[i] = j * 0.01
        # Re-evaluate the component power loadings at each condition's
        # best hp to split the total into turbofan/motor parts.
        p_w_1 = np.zeros(8)
        p_w_2= np.zeros(8)
        for i in range(1, 8):
            problem1 = self.method1(
                self.h[i], self.v[i], self.beta[i], self.w_s, hp_p_w_min[i])
            problem2 = self.method2(
                self.h[i], self.v[i], self.beta[i], self.w_s, hp_p_w_min[i])
            if i >= 5:
                p_w_1[i] = problem1.allFuncs[-1](
                    problem1, roc=15 - 5 * (i - 5))
                p_w_2[i] = problem2.allFuncs[-1](
                    problem2, roc=15 - 5 * (i - 5))
            else:
                p_w_1[i] = problem1.allFuncs[i](problem1)
                p_w_2[i] = problem2.allFuncs[i](problem2)
        # The design point must satisfy every condition: take the maxima.
        p_w_min = np.amax(p_w_min)
        p_w_1_min = np.amax(p_w_1)
        p_w_2_min = np.amax(p_w_2)
        return p_w_1_min, p_w_2_min, p_w_min, hp_p_w_min
    def strategy(self):
        if self.strategy_apply == 0:
            # No strategy: fix hp = 0.5 (grid column 50) everywhere.
            p_w_turbofan_max = 10000
            p_w_motorfun_max = 10000
            p_w, p_w_1, p_w_2 = Design_Point_Select_Strategy.p_w_compute(
                self, p_w_turbofan_max, p_w_motorfun_max, pc=0)
            p_w_min = np.amax(p_w[:, 50])
            p_w_1_min = np.amax(p_w_1[:, 50])
            p_w_2_min = np.amax(p_w_2[:, 50])
            hp_p_w_min = 0.5*np.ones(8)
        else:
            if self.propulsion_constrains == 0:
                # Strategy without propulsion limits: just let p_w_min pick
                # the best hp per condition below.
                p_w_turbofan_max = 100000
                p_w_motorfun_max = 100000
                p_w, p_w_1, p_w_2 = Design_Point_Select_Strategy.p_w_compute(self, p_w_turbofan_max, p_w_motorfun_max, 0)
            else:
                # Brute-force search over integer component power caps up to
                # the unconstrained component maxima.
                p_w, _, _ = Design_Point_Select_Strategy.p_w_compute(self, 10000, 10000, pc=0)
                p_w_1_min, p_w_2_min, _, _ = Design_Point_Select_Strategy.p_w_min(self, p_w)
                p_w_turbofun_boundary = math.ceil(p_w_1_min)
                p_w_motorfun_boundary = math.ceil(p_w_2_min)
                ic(p_w_turbofun_boundary, p_w_motorfun_boundary)
                # build p_w_design_point matrix, try p_w_max to find the best one
                p_w_design_point = np.zeros([p_w_turbofun_boundary+1, p_w_motorfun_boundary+1])
                for i in range(p_w_turbofun_boundary+1):
                    for j in range(p_w_motorfun_boundary+1):
                        p_w, _, _ = Design_Point_Select_Strategy.p_w_compute(self, i, j, 1)
                        #find the min p_w from hp: 0 --- 100 for each flight condition:
                        p_w_min = np.amin(p_w, axis=1)
                        p_w_design_point[i, j] = np.amax(p_w_min)
                    print(i)
                # Caps giving the smallest worst-case total power loading.
                p_w_turbofan_max = np.unravel_index(
                    p_w_design_point.argmin(), p_w_design_point.shape)[0]
                p_w_motorfun_max = np.unravel_index(
                    p_w_design_point.argmin(), p_w_design_point.shape)[1]
                p_w, p_w_1, p_w_2 = Design_Point_Select_Strategy.p_w_compute(
                    self, p_w_turbofan_max, p_w_motorfun_max, 1)
            # ic(p_w, p_w_1, p_w_2)
            p_w_1_min, p_w_2_min, p_w_min, hp_p_w_min = Design_Point_Select_Strategy.p_w_min(self, p_w)
            hp_p_w_min[0] = p_w_motorfun_max/(p_w_motorfun_max+p_w_turbofan_max)
        return self.w_s, p_w_min, p_w_1_min, p_w_2_min, hp_p_w_min, p_w_turbofan_max, p_w_motorfun_max
if __name__ == "__main__":
n, m = 250, 8
w_s = np.linspace(100, 9000, n)
p_w = np.zeros([m, n, 6])
constrains = np.array([[0, 80, 1, 0.2], [0, 68, 0.988, 0.5], [11300, 230, 0.948, 0.8],
[11900, 230, 0.78, 0.8], [3000, 100,
0.984, 0.8], [0, 100, 0.984, 0.5],
[3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.7]])
constrains_name = ['stall speed', 'take off', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m', 'feasible region-hybrid', 'feasible region-conventional']
l1 = ['hp: 0.5', 'hp: adjust', 'hp: adjust with threshold']
l2 = ['design point hp: 0.5', 'design point hp: adjust', 'design point hp: adjust with threshold']
color = ['k', 'c', 'b', 'g', 'y', 'plum', 'violet', 'm']
l_style = ['-', '--', '-.']
mark = ['s', '^', '*']
alpha = [0.5, 0.4, 0.3]
methods = [ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric,
ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric]
strategy = [0, 1, 1]
propulsion = [0, 0, 0]
# plots
fig, ax = plt.subplots(3, 2, sharey=True, sharex=True, figsize=(10, 12))
ax = ax.flatten()
design_point_p_w, design_point_w_s = np.zeros([3, 6]), np.zeros([3, 2])
for z in range(3):
h = constrains[:, 0]
v = constrains[:, 1]
beta = constrains[:, 2]
problem1 = Design_Point_Select_Strategy(
h, v, beta, method=1, strategy_apply=strategy[z], propulsion_constrains=propulsion[z])
problem2 = Design_Point_Select_Strategy(
h, v, beta, method=2, strategy_apply=strategy[z], propulsion_constrains=propulsion[z])
w_s1, p_w_min11, p_w_1_min11, p_w_2_min11, hp_p_w_min11, p_w_turbofan_max11, p_w_motorfun_max11 = problem1.strategy()
ic(w_s1, p_w_min11, p_w_1_min11, p_w_2_min11, hp_p_w_min11, p_w_turbofan_max11, p_w_motorfun_max11 )
design_point_w_s[z, 0], design_point_p_w[z, 4], design_point_p_w[z, 0], design_point_p_w[z, 2], hp_p_w_min_1, _, _ = problem1.strategy()
design_point_w_s[z, 1], design_point_p_w[z, 5], design_point_p_w[z, 1], design_point_p_w[z, 3], hp_p_w_min_2, _, _ = problem2.strategy()
for k in range(6):
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
if k % 2 == 0:
hp = hp_p_w_min_1[i]
else:
hp = hp_p_w_min_2[i]
# calculate p_w
if k < 4:
problem = methods[k](h, v, beta, w_s[j], hp)
if i >= 5:
p_w[i, j, k] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
else:
p_w[i, j, k] = problem.allFuncs[i](problem)
else:
if i == 0:
problem = methods[k-2](h, v, beta, w_s[j], hp)
p_w[i, j, k] = problem.allFuncs[i](problem)
else:
p_w[i, j, k] = p_w[i, j, k-4] + p_w[i, j, k-2]
# plot the lines
if z == 0:
if i == 0:
ax[k].plot(p_w[i, :, k], np.linspace(0, 100, n),
linewidth=1, alpha=0.1, linestyle=l_style[z], label=constrains_name[i])
else:
ax[k].plot(w_s, p_w[i, :, k], color=color[i],
linewidth=1, alpha=0.1, linestyle=l_style[z], label=constrains_name[i])
else:
if i == 0:
ax[k].plot(p_w[i, :, k], np.linspace(0, 100, n),
linewidth=1, alpha=0.1, linestyle=l_style[z])
else:
ax[k].plot(w_s, p_w[i, :, k], color=color[i],
linewidth=1, alpha=0.1, linestyle=l_style[z])
# plot fill region
p_w[0, :, k] = 10 ** 10 * (w_s - p_w[0, 0, k])
ax[k].fill_between(w_s, np.amax(p_w[0:m, :, k], axis=0), 150, alpha=alpha[z], label=l1[z])
ax[k].plot(design_point_w_s[z, 0], design_point_p_w[z, k], marker=mark[z],
markersize=5, label=l2[z])
if z == 0:
if k>=4:
ax[k].plot(6012, 72, 'r*', markersize=5, label='True Conventional')
handles, labels = plt.gca().get_legend_handles_labels()
fig.legend(handles, labels, bbox_to_anchor=(0.05, 0.01, 0.9, 0.5), loc="lower left",
mode="expand", borderaxespad=0, ncol=3, frameon=False)
# plt.tight_layout(rect=[0.125, 0.02, 0, 0])
# hp = constrains[:, 3]
plt.xlim(200, 9000)
plt.ylim(0, 100)
plt.setp(ax[0].set_title(r'$\bf{Mattingly-Method}$'))
plt.setp(ax[1].set_title(r'$\bf{Gudmundsson-Method}$'))
plt.setp(ax[4:6], xlabel='Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.setp(ax[0], ylabel=r'$\bf{Turbofun}$''\n $P_{SL}$/$W_{TO}$ (W/N)')
plt.setp(ax[2], ylabel=r'$\bf{Motor}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
plt.setp(
ax[4], ylabel=r'$\bf{Turbofun+Motor}$''\n $P_{SL}$/$W_{TO}$ (W/N)')
plt.subplots_adjust(bottom=0.15)
plt.suptitle(r'$\bf{Component}$' ' ' r'$\bf{P_{SL}/W_{TO}}$' ' ' r'$\bf{Diagrams}$'
' ' r'$\bf{After}$' ' ' r'$\bf{Adjust}$' ' ' r'$\bf{Degree-of-Hybridization}$'
'\n hp adjust with threshold for Gudmundsson Method: take-off=' +
'%.2f' % hp_p_w_min_2[0] + ' stall-speed=' +
str(hp_p_w_min_2[1]) + '\n cruise=' +
str(hp_p_w_min_2[2]) + ' service-ceiling=' +
str(hp_p_w_min_2[3]) + ' level-turn=@3000m' +
str(hp_p_w_min_2[4]) + ' climb@S-L=' +
str(hp_p_w_min_2[5]) + ' climb@3000m=' +
str(hp_p_w_min_2[6]) + ' climb@7000m=' + str(hp_p_w_min_2[7]))
plt.show()
| [
"libao@gatech.edu"
] | libao@gatech.edu |
441f47bc3f4a0ef8aedba9771147faf091013cd4 | 12abe02e205d3e8dabe78fb5a93ccca89e2c42c4 | /toontown/toon/Toon.py | f95d82087ccf62349693a198c726ec74efbca9c4 | [] | no_license | nate97/toontown-src-py3.0 | 55092b2973b76e6b6d566887f44c52822684394c | f76c515801ae08c40b264b48365211fd44b137eb | refs/heads/master | 2022-07-07T05:23:22.071185 | 2022-06-22T16:36:10 | 2022-06-22T16:36:10 | 187,682,471 | 15 | 8 | null | null | null | null | UTF-8 | Python | false | false | 119,521 | py | from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from direct.showbase.PythonUtil import Functor
from direct.task.Task import Task
from panda3d.core import RigidBodyCombiner
from panda3d.core import *
import random
import types
from . import AccessoryGlobals
from . import Motion
from . import TTEmote
from . import ToonDNA
from .ToonHead import *
from otp.avatar import Avatar
from otp.avatar import Emote
from otp.avatar.Avatar import teleportNotify
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPLocalizer
from toontown.battle import SuitBattleGlobals
from toontown.chat.ChatGlobals import *
from toontown.distributed import DelayDelete
from toontown.effects import DustCloud
from toontown.effects import Wake
from toontown.hood import ZoneUtil
from toontown.nametag.NametagGlobals import *
from toontown.suit import SuitDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
import importlib
def teleportDebug(requestStatus, msg, onlyIfToAv = True):
    """Log msg through teleportNotify.debug when teleport debugging is on,
    the request's 'how' entry names a teleport, and (unless onlyIfToAv is
    False) the request targets a real avatar (positive 'avId')."""
    if not teleportNotify.getDebug():
        return
    how = requestStatus.get('how')
    if how is None or how[:len('teleport')] != 'teleport':
        return
    toAvatar = 'avId' in requestStatus and requestStatus['avId'] > 0
    if toAvatar or not onlyIfToAv:
        teleportNotify.debug(msg)
SLEEP_STRING = TTLocalizer.ToonSleepString
DogDialogueArray = []
CatDialogueArray = []
HorseDialogueArray = []
RabbitDialogueArray = []
MouseDialogueArray = []
DuckDialogueArray = []
MonkeyDialogueArray = []
BearDialogueArray = []
PigDialogueArray = []
LegsAnimDict = {}
TorsoAnimDict = {}
HeadAnimDict = {}
Preloaded = {}
Phase3AnimList = (('neutral', 'neutral'), ('run', 'run'))
Phase3_5AnimList = (('walk', 'walk'),
('teleport', 'teleport'),
('book', 'book'),
('jump', 'jump'),
('running-jump', 'running-jump'),
('jump-squat', 'jump-zstart'),
('jump-idle', 'jump-zhang'),
('jump-land', 'jump-zend'),
('running-jump-squat', 'leap_zstart'),
('running-jump-idle', 'leap_zhang'),
('running-jump-land', 'leap_zend'),
('pushbutton', 'press-button'),
('throw', 'pie-throw'),
('victory', 'victory-dance'),
('sidestep-left', 'sidestep-left'),
('conked', 'conked'),
('cringe', 'cringe'),
('wave', 'wave'),
('shrug', 'shrug'),
('angry', 'angry'),
('tutorial-neutral', 'tutorial-neutral'),
('left-point', 'left-point'),
('right-point', 'right-point'),
('right-point-start', 'right-point-start'),
('give-props', 'give-props'),
('give-props-start', 'give-props-start'),
('right-hand', 'right-hand'),
('right-hand-start', 'right-hand-start'),
('duck', 'duck'),
('sidestep-right', 'jump-back-right'),
('periscope', 'periscope'))
Phase4AnimList = (('sit', 'sit'),
('sit-start', 'intoSit'),
('swim', 'swim'),
('tug-o-war', 'tug-o-war'),
('sad-walk', 'losewalk'),
('sad-neutral', 'sad-neutral'),
('up', 'up'),
('down', 'down'),
('left', 'left'),
('right', 'right'),
('applause', 'applause'),
('confused', 'confused'),
('bow', 'bow'),
('curtsy', 'curtsy'),
('bored', 'bored'),
('think', 'think'),
('battlecast', 'fish'),
('cast', 'cast'),
('castlong', 'castlong'),
('fish-end', 'fishEND'),
('fish-neutral', 'fishneutral'),
('fish-again', 'fishAGAIN'),
('reel', 'reel'),
('reel-H', 'reelH'),
('reel-neutral', 'reelneutral'),
('pole', 'pole'),
('pole-neutral', 'poleneutral'),
('slip-forward', 'slip-forward'),
('slip-backward', 'slip-backward'),
('catch-neutral', 'gameneutral'),
('catch-run', 'gamerun'),
('catch-eatneutral', 'eat_neutral'),
('catch-eatnrun', 'eatnrun'),
('catch-intro-throw', 'gameThrow'),
('swing', 'swing'),
('pet-start', 'petin'),
('pet-loop', 'petloop'),
('pet-end', 'petend'),
('scientistJealous', 'scientistJealous'),
('scientistEmcee', 'scientistEmcee'),
('scientistWork', 'scientistWork'),
('scientistGame', 'scientistGame'))
Phase5AnimList = (('water-gun', 'water-gun'),
('hold-bottle', 'hold-bottle'),
('firehose', 'firehose'),
('spit', 'spit'),
('tickle', 'tickle'),
('smooch', 'smooch'),
('happy-dance', 'happy-dance'),
('sprinkle-dust', 'sprinkle-dust'),
('juggle', 'juggle'),
('climb', 'climb'),
('sound', 'shout'),
('toss', 'toss'),
('hold-magnet', 'hold-magnet'),
('hypnotize', 'hypnotize'),
('struggle', 'struggle'),
('lose', 'lose'),
('melt', 'melt'))
Phase5_5AnimList = (('takePhone', 'takePhone'),
('phoneNeutral', 'phoneNeutral'),
('phoneBack', 'phoneBack'),
('bank', 'jellybeanJar'),
('callPet', 'callPet'),
('feedPet', 'feedPet'),
('start-dig', 'into_dig'),
('loop-dig', 'loop_dig'),
('water', 'water'))
Phase6AnimList = (('headdown-putt', 'headdown-putt'),
('into-putt', 'into-putt'),
('loop-putt', 'loop-putt'),
('rotateL-putt', 'rotateL-putt'),
('rotateR-putt', 'rotateR-putt'),
('swing-putt', 'swing-putt'),
('look-putt', 'look-putt'),
('lookloop-putt', 'lookloop-putt'),
('bad-putt', 'bad-putt'),
('badloop-putt', 'badloop-putt'),
('good-putt', 'good-putt'))
Phase9AnimList = (('push', 'push'),)
Phase10AnimList = (('leverReach', 'leverReach'), ('leverPull', 'leverPull'), ('leverNeutral', 'leverNeutral'))
Phase12AnimList = ()
LegDict = {'s': '/models/char/tt_a_chr_dgs_shorts_legs_',
'm': '/models/char/tt_a_chr_dgm_shorts_legs_',
'l': '/models/char/tt_a_chr_dgl_shorts_legs_'}
TorsoDict = {
'ss': '/models/char/tt_a_chr_dgs_shorts_torso_',
'ms': '/models/char/tt_a_chr_dgm_shorts_torso_',
'ls': '/models/char/tt_a_chr_dgl_shorts_torso_',
'sd': '/models/char/tt_a_chr_dgs_skirt_torso_',
'md': '/models/char/tt_a_chr_dgm_skirt_torso_',
'ld': '/models/char/tt_a_chr_dgl_skirt_torso_'}
def loadModels():
    """Fill the module-level Preloaded cache with every leg and torso model
    at all three LOD levels.  Runs the expensive loads only once: a
    non-empty cache makes this a no-op."""
    if Preloaded:
        return
    print('Preloading avatars...')
    # Legs exist at every LOD for every body size.
    for fileRoot in LegDict.values():
        for lod in ('1000', '500', '250'):
            Preloaded[fileRoot + '-' + lod] = loader.loadModel('phase_3' + fileRoot + lod)
    # Torsos: the low LODs are loaded only for two-character keys
    # (size + clothing type).
    for key, fileRoot in TorsoDict.items():
        Preloaded[fileRoot + '-1000'] = loader.loadModel('phase_3' + fileRoot + '1000')
        if len(key) > 1:
            for lod in ('500', '250'):
                Preloaded[fileRoot + '-' + lod] = loader.loadModel('phase_3' + fileRoot + lod)
# Thin wrappers selecting which content phase's animations to load/unload.
def loadBasicAnims():
    """Load the phase_3 (basic) animations."""
    loadPhaseAnims()
def unloadBasicAnims():
    """Unload the phase_3 (basic) animations.

    Fixed: this previously called loadPhaseAnims(0), passing 0 as the
    *phaseStr* argument (an unknown phase that hits the error branch)
    instead of clearing *loadFlag* like every other unload wrapper.
    """
    loadPhaseAnims('phase_3', 0)
def loadTutorialBattleAnims():
    loadPhaseAnims('phase_3.5')
def unloadTutorialBattleAnims():
    loadPhaseAnims('phase_3.5', 0)
def loadMinigameAnims():
    loadPhaseAnims('phase_4')
def unloadMinigameAnims():
    loadPhaseAnims('phase_4', 0)
def loadBattleAnims():
    loadPhaseAnims('phase_5')
def unloadBattleAnims():
    loadPhaseAnims('phase_5', 0)
def loadSellbotHQAnims():
    loadPhaseAnims('phase_9')
def unloadSellbotHQAnims():
    loadPhaseAnims('phase_9', 0)
def loadCashbotHQAnims():
    loadPhaseAnims('phase_10')
def unloadCashbotHQAnims():
    loadPhaseAnims('phase_10', 0)
def loadBossbotHQAnims():
    loadPhaseAnims('phase_12')
def unloadBossbotHQAnims():
    loadPhaseAnims('phase_12', 0)
def loadPhaseAnims(phaseStr = 'phase_3', loadFlag = 1):
    """Load or unload one content phase's animations for the local avatar.

    phaseStr -- which phase's animation table to use.
    loadFlag -- truthy to load (currently a no-op: animations are bound
                lazily elsewhere), falsy to unload the phase's animations
                from the local avatar's current leg/torso/head types.

    Raises ValueError for an unknown phase string.  (Fixed: the original
    error branch called self.notify.error from this module-level function,
    which would have raised NameError on the undefined name 'self'.)
    """
    phaseAnimLists = {'phase_3': Phase3AnimList,
                      'phase_3.5': Phase3_5AnimList,
                      'phase_4': Phase4AnimList,
                      'phase_5': Phase5AnimList,
                      'phase_5.5': Phase5_5AnimList,
                      'phase_6': Phase6AnimList,
                      'phase_9': Phase9AnimList,
                      'phase_10': Phase10AnimList,
                      'phase_12': Phase12AnimList}
    animList = phaseAnimLists.get(phaseStr)
    if animList is None:
        raise ValueError('Unknown phase string %s' % phaseStr)
    for key in LegDict:
        for anim in animList:
            if not loadFlag and anim[0] in LegsAnimDict[key]:
                if base.localAvatar.style.legs == key:
                    base.localAvatar.unloadAnims([anim[0]], 'legs', None)
    for key in TorsoDict:
        for anim in animList:
            if not loadFlag and anim[0] in TorsoAnimDict[key]:
                if base.localAvatar.style.torso == key:
                    base.localAvatar.unloadAnims([anim[0]], 'torso', None)
    for key in HeadDict:
        # Only heads containing 'd' (long-muzzle dog heads) carry their
        # own animation channels.
        if key.find('d') >= 0:
            for anim in animList:
                if not loadFlag and anim[0] in HeadAnimDict[key]:
                    if base.localAvatar.style.head == key:
                        base.localAvatar.unloadAnims([anim[0]], 'head', None)
def compileGlobalAnimList():
    """Build the global animation dictionaries (LegsAnimDict, TorsoAnimDict,
    HeadAnimDict), mapping body-part type -> anim name -> model file path.

    HeadAnimDict only receives entries for head types containing 'd' (the
    long-muzzle heads, which have their own animation channels).
    """
    phaseList = [Phase3AnimList,
                 Phase3_5AnimList,
                 Phase4AnimList,
                 Phase5AnimList,
                 Phase5_5AnimList,
                 Phase6AnimList,
                 Phase9AnimList,
                 Phase10AnimList,
                 Phase12AnimList]
    phaseStrList = ['phase_3',
                    'phase_3.5',
                    'phase_4',
                    'phase_5',
                    'phase_5.5',
                    'phase_6',
                    'phase_9',
                    'phase_10',
                    'phase_12']
    # Pair each anim table with its phase directory directly.  The previous
    # code recovered the phase via phaseList.index(animList), which returns
    # the first equal element — an O(n) scan per iteration that would also
    # mis-pair any duplicate (e.g. empty) anim tables.
    for phaseStr, animList in zip(phaseStrList, phaseList):
        for key in LegDict:
            animDict = LegsAnimDict.setdefault(key, {})
            for anim in animList:
                animDict[anim[0]] = phaseStr + LegDict[key] + anim[1]
        for key in TorsoDict:
            animDict = TorsoAnimDict.setdefault(key, {})
            for anim in animList:
                animDict[anim[0]] = phaseStr + TorsoDict[key] + anim[1]
        for key in HeadDict:
            if key.find('d') >= 0:
                animDict = HeadAnimDict.setdefault(key, {})
                for anim in animList:
                    animDict[anim[0]] = phaseStr + HeadDict[key] + anim[1]
def loadDialog():
    """Load every species' dialogue sound effects into the module-level
    arrays (six clips per species: short/med/long/question/exclaim/howl)."""
    loadPath = 'phase_3.5/audio/dial/'
    suffixes = ('short', 'med', 'long', 'question', 'exclaim', 'howl')
    # (species name used in the filename, target array) in load order.
    species = (('dog', DogDialogueArray),
               ('cat', CatDialogueArray),
               ('horse', HorseDialogueArray),
               ('rabbit', RabbitDialogueArray),
               ('mouse', MouseDialogueArray),
               ('duck', DuckDialogueArray),
               ('monkey', MonkeyDialogueArray),
               ('bear', BearDialogueArray),
               ('pig', PigDialogueArray))
    for name, dialogueArray in species:
        for suffix in suffixes:
            fileName = 'AV_' + name + '_' + suffix
            dialogueArray.append(base.loader.loadSfx(loadPath + fileName + '.ogg'))
def unloadDialog():
    """Release every species' dialogue sounds by rebinding the module-level
    arrays to fresh empty lists."""
    global DogDialogueArray, CatDialogueArray, HorseDialogueArray
    global RabbitDialogueArray, MouseDialogueArray, DuckDialogueArray
    global MonkeyDialogueArray, BearDialogueArray, PigDialogueArray
    (DogDialogueArray, CatDialogueArray, HorseDialogueArray,
     RabbitDialogueArray, MouseDialogueArray, DuckDialogueArray,
     MonkeyDialogueArray, BearDialogueArray, PigDialogueArray) = (
        [], [], [], [], [], [], [], [], [])
class Toon(Avatar.Avatar, ToonHead):
notify = DirectNotifyGlobal.directNotify.newCategory('Toon')
afkTimeout = base.config.GetInt('afk-timeout', 600)
    def __init__(self):
        """Set up default toon state, load the chat-bubble sound and build
        the animation-state FSM.  Safe to call twice: a sentinel attribute
        makes repeat calls no-ops."""
        try:
            self.Toon_initialized
            return
        except:
            self.Toon_initialized = 1
        Avatar.Avatar.__init__(self)
        ToonHead.__init__(self)
        # Movement / animation bookkeeping.
        self.forwardSpeed = 0.0
        self.rotateSpeed = 0.0
        self.avatarType = 'toon'
        self.motion = Motion.Motion(self)
        self.standWalkRunReverse = None
        self.playingAnim = None
        self.soundTeleport = None
        self.cheesyEffect = ToontownGlobals.CENormal
        # Interval tracks; None when no effect/emote/stun is running.
        self.effectTrack = None
        self.emoteTrack = None
        self.emote = None
        self.stunTrack = None
        self.__bookActors = []
        self.__holeActors = []
        self.holeClipPath = None
        self.wake = None
        self.lastWakeTime = 0
        self.forceJumpIdle = False
        # Pie-throwing state.
        self.numPies = 0
        self.pieType = 0
        self.pieThrowType = ToontownGlobals.PieThrowArc
        self.pieModel = None
        self.__pieModelType = None
        self.pieScale = 1.0
        # Accessory nodes and DNA-style accessory tuples.
        self.hatNodes = []
        self.glassesNodes = []
        self.backpackNodes = []
        self.hat = (0, 0, 0)
        self.glasses = (0, 0, 0)
        self.backpack = (0, 0, 0)
        self.shoes = (0, 0, 0)
        self.isStunned = 0
        self.isDisguised = 0
        self.defaultColorScale = None
        self.jar = None
        self.setTag('pieCode', str(ToontownGlobals.PieCodeToon))
        self.setFont(ToontownGlobals.getToonFont())
        self.soundChatBubble = base.loader.loadSfx('phase_3/audio/sfx/GUI_balloon_popup.ogg')
        # Master animation FSM: one state per locomotion/emote/activity pose.
        self.animFSM = ClassicFSM('Toon', [State('off', self.enterOff, self.exitOff),
         State('neutral', self.enterNeutral, self.exitNeutral),
         State('victory', self.enterVictory, self.exitVictory),
         State('Happy', self.enterHappy, self.exitHappy),
         State('Sad', self.enterSad, self.exitSad),
         State('Catching', self.enterCatching, self.exitCatching),
         State('CatchEating', self.enterCatchEating, self.exitCatchEating),
         State('Sleep', self.enterSleep, self.exitSleep),
         State('walk', self.enterWalk, self.exitWalk),
         State('jumpSquat', self.enterJumpSquat, self.exitJumpSquat),
         State('jump', self.enterJump, self.exitJump),
         State('jumpAirborne', self.enterJumpAirborne, self.exitJumpAirborne),
         State('jumpLand', self.enterJumpLand, self.exitJumpLand),
         State('run', self.enterRun, self.exitRun),
         State('swim', self.enterSwim, self.exitSwim),
         State('swimhold', self.enterSwimHold, self.exitSwimHold),
         State('dive', self.enterDive, self.exitDive),
         State('cringe', self.enterCringe, self.exitCringe),
         State('OpenBook', self.enterOpenBook, self.exitOpenBook, ['ReadBook', 'CloseBook']),
         State('ReadBook', self.enterReadBook, self.exitReadBook),
         State('CloseBook', self.enterCloseBook, self.exitCloseBook),
         State('TeleportOut', self.enterTeleportOut, self.exitTeleportOut),
         State('Died', self.enterDied, self.exitDied),
         State('TeleportedOut', self.enterTeleportedOut, self.exitTeleportedOut),
         State('TeleportIn', self.enterTeleportIn, self.exitTeleportIn),
         State('Emote', self.enterEmote, self.exitEmote),
         State('SitStart', self.enterSitStart, self.exitSitStart),
         State('Sit', self.enterSit, self.exitSit),
         State('Push', self.enterPush, self.exitPush),
         State('Squish', self.enterSquish, self.exitSquish),
         State('FallDown', self.enterFallDown, self.exitFallDown),
         State('GolfPuttLoop', self.enterGolfPuttLoop, self.exitGolfPuttLoop),
         State('GolfRotateLeft', self.enterGolfRotateLeft, self.exitGolfRotateLeft),
         State('GolfRotateRight', self.enterGolfRotateRight, self.exitGolfRotateRight),
         State('GolfPuttSwing', self.enterGolfPuttSwing, self.exitGolfPuttSwing),
         State('GolfGoodPutt', self.enterGolfGoodPutt, self.exitGolfGoodPutt),
         State('GolfBadPutt', self.enterGolfBadPutt, self.exitGolfBadPutt),
         State('Flattened', self.enterFlattened, self.exitFlattened),
         State('CogThiefRunning', self.enterCogThiefRunning, self.exitCogThiefRunning),
         State('ScientistJealous', self.enterScientistJealous, self.exitScientistJealous),
         State('ScientistEmcee', self.enterScientistEmcee, self.exitScientistEmcee),
         State('ScientistWork', self.enterScientistWork, self.exitScientistWork),
         State('ScientistLessWork', self.enterScientistLessWork, self.exitScientistLessWork),
         # NOTE(review): ScientistPlay uses enterScientistPlay as BOTH the
         # enter and exit callback — looks like a typo for exitScientistPlay;
         # confirm whether that method exists before changing.
         State('ScientistPlay', self.enterScientistPlay, self.enterScientistPlay)], 'off', 'off')
        # NOTE(review): animStateList is never used after this assignment.
        animStateList = self.animFSM.getStates()
        self.animFSM.enterInitialState()
def stopAnimations(self):
        """Halt the anim FSM and finish every outstanding effect/emote/stun
        track and the swim wake, so nothing keeps animating (or holding a
        reference to) this toon.  Safe to call during teardown.
        """
        if hasattr(self, 'animFSM'):
            if not self.animFSM.isInternalStateInFlux():
                self.animFSM.request('off')
            else:
                # Requesting a transition while the FSM is mid-transition
                # would assert inside the FSM; log and skip instead.
                self.notify.warning('animFSM in flux, state=%s, not requesting off' % self.animFSM.getCurrentState().getName())
        else:
            self.notify.warning('animFSM has been deleted')
        # finish() (not pause()) so each interval runs its final/cleanup step.
        if self.effectTrack != None:
            self.effectTrack.finish()
            self.effectTrack = None
        if self.emoteTrack != None:
            self.emoteTrack.finish()
            self.emoteTrack = None
        if self.stunTrack != None:
            self.stunTrack.finish()
            self.stunTrack = None
        if self.wake:
            self.wake.stop()
            self.wake.destroy()
            self.wake = None
        self.cleanupPieModel()
        return
def delete(self):
        """Tear down the toon: stop animation, drop all cached part/joint
        references, clean up the book and hole prop actors, then chain to
        the Avatar and ToonHead deletes.

        The Toon_deleted attribute acts as a guard so repeated delete()
        calls are no-ops (EAFP: first call raises AttributeError and runs
        the body).
        """
        try:
            self.Toon_deleted
        except:
            self.Toon_deleted = 1
            self.stopAnimations()
            self.rightHands = None
            self.rightHand = None
            self.leftHands = None
            self.leftHand = None
            self.headParts = None
            self.torsoParts = None
            self.hipsParts = None
            self.legsParts = None
            del self.animFSM
            for bookActor in self.__bookActors:
                bookActor.cleanup()
            del self.__bookActors
            for holeActor in self.__holeActors:
                holeActor.cleanup()
            del self.__holeActors
            self.soundTeleport = None
            self.motion.delete()
            self.motion = None
            Avatar.Avatar.delete(self)
            ToonHead.delete(self)
        return
def updateToonDNA(self, newDNA, fForce = 0, tPose = 0):
        """Morph this toon's existing parts to match *newDNA*.

        Only parts that differ from the current style are swapped, unless
        fForce is nonzero.  tPose, when nonzero, suppresses looping the
        neutral anim after a torso swap.
        """
        self.style.gender = newDNA.getGender()
        oldDNA = self.style
        if fForce or newDNA.head != oldDNA.head:
            self.swapToonHead(newDNA.head)
        if fForce or newDNA.torso != oldDNA.torso:
            # Clothes are regenerated once at the end, so skip them here.
            self.swapToonTorso(newDNA.torso, genClothes=0)
            if not tPose:
                self.loop('neutral')
        if fForce or newDNA.legs != oldDNA.legs:
            self.swapToonLegs(newDNA.legs)
        self.swapToonColor(newDNA)
        self.__swapToonClothes(newDNA)
def setDNAString(self, dnaString):
        """Decode a network DNA string and apply it via setDNA().

        A torso code shorter than 2 characters would be a naked toon;
        that is reported as suspicious and shorts ('s') are appended.
        """
        newDNA = ToonDNA.ToonDNA()
        newDNA.makeFromNetString(dnaString)
        if len(newDNA.torso) < 2:
            self.sendLogSuspiciousEvent('nakedToonDNA %s was requested' % newDNA.torso)
            newDNA.torso = newDNA.torso + 's'
        self.setDNA(newDNA)
def setDNA(self, dna):
        """Install *dna* as this toon's style.

        If a style already exists the toon is morphed in place; otherwise
        the whole toon (geometry, shadow, nametag) is built from scratch.
        Disguised (cog-suited) toons keep their current appearance.
        """
        if getattr(self, 'isDisguised', False):
            return
        if self.style:
            # Existing toon: swap only the parts that changed.
            self.updateToonDNA(dna)
        else:
            # First-time setup: generate everything.
            self.style = dna
            self.generateToon()
            self.initializeDropShadow()
            self.initializeNametag3d()
def parentToonParts(self):
        """Attach head->torso and torso->legs at their connecting joints,
        per LOD when LODs exist.

        Newer rigs expose the head joint as 'def_head'; fall back to the
        legacy 'joint_head' when it is absent.
        """
        if self.hasLOD():
            for lodName in self.getLODNames():
                if base.config.GetBool('want-new-anims', 1):
                    if not self.getPart('torso', lodName).find('**/def_head').isEmpty():
                        self.attach('head', 'torso', 'def_head', lodName)
                    else:
                        self.attach('head', 'torso', 'joint_head', lodName)
                else:
                    self.attach('head', 'torso', 'joint_head', lodName)
                self.attach('torso', 'legs', 'joint_hips', lodName)
        else:
            self.attach('head', 'torso', 'joint_head')
            self.attach('torso', 'legs', 'joint_hips')
def unparentToonParts(self):
        """Reverse of parentToonParts: reparent each body part directly to
        its LOD node (or the geom node) so parts can be removed/swapped.
        """
        if self.hasLOD():
            for lodName in self.getLODNames():
                self.getPart('head', lodName).reparentTo(self.getLOD(lodName))
                self.getPart('torso', lodName).reparentTo(self.getLOD(lodName))
                self.getPart('legs', lodName).reparentTo(self.getLOD(lodName))
        else:
            self.getPart('head').reparentTo(self.getGeomNode())
            self.getPart('torso').reparentTo(self.getGeomNode())
            self.getPart('legs').reparentTo(self.getGeomNode())
def setLODs(self):
        """Create the three detail levels (1000/500/250) with switch-in/out
        distances taken from config variables (lod1/2/3-in/out), so they
        can be tuned without code changes.
        """
        self.setLODNode()
        levelOneIn = base.config.GetInt('lod1-in', 20)
        levelOneOut = base.config.GetInt('lod1-out', 0)
        levelTwoIn = base.config.GetInt('lod2-in', 80)
        levelTwoOut = base.config.GetInt('lod2-out', 20)
        levelThreeIn = base.config.GetInt('lod3-in', 280)
        levelThreeOut = base.config.GetInt('lod3-out', 80)
        self.addLOD(1000, levelOneIn, levelOneOut)
        self.addLOD(500, levelTwoIn, levelTwoOut)
        self.addLOD(250, levelThreeIn, levelThreeOut)
def generateToon(self):
        """Build the complete toon from the current style.

        The steps run in a fixed order: the rigid-body container and LODs
        must exist before parts are generated, parts must exist before
        they are parented, colored and scaled, and the container is
        collected last.
        """
        buildSteps = (self.generateToonContainer,
         self.setLODs,
         self.generateToonLegs,
         self.generateToonHead,
         self.generateToonTorso,
         self.generateToonColor,
         self.parentToonParts,
         self.rescaleToon,
         self.resetHeight,
         self.setupToonNodes,
         self.completeToonContainer)
        for step in buildSteps:
            step()
def generateToonContainer(self):
        """Create a RigidBodyCombiner node (parented to render) that the
        finished toon is later placed under; the combiner merges the rigid
        geometry into fewer Geoms (performance experiment).
        """
        self.ridgidToonContainer = RigidBodyCombiner("ridgidToonContainer")
        self.RGTCNodepath = NodePath(self.ridgidToonContainer)
        self.RGTCNodepath.reparentTo(render)
def completeToonContainer(self):
        """Move the assembled toon under the combiner and collect it.

        Must run after all parts are generated and parented.
        """
        self.reparentTo(self.RGTCNodepath)
        self.ridgidToonContainer.collect()
def setupToonNodes(self):
        """Cache hand joints and body-part node collections for each LOD.

        A single 'rightHand'/'leftHand' NodePath is instanced to the hold
        joint of every LOD, so a prop parented to self.rightHand appears
        in all LODs at once.  Newer rigs use 'def_joint_right/left_hold';
        fall back to the legacy 'joint_Rhold'/'joint_Lhold'.
        """
        rightHand = NodePath('rightHand')
        self.rightHand = None
        self.rightHands = []
        leftHand = NodePath('leftHand')
        self.leftHands = []
        self.leftHand = None
        for lodName in self.getLODNames():
            hand = self.getPart('torso', lodName).find('**/joint_Rhold')
            if base.config.GetBool('want-new-anims', 1):
                if not self.getPart('torso', lodName).find('**/def_joint_right_hold').isEmpty():
                    hand = self.getPart('torso', lodName).find('**/def_joint_right_hold')
                else:
                    hand = self.getPart('torso', lodName).find('**/joint_Rhold')
            self.rightHands.append(hand)
            # instanceTo returns the new instance; keep chaining so the
            # same node ends up instanced under every LOD's joint.
            rightHand = rightHand.instanceTo(hand)
            if base.config.GetBool('want-new-anims', 1):
                if not self.getPart('torso', lodName).find('**/def_joint_left_hold').isEmpty():
                    hand = self.getPart('torso', lodName).find('**/def_joint_left_hold')
                else:
                    hand = self.getPart('torso', lodName).find('**/joint_Lhold')
            self.leftHands.append(hand)
            leftHand = leftHand.instanceTo(hand)
            # Remember the first instance as the canonical hand NodePath.
            if self.rightHand == None:
                self.rightHand = rightHand
            if self.leftHand == None:
                self.leftHand = leftHand
        self.headParts = self.findAllMatches('**/__Actor_head')
        self.legsParts = self.findAllMatches('**/__Actor_legs')
        self.hipsParts = self.legsParts.findAllMatches('**/joint_hips')
        self.torsoParts = self.hipsParts.findAllMatches('**/__Actor_torso')
        return
def initializeBodyCollisions(self, collIdStr):
        """Set up body collisions; additionally let pies collide with this
        toon (PieBitmask) unless ghost mode is active.
        """
        Avatar.Avatar.initializeBodyCollisions(self, collIdStr)
        if not self.ghostMode:
            self.collNode.setCollideMask(self.collNode.getIntoCollideMask() | ToontownGlobals.PieBitmask)
def getBookActors(self):
        """Return (lazily creating) one book prop Actor per LOD, parented
        to the matching right hand and initially hidden.
        """
        if self.__bookActors:
            return self.__bookActors
        bookActor = Actor.Actor('phase_3.5/models/props/book-mod', {'book': 'phase_3.5/models/props/book-chan'})
        # Copies share the loaded geometry/animation of the first actor.
        bookActor2 = Actor.Actor(other=bookActor)
        bookActor3 = Actor.Actor(other=bookActor)
        self.__bookActors = [bookActor, bookActor2, bookActor3]
        hands = self.getRightHands()
        for bookActor, hand in zip(self.__bookActors, hands):
            bookActor.reparentTo(hand)
            bookActor.hide()
        return self.__bookActors
def getHoleActors(self):
        """Return (lazily creating) the three teleport-hole prop Actors,
        named uniquely per avatar when possible so their intervals don't
        collide.
        """
        if self.__holeActors:
            return self.__holeActors
        holeActor = Actor.Actor('phase_3.5/models/props/portal-mod', {'hole': 'phase_3.5/models/props/portal-chan'})
        holeActor2 = Actor.Actor(other=holeActor)
        holeActor3 = Actor.Actor(other=holeActor)
        self.__holeActors = [holeActor, holeActor2, holeActor3]
        for ha in self.__holeActors:
            if hasattr(self, 'uniqueName'):
                holeName = self.uniqueName('toon-portal')
            else:
                holeName = 'toon-portal'
            ha.setName(holeName)
        return self.__holeActors
def rescaleToon(self):
        """Apply the species-specific body and head scales from
        ToontownGlobals to the avatar and every LOD's head part.
        """
        animalStyle = self.style.getAnimal()
        bodyScale = ToontownGlobals.toonBodyScales[animalStyle]
        headScale = ToontownGlobals.toonHeadScales[animalStyle]
        self.setAvatarScale(bodyScale)
        for lod in self.getLODNames():
            self.getPart('head', lod).setScale(headScale)
def getBodyScale(self):
        """Return the species body scale for this toon's style."""
        animalStyle = self.style.getAnimal()
        bodyScale = ToontownGlobals.toonBodyScales[animalStyle]
        return bodyScale
def resetHeight(self):
        """Recompute shoulder height and total height from the style's
        legs/torso/head pieces and current species scales, then apply any
        big/small cheesy-effect multiplier.
        """
        if hasattr(self, 'style') and self.style:
            animal = self.style.getAnimal()
            bodyScale = ToontownGlobals.toonBodyScales[animal]
            # Head scale uses only the Z component of the species scale.
            headScale = ToontownGlobals.toonHeadScales[animal][2]
            shoulderHeight = ToontownGlobals.legHeightDict[self.style.legs] * bodyScale + ToontownGlobals.torsoHeightDict[self.style.torso] * bodyScale
            height = shoulderHeight + ToontownGlobals.headHeightDict[self.style.head] * headScale
            self.shoulderHeight = shoulderHeight
            if self.cheesyEffect == ToontownGlobals.CEBigToon or self.cheesyEffect == ToontownGlobals.CEBigWhite:
                height *= ToontownGlobals.BigToonScale
            elif self.cheesyEffect == ToontownGlobals.CESmallToon:
                height *= ToontownGlobals.SmallToonScale
            self.setHeight(height)
def generateToonLegs(self, copy = 1):
        """Load the legs model and anims for all three LODs from the
        Preloaded model cache and stash the optional footwear geometry
        (re-shown later by generateShoes).

        copy=0 keeps the loaded models shared, so the parts must be shown
        explicitly.
        """
        global Preloaded
        legStyle = self.style.legs
        filePrefix = LegDict.get(legStyle)
        if filePrefix is None:
            self.notify.error('unknown leg style: %s' % legStyle)
        self.loadModel(Preloaded[filePrefix+'-1000'], 'legs', '1000', True)
        self.loadModel(Preloaded[filePrefix+'-500'], 'legs', '500', True)
        self.loadModel(Preloaded[filePrefix+'-250'], 'legs', '250', True)
        if not copy:
            self.showPart('legs', '1000')
            self.showPart('legs', '500')
            self.showPart('legs', '250')
        self.loadAnims(LegsAnimDict[legStyle], 'legs', '1000')
        self.loadAnims(LegsAnimDict[legStyle], 'legs', '500')
        self.loadAnims(LegsAnimDict[legStyle], 'legs', '250')
        self.findAllMatches('**/boots_short').stash()
        self.findAllMatches('**/boots_long').stash()
        self.findAllMatches('**/shoes').stash()
        return
def swapToonLegs(self, legStyle, copy = 1):
        """Replace the legs parts with *legStyle* and rebuild everything
        that depends on them (color, parenting, scale, height, shadow,
        nametag).
        """
        self.unparentToonParts()
        self.removePart('legs', '1000')
        self.removePart('legs', '500')
        self.removePart('legs', '250')
        # Bugfix: Until upstream Panda3D includes this, we have to do it here.
        if 'legs' in self._Actor__commonBundleHandles:
            del self._Actor__commonBundleHandles['legs']
        self.style.legs = legStyle
        self.generateToonLegs(copy)
        self.generateToonColor()
        self.parentToonParts()
        self.rescaleToon()
        self.resetHeight()
        # The shadow joint lives under the old legs; force it to be rebuilt.
        del self.shadowJoint
        self.initializeDropShadow()
        self.initializeNametag3d()
def generateToonTorso(self, copy = 1, genClothes = 1):
        """Load the torso model and anims for all three LODs.

        A one-character torso style is a naked torso, which has no lower
        LOD models (the 1000 model is reused) and gets no clothes.
        """
        global Preloaded
        torsoStyle = self.style.torso
        filePrefix = TorsoDict.get(torsoStyle)
        if filePrefix is None:
            self.notify.error('unknown torso style: %s' % torsoStyle)
        self.loadModel(Preloaded[filePrefix+'-1000'], 'torso', '1000', True)
        if len(torsoStyle) == 1:
            self.loadModel(Preloaded[filePrefix+'-1000'], 'torso', '500', True)
            self.loadModel(Preloaded[filePrefix+'-1000'], 'torso', '250', True)
        else:
            self.loadModel(Preloaded[filePrefix+'-500'], 'torso', '500', True)
            self.loadModel(Preloaded[filePrefix+'-250'], 'torso', '250', True)
        if not copy:
            self.showPart('torso', '1000')
            self.showPart('torso', '500')
            self.showPart('torso', '250')
        self.loadAnims(TorsoAnimDict[torsoStyle], 'torso', '1000')
        self.loadAnims(TorsoAnimDict[torsoStyle], 'torso', '500')
        self.loadAnims(TorsoAnimDict[torsoStyle], 'torso', '250')
        if genClothes == 1 and not len(torsoStyle) == 1:
            self.generateToonClothes()
        return
def swapToonTorso(self, torsoStyle, copy = 1, genClothes = 1):
        """Replace the torso parts with *torsoStyle* and rebuild everything
        that depends on them, including the cached hand joints and the
        backpack accessory (attached to a torso joint).
        """
        self.unparentToonParts()
        self.removePart('torso', '1000')
        self.removePart('torso', '500')
        self.removePart('torso', '250')
        # Bugfix: Until upstream Panda3D includes this, we have to do it here.
        if 'torso' in self._Actor__commonBundleHandles:
            del self._Actor__commonBundleHandles['torso']
        self.style.torso = torsoStyle
        self.generateToonTorso(copy, genClothes)
        self.generateToonColor()
        self.parentToonParts()
        self.rescaleToon()
        self.resetHeight()
        self.setupToonNodes()
        self.generateBackpack()
def generateToonHead(self, copy = 1):
        """Generate the head for all three LODs via ToonHead; dogs
        additionally have animated heads, so their head anims are loaded.
        """
        # NOTE(review): headHeight is captured but not used here; the
        # overall height is recomputed in resetHeight() instead.
        headHeight = ToonHead.generateToonHead(self, copy, self.style, ('1000', '500', '250'))
        if self.style.getAnimal() == 'dog':
            self.loadAnims(HeadAnimDict[self.style.head], 'head', '1000')
            self.loadAnims(HeadAnimDict[self.style.head], 'head', '500')
            self.loadAnims(HeadAnimDict[self.style.head], 'head', '250')
def swapToonHead(self, headStyle, copy = 1):
        """Replace the head parts with *headStyle*, pausing the idle
        look-around behavior and forcing the eyelids open while the old
        head is torn down.
        """
        self.stopLookAroundNow()
        self.eyelids.request('open')
        self.unparentToonParts()
        self.removePart('head', '1000')
        self.removePart('head', '500')
        self.removePart('head', '250')
        # Bugfix: Until upstream Panda3D includes this, we have to do it here.
        if 'head' in self._Actor__commonBundleHandles:
            del self._Actor__commonBundleHandles['head']
        self.style.head = headStyle
        self.generateToonHead(copy)
        self.generateToonColor()
        self.parentToonParts()
        self.rescaleToon()
        self.resetHeight()
        self.eyelids.request('open')
        self.startLookAround()
def generateToonColor(self):
        """Tint the body pieces of every LOD with the style's arm, glove
        and leg colors (head coloring is delegated to ToonHead)."""
        ToonHead.generateToonColor(self, self.style)
        armColor = self.style.getArmColor()
        gloveColor = self.style.getGloveColor()
        legColor = self.style.getLegColor()
        for lodName in self.getLODNames():
            torso = self.getPart('torso', lodName)
            if len(self.style.torso) == 1:
                # Naked torso: color the whole torso geometry.
                parts = torso.findAllMatches('**/torso*')
                parts.setColor(armColor)
            for pieceName in ('arms', 'neck'):
                piece = torso.find('**/' + pieceName)
                piece.setColor(armColor)
            hands = torso.find('**/hands')
            hands.setColor(gloveColor)
            legs = self.getPart('legs', lodName)
            for pieceName in ('legs', 'feet'):
                piece = legs.find('**/%s;+s' % pieceName)
                piece.setColor(legColor)
        if self.cheesyEffect == ToontownGlobals.CEGreenToon:
            # Fresh colors would overwrite the green tint; re-apply it.
            self.reapplyCheesyEffect()
def swapToonColor(self, dna):
        """Re-apply body colors from *dna* without rebuilding any parts."""
        self.setStyle(dna)
        self.generateToonColor()
def __swapToonClothes(self, dna):
        """Re-apply clothes from *dna*; fromNet=1 skips the local girl
        skirt/shorts torso fix-up in generateToonClothes."""
        self.setStyle(dna)
        self.generateToonClothes(fromNet=1)
def sendLogSuspiciousEvent(self, msg):
        """Hook for reporting suspicious (possibly tampered) avatar data;
        a no-op here so display-only toons can ignore it."""
        pass
def generateToonClothes(self, fromNet = 0):
        """Texture and tint the shirt, sleeves and bottoms on every LOD.

        For locally-set DNA (fromNet == 0) on a girl toon, the torso type
        is first corrected to match the chosen bottom (skirt vs shorts).
        Out-of-range texture/color indices fall back to index 0; missing
        texture files are reported as suspicious and also fall back.

        Returns 1 if the torso had to be swapped, else 0.
        """
        swappedTorso = 0
        if self.hasLOD():
            if self.style.getGender() == 'f' and fromNet == 0:
                try:
                    bottomPair = ToonDNA.GirlBottoms[self.style.botTex]
                except:
                    bottomPair = ToonDNA.GirlBottoms[0]
                if len(self.style.torso) < 2:
                    self.sendLogSuspiciousEvent('nakedToonDNA %s was requested' % self.style.torso)
                    return 0
                elif self.style.torso[1] == 's' and bottomPair[1] == ToonDNA.SKIRT:
                    # Shorts torso but a skirt bottom: switch to dress torso.
                    self.swapToonTorso(self.style.torso[0] + 'd', genClothes=0)
                    swappedTorso = 1
                elif self.style.torso[1] == 'd' and bottomPair[1] == ToonDNA.SHORTS:
                    # Dress torso but a shorts bottom: switch to shorts torso.
                    self.swapToonTorso(self.style.torso[0] + 's', genClothes=0)
                    swappedTorso = 1
            try:
                texName = ToonDNA.Shirts[self.style.topTex]
            except:
                texName = ToonDNA.Shirts[0]
            shirtTex = loader.loadTexture(texName, okMissing=True)
            if shirtTex is None:
                self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                shirtTex = loader.loadTexture(ToonDNA.Shirts[0])
            shirtTex.setMinfilter(Texture.FTLinearMipmapLinear)
            shirtTex.setMagfilter(Texture.FTLinear)
            try:
                shirtColor = ToonDNA.ClothesColors[self.style.topTexColor]
            except:
                shirtColor = ToonDNA.ClothesColors[0]
            try:
                texName = ToonDNA.Sleeves[self.style.sleeveTex]
            except:
                texName = ToonDNA.Sleeves[0]
            sleeveTex = loader.loadTexture(texName, okMissing=True)
            if sleeveTex is None:
                self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                sleeveTex = loader.loadTexture(ToonDNA.Sleeves[0])
            sleeveTex.setMinfilter(Texture.FTLinearMipmapLinear)
            sleeveTex.setMagfilter(Texture.FTLinear)
            try:
                sleeveColor = ToonDNA.ClothesColors[self.style.sleeveTexColor]
            except:
                sleeveColor = ToonDNA.ClothesColors[0]
            if self.style.getGender() == 'm':
                try:
                    texName = ToonDNA.BoyShorts[self.style.botTex]
                except:
                    texName = ToonDNA.BoyShorts[0]
            else:
                try:
                    texName = ToonDNA.GirlBottoms[self.style.botTex][0]
                except:
                    texName = ToonDNA.GirlBottoms[0][0]
            bottomTex = loader.loadTexture(texName, okMissing=True)
            if bottomTex is None:
                self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                if self.style.getGender() == 'm':
                    bottomTex = loader.loadTexture(ToonDNA.BoyShorts[0])
                else:
                    bottomTex = loader.loadTexture(ToonDNA.GirlBottoms[0][0])
            bottomTex.setMinfilter(Texture.FTLinearMipmapLinear)
            bottomTex.setMagfilter(Texture.FTLinear)
            try:
                bottomColor = ToonDNA.ClothesColors[self.style.botTexColor]
            except:
                bottomColor = ToonDNA.ClothesColors[0]
            # The cap under skirts/shorts is drawn in a darker shade.
            darkBottomColor = bottomColor * 0.5
            darkBottomColor.setW(1.0)
            for lodName in self.getLODNames():
                thisPart = self.getPart('torso', lodName)
                top = thisPart.find('**/torso-top')
                top.setTexture(shirtTex, 1)
                top.setColor(shirtColor)
                sleeves = thisPart.find('**/sleeves')
                sleeves.setTexture(sleeveTex, 1)
                sleeves.setColor(sleeveColor)
                bottoms = thisPart.findAllMatches('**/torso-bot')
                for bottomNum in range(0, bottoms.getNumPaths()):
                    bottom = bottoms.getPath(bottomNum)
                    bottom.setTexture(bottomTex, 1)
                    bottom.setColor(bottomColor)
                caps = thisPart.findAllMatches('**/torso-bot-cap')
                caps.setColor(darkBottomColor)
        return swappedTorso
def generateHat(self, fromRTM = False):
        """(Re)build the hat accessory from self.hat, instancing the hat
        geometry onto every LOD's head node with per-head-style offsets
        from AccessoryGlobals.

        fromRTM reloads AccessoryGlobals first (live-tuning workflow).
        Out-of-range indices are reported as suspicious and ignored.
        """
        hat = self.getHat()
        if hat[0] >= len(ToonDNA.HatModels):
            self.sendLogSuspiciousEvent('tried to put a wrong hat idx %d' % hat[0])
            return
        if len(self.hatNodes) > 0:
            for hatNode in self.hatNodes:
                hatNode.removeNode()
            self.hatNodes = []
        self.showEars()
        if hat[0] != 0:
            hatGeom = loader.loadModel(ToonDNA.HatModels[hat[0]], okMissing=True)
            if hatGeom:
                # Hat 54 covers the ears -- presumably an ear-concealing
                # model; TODO confirm which hat this is.
                if hat[0] == 54:
                    self.hideEars()
                if hat[1] != 0:
                    texName = ToonDNA.HatTextures[hat[1]]
                    tex = loader.loadTexture(texName, okMissing=True)
                    if tex is None:
                        self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                    else:
                        tex.setMinfilter(Texture.FTLinearMipmapLinear)
                        tex.setMagfilter(Texture.FTLinear)
                        hatGeom.setTexture(tex, 1)
                if fromRTM:
                    importlib.reload(AccessoryGlobals)
                transOffset = None
                # Per-hat override table first, then the generic per-head table.
                if AccessoryGlobals.ExtendedHatTransTable.get(hat[0]):
                    transOffset = AccessoryGlobals.ExtendedHatTransTable[hat[0]].get(self.style.head[:2])
                if transOffset is None:
                    transOffset = AccessoryGlobals.HatTransTable.get(self.style.head[:2])
                    if transOffset is None:
                        return
                hatGeom.setPos(transOffset[0][0], transOffset[0][1], transOffset[0][2])
                hatGeom.setHpr(transOffset[1][0], transOffset[1][1], transOffset[1][2])
                hatGeom.setScale(transOffset[2][0], transOffset[2][1], transOffset[2][2])
                headNodes = self.findAllMatches('**/__Actor_head')
                for headNode in headNodes:
                    hatNode = headNode.attachNewNode('hatNode')
                    self.hatNodes.append(hatNode)
                    hatGeom.instanceTo(hatNode)
        return
def generateGlasses(self, fromRTM = False):
        """(Re)build the glasses accessory from self.glasses; mirrors
        generateHat but attaches to the head nodes and hides the eyelashes
        for models 15 and 16 (which cover the eyes)."""
        glasses = self.getGlasses()
        if glasses[0] >= len(ToonDNA.GlassesModels):
            self.sendLogSuspiciousEvent('tried to put a wrong glasses idx %d' % glasses[0])
            return
        if len(self.glassesNodes) > 0:
            for glassesNode in self.glassesNodes:
                glassesNode.removeNode()
            self.glassesNodes = []
        self.showEyelashes()
        if glasses[0] != 0:
            glassesGeom = loader.loadModel(ToonDNA.GlassesModels[glasses[0]], okMissing=True)
            if glassesGeom:
                if glasses[0] in [15, 16]:
                    self.hideEyelashes()
                if glasses[1] != 0:
                    texName = ToonDNA.GlassesTextures[glasses[1]]
                    tex = loader.loadTexture(texName, okMissing=True)
                    if tex is None:
                        self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                    else:
                        tex.setMinfilter(Texture.FTLinearMipmapLinear)
                        tex.setMagfilter(Texture.FTLinear)
                        glassesGeom.setTexture(tex, 1)
                if fromRTM:
                    importlib.reload(AccessoryGlobals)
                transOffset = None
                # Per-model override table first, then the per-head table.
                if AccessoryGlobals.ExtendedGlassesTransTable.get(glasses[0]):
                    transOffset = AccessoryGlobals.ExtendedGlassesTransTable[glasses[0]].get(self.style.head[:2])
                if transOffset is None:
                    transOffset = AccessoryGlobals.GlassesTransTable.get(self.style.head[:2])
                    if transOffset is None:
                        return
                glassesGeom.setPos(transOffset[0][0], transOffset[0][1], transOffset[0][2])
                glassesGeom.setHpr(transOffset[1][0], transOffset[1][1], transOffset[1][2])
                glassesGeom.setScale(transOffset[2][0], transOffset[2][1], transOffset[2][2])
                headNodes = self.findAllMatches('**/__Actor_head')
                for headNode in headNodes:
                    glassesNode = headNode.attachNewNode('glassesNode')
                    self.glassesNodes.append(glassesNode)
                    glassesGeom.instanceTo(glassesNode)
        return
def generateBackpack(self, fromRTM = False):
        """(Re)build the backpack accessory from self.backpack; mirrors
        generateHat but attaches to the torso's 'def_joint_attachFlower'
        joints, with offsets keyed by torso style instead of head style."""
        backpack = self.getBackpack()
        if backpack[0] >= len(ToonDNA.BackpackModels):
            self.sendLogSuspiciousEvent('tried to put a wrong backpack idx %d' % backpack[0])
            return
        if len(self.backpackNodes) > 0:
            for backpackNode in self.backpackNodes:
                backpackNode.removeNode()
            self.backpackNodes = []
        if backpack[0] != 0:
            geom = loader.loadModel(ToonDNA.BackpackModels[backpack[0]], okMissing=True)
            if geom:
                if backpack[1] != 0:
                    texName = ToonDNA.BackpackTextures[backpack[1]]
                    tex = loader.loadTexture(texName, okMissing=True)
                    if tex is None:
                        self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                    else:
                        tex.setMinfilter(Texture.FTLinearMipmapLinear)
                        tex.setMagfilter(Texture.FTLinear)
                        geom.setTexture(tex, 1)
                if fromRTM:
                    importlib.reload(AccessoryGlobals)
                transOffset = None
                # Per-model override table first, then the per-torso table.
                if AccessoryGlobals.ExtendedBackpackTransTable.get(backpack[0]):
                    transOffset = AccessoryGlobals.ExtendedBackpackTransTable[backpack[0]].get(self.style.torso[:1])
                if transOffset is None:
                    transOffset = AccessoryGlobals.BackpackTransTable.get(self.style.torso[:1])
                    if transOffset is None:
                        return
                geom.setPos(transOffset[0][0], transOffset[0][1], transOffset[0][2])
                geom.setHpr(transOffset[1][0], transOffset[1][1], transOffset[1][2])
                geom.setScale(transOffset[2][0], transOffset[2][1], transOffset[2][2])
                nodes = self.findAllMatches('**/def_joint_attachFlower')
                for node in nodes:
                    theNode = node.attachNewNode('backpackNode')
                    self.backpackNodes.append(theNode)
                    geom.instanceTo(theNode)
        return
def generateShoes(self):
        """Show the footwear geometry matching self.shoes.

        All footwear variants ship inside the legs model; everything is
        stashed first, then the selected variant is unstashed and (for a
        non-bare style) textured.
        """
        shoes = self.getShoes()
        if shoes[0] >= len(ToonDNA.ShoesModels):
            self.sendLogSuspiciousEvent('tried to put a wrong shoes idx %d' % shoes[0])
            return
        self.findAllMatches('**/feet;+s').stash()
        self.findAllMatches('**/boots_short;+s').stash()
        self.findAllMatches('**/boots_long;+s').stash()
        self.findAllMatches('**/shoes;+s').stash()
        geoms = self.findAllMatches('**/%s;+s' % ToonDNA.ShoesModels[shoes[0]])
        for geom in geoms:
            geom.unstash()
        if shoes[0] != 0:
            for geom in geoms:
                texName = ToonDNA.ShoesTextures[shoes[1]]
                # Long-legged toons use a special texture for style 3.
                if self.style.legs == 'l' and shoes[0] == 3:
                    texName = texName[:-4] + 'LL.jpg'
                tex = loader.loadTexture(texName, okMissing=True)
                if tex is None:
                    self.sendLogSuspiciousEvent('failed to load texture %s' % texName)
                else:
                    tex.setMinfilter(Texture.FTLinearMipmapLinear)
                    tex.setMagfilter(Texture.FTLinear)
                    geom.setTexture(tex, 1)
        return
def generateToonAccessories(self):
        """Regenerate all four accessory types from their current values."""
        for regenerate in (self.generateHat,
         self.generateGlasses,
         self.generateBackpack,
         self.generateShoes):
            regenerate()
def setHat(self, hatIdx, textureIdx, colorIdx, fromRTM = False):
        """Store the hat triple (model, texture, color) and rebuild it."""
        self.hat = (hatIdx, textureIdx, colorIdx)
        self.generateHat(fromRTM=fromRTM)
def getHat(self):
        """Return the current (modelIdx, textureIdx, colorIdx) hat triple."""
        return self.hat
def setGlasses(self, glassesIdx, textureIdx, colorIdx, fromRTM = False):
        """Store the glasses triple (model, texture, color) and rebuild them."""
        self.glasses = (glassesIdx, textureIdx, colorIdx)
        self.generateGlasses(fromRTM=fromRTM)
def getGlasses(self):
        """Return the current (modelIdx, textureIdx, colorIdx) glasses triple."""
        return self.glasses
def setBackpack(self, backpackIdx, textureIdx, colorIdx, fromRTM = False):
        """Store the backpack triple (model, texture, color) and rebuild it."""
        self.backpack = (backpackIdx, textureIdx, colorIdx)
        self.generateBackpack(fromRTM=fromRTM)
def getBackpack(self):
        """Return the current (modelIdx, textureIdx, colorIdx) backpack triple."""
        return self.backpack
def setShoes(self, shoesIdx, textureIdx, colorIdx):
        """Store the shoes triple (model, texture, color) and rebuild them."""
        self.shoes = (shoesIdx, textureIdx, colorIdx)
        self.generateShoes()
def getShoes(self):
        """Return the current (modelIdx, textureIdx, colorIdx) shoes triple."""
        return self.shoes
def getDialogueArray(self):
        """Return the species-specific dialogue sound array for this toon,
        or None for an unrecognized species.

        Kept as explicit branches (rather than a dispatch dict) so only
        the matched species' array name is ever evaluated.
        """
        animalType = self.style.getType()
        if animalType == 'dog':
            return DogDialogueArray
        if animalType == 'cat':
            return CatDialogueArray
        if animalType == 'horse':
            return HorseDialogueArray
        if animalType == 'mouse':
            return MouseDialogueArray
        if animalType == 'rabbit':
            return RabbitDialogueArray
        if animalType == 'duck':
            return DuckDialogueArray
        if animalType == 'monkey':
            return MonkeyDialogueArray
        if animalType == 'bear':
            return BearDialogueArray
        if animalType == 'pig':
            return PigDialogueArray
        return None
def getShadowJoint(self):
        """Return (lazily creating and caching) a single NodePath instanced
        to every LOD's 'joint_shadow', so the drop shadow tracks all LODs.
        """
        if hasattr(self, 'shadowJoint'):
            return self.shadowJoint
        shadowJoint = NodePath('shadowJoint')
        for lodName in self.getLODNames():
            joint = self.getPart('legs', lodName).find('**/joint_shadow')
            # Keep chaining instanceTo so one node is instanced under
            # each LOD's shadow joint.
            shadowJoint = shadowJoint.instanceTo(joint)
        self.shadowJoint = shadowJoint
        return shadowJoint
def getNametagJoints(self):
        """Return the 'joint_nameTag' bundle joints found in each LOD's
        legs part (may be empty if the rig has no nametag joint)."""
        joints = []
        for lodName in self.getLODNames():
            bundle = self.getPartBundle('legs', lodName)
            joint = bundle.findChild('joint_nameTag')
            if joint:
                joints.append(joint)
        return joints
def getRightHands(self):
        """Return the per-LOD list of right-hand hold joints."""
        return self.rightHands
def getLeftHands(self):
        """Return the per-LOD list of left-hand hold joints."""
        return self.leftHands
def getHeadParts(self):
        """Return the collection of head part nodes (all LODs)."""
        return self.headParts
def getHipsParts(self):
        """Return the collection of hip joint nodes (all LODs)."""
        return self.hipsParts
def getTorsoParts(self):
        """Return the collection of torso part nodes (all LODs)."""
        return self.torsoParts
def getLegsParts(self):
        """Return the collection of legs part nodes (all LODs)."""
        return self.legsParts
def findSomethingToLookAt(self):
        """Pick a target for the toon's idle gaze.

        10% of the time (or when there is no client repository) the toon
        glances at a random nearby point.  Otherwise it scans the visible
        distributed objects for stare-at candidates in front of it and
        looks at the nearest one (occasionally the second-nearest).
        """
        if self.randGen.random() < 0.1 or not hasattr(self, 'cr'):
            x = self.randGen.choice((-0.8,
             -0.5,
             0,
             0.5,
             0.8))
            y = self.randGen.choice((-0.5,
             0,
             0.5,
             0.8))
            self.lerpLookAt(Point3(x, 1.5, y), blink=1)
            return
        nodePathList = []
        for doId, obj in list(self.cr.doId2do.items()):
            if hasattr(obj, 'getStareAtNodeAndOffset') and obj != self:
                node, offset = obj.getStareAtNodeAndOffset()
                # Only consider candidates in front of this toon.
                if node.getY(self) > 0.0:
                    nodePathList.append((node, offset))
        if nodePathList:
            # Bugfix: sort by distance to this toon, as the pre-Python-3
            # cmp-based sort did.  A plain tuple sort would compare
            # NodePath objects, which is meaningless (and raises on ties).
            nodePathList.sort(key=lambda pair: pair[0].getDistance(self))
            if len(nodePathList) >= 2 and self.randGen.random() >= 0.9:
                chosenNodePath = nodePathList[1]
            else:
                chosenNodePath = nodePathList[0]
            self.lerpLookAt(chosenNodePath[0].getPos(self), blink=1)
        else:
            ToonHead.findSomethingToLookAt(self)
def setForceJumpIdle(self, value):
        """Set the flag that forces the standing jump-idle animation to be
        used while airborne, regardless of the previous animation."""
        self.forceJumpIdle = value
def setupPickTrigger(self):
        """Place the mouse-pick trigger volume around the torso, sized by
        torso height.  Returns 1 on success, 0 if the high-LOD torso is
        missing.
        """
        Avatar.Avatar.setupPickTrigger(self)
        torso = self.getPart('torso', '1000')
        if torso == None:
            return 0
        self.pickTriggerNp.reparentTo(torso)
        size = self.style.getTorsoSize()
        if size == 'short':
            self.pickTriggerNp.setPosHprScale(0, 0, 0.5, 0, 0, 0, 1.5, 1.5, 2)
        elif size == 'medium':
            self.pickTriggerNp.setPosHprScale(0, 0, 0.5, 0, 0, 0, 1, 1, 2)
        else:
            self.pickTriggerNp.setPosHprScale(0, 0, 1, 0, 0, 0, 1, 1, 2)
        return 1
def showBooks(self):
        """Make every per-LOD book prop visible (creating them on demand)."""
        for book in self.getBookActors():
            book.show()
def hideBooks(self):
        """Hide every per-LOD book prop (creating them on demand)."""
        for book in self.getBookActors():
            book.hide()
def getWake(self):
        """Return (lazily creating) the water-wake effect for this toon."""
        if not self.wake:
            self.wake = Wake.Wake(render, self)
        return self.wake
def getJar(self):
        """Return (lazily creating) the jellybean-jar model, pre-posed and
        scaled to 0 so it can be grown by an interval."""
        if not self.jar:
            self.jar = loader.loadModel('phase_5.5/models/estate/jellybeanJar')
            self.jar.setP(290.0)
            self.jar.setY(0.5)
            self.jar.setZ(0.5)
            self.jar.setScale(0.0)
        return self.jar
def removeJar(self):
        """Remove and drop the jellybean-jar model, if present."""
        if self.jar:
            self.jar.removeNode()
            self.jar = None
        return
def setSpeed(self, forwardSpeed, rotateSpeed):
        """Update the locomotion animation from the current speeds.

        Chooses stand/walk/run/reverse from the cutoff thresholds, plays
        the matching anim from self.standWalkRunReverse (set by the active
        FSM state; None means no locomotion anims apply), drives the suit
        anims when disguised, and spawns a water ripple when wading.

        Returns the chosen OTPGlobals action index, or None.
        """
        self.forwardSpeed = forwardSpeed
        self.rotateSpeed = rotateSpeed
        action = None
        if self.standWalkRunReverse != None:
            if forwardSpeed >= ToontownGlobals.RunCutOff:
                action = OTPGlobals.RUN_INDEX
            elif forwardSpeed > ToontownGlobals.WalkCutOff:
                action = OTPGlobals.WALK_INDEX
            elif forwardSpeed < -ToontownGlobals.WalkCutOff:
                action = OTPGlobals.REVERSE_INDEX
            elif rotateSpeed != 0.0:
                # Turning in place still uses the walk anim.
                action = OTPGlobals.WALK_INDEX
            else:
                action = OTPGlobals.STAND_INDEX
            anim, rate = self.standWalkRunReverse[action]
            self.motion.enter()
            self.motion.setState(anim, rate)
            if anim != self.playingAnim:
                self.playingAnim = anim
                self.playingRate = rate
                self.stop()
                self.loop(anim)
                self.setPlayRate(rate, anim)
                if self.isDisguised:
                    # A suit carrying a tray uses the tray-* anim variants;
                    # there is no tray-run, so it falls back to tray-walk.
                    rightHand = self.suit.rightHand
                    numChildren = rightHand.getNumChildren()
                    if numChildren > 0:
                        anim = 'tray-' + anim
                        if anim == 'tray-run':
                            anim = 'tray-walk'
                    self.suit.stop()
                    self.suit.loop(anim)
                    self.suit.setPlayRate(rate, anim)
            elif rate != self.playingRate:
                # Same anim, new speed: just adjust the play rate.
                self.playingRate = rate
                if not self.isDisguised:
                    self.setPlayRate(rate, anim)
                else:
                    self.suit.setPlayRate(rate, anim)
        showWake, wakeWaterHeight = ZoneUtil.getWakeInfo()
        if showWake and self.getZ(render) < wakeWaterHeight and abs(forwardSpeed) > ToontownGlobals.WalkCutOff:
            currT = globalClock.getFrameTime()
            deltaT = currT - self.lastWakeTime
            # Precedence: (running and past the run delta) OR past the walk
            # delta -- running toons ripple more often.
            if action == OTPGlobals.RUN_INDEX and deltaT > ToontownGlobals.WakeRunDelta or deltaT > ToontownGlobals.WakeWalkDelta:
                self.getWake().createRipple(wakeWaterHeight, rate=1, startFrame=4)
                self.lastWakeTime = currT
        return action
def enterOff(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: inert -- no active shadow, no tracked animation."""
        self.setActiveShadow(0)
        self.playingAnim = None
        return
def exitOff(self):
        pass
def enterNeutral(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: idle.  Starts the neutral loop at a random frame so a
        crowd of toons doesn't animate in lockstep."""
        anim = 'neutral'
        self.pose(anim, int(self.getNumFrames(anim) * self.randGen.random()))
        # restart=0 continues from the posed frame instead of frame 0.
        self.loop(anim, restart=0)
        self.setPlayRate(animMultiplier, anim)
        self.playingAnim = anim
        self.setActiveShadow(1)
def exitNeutral(self):
        self.stop()
def enterVictory(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: victory dance, resumed at the frame implied by the
        timestamp *ts* so late joiners stay in sync."""
        anim = 'victory'
        frame = int(ts * self.getFrameRate(anim) * animMultiplier)
        self.pose(anim, frame)
        self.loop('victory', restart=0)
        self.setPlayRate(animMultiplier, 'victory')
        self.playingAnim = anim
        self.setActiveShadow(0)
def exitVictory(self):
        self.stop()
def enterHappy(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: normal locomotion.  Installs the stand/walk/run/
        reverse anim table and lets setSpeed pick the actual anim."""
        self.playingAnim = None
        self.playingRate = None
        self.standWalkRunReverse = (('neutral', 1.0),
         ('walk', 1.0),
         ('run', 1.0),
         ('walk', -1.0))
        self.setSpeed(self.forwardSpeed, self.rotateSpeed)
        self.setActiveShadow(1)
        return
def exitHappy(self):
        self.standWalkRunReverse = None
        self.stop()
        self.motion.exit()
        return
def enterSad(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: sad locomotion.  Uses the sad anim variants, disables
        body emotes, and (locally) disables jumping."""
        self.playingAnim = 'sad'
        self.playingRate = None
        self.standWalkRunReverse = (('sad-neutral', 1.0),
         ('sad-walk', 1.2),
         ('sad-walk', 1.2),
         ('sad-walk', -1.0))
        self.setSpeed(0, 0)
        Emote.globalEmote.disableBody(self, 'toon, enterSad')
        self.setActiveShadow(1)
        if self.isLocal():
            self.controlManager.disableAvatarJump()
        return
def exitSad(self):
        self.standWalkRunReverse = None
        self.stop()
        self.motion.exit()
        Emote.globalEmote.releaseBody(self, 'toon, exitSad')
        if self.isLocal():
            self.controlManager.enableAvatarJump()
        return
def enterCatching(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: catching-game locomotion (arms up) anim table."""
        self.playingAnim = None
        self.playingRate = None
        self.standWalkRunReverse = (('catch-neutral', 1.0),
         ('catch-run', 1.0),
         ('catch-run', 1.0),
         ('catch-run', -1.0))
        self.setSpeed(self.forwardSpeed, self.rotateSpeed)
        self.setActiveShadow(1)
        return
def exitCatching(self):
        self.standWalkRunReverse = None
        self.stop()
        self.motion.exit()
        return
def enterCatchEating(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: catching-game locomotion while eating a caught fruit."""
        self.playingAnim = None
        self.playingRate = None
        self.standWalkRunReverse = (('catch-eatneutral', 1.0),
         ('catch-eatnrun', 1.0),
         ('catch-eatnrun', 1.0),
         ('catch-eatnrun', -1.0))
        self.setSpeed(self.forwardSpeed, self.rotateSpeed)
        self.setActiveShadow(0)
        return
def exitCatchEating(self):
        self.standWalkRunReverse = None
        self.stop()
        self.motion.exit()
        return
def enterWalk(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: unconditional walk loop (ignores actual speed)."""
        self.loop('walk')
        self.setPlayRate(animMultiplier, 'walk')
        self.setActiveShadow(1)
def exitWalk(self):
        self.stop()
def getJumpDuration(self):
        """Return the duration (seconds) of whichever jump anim applies:
        the standing jump when idle, the running jump otherwise."""
        anim = 'jump' if self.playingAnim == 'neutral' else 'running-jump'
        return self.getDuration(anim, 'legs')
def enterJump(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: full jump anim.  Standing jump when previously idle,
        running jump otherwise.  Disguised toons keep the suit anim."""
        if not self.isDisguised:
            if self.playingAnim == 'neutral':
                anim = 'jump'
            else:
                anim = 'running-jump'
            self.playingAnim = anim
            self.setPlayRate(animMultiplier, anim)
            self.play(anim)
            self.setActiveShadow(1)
def exitJump(self):
        self.stop()
        self.playingAnim = 'neutral'
def enterJumpSquat(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: crouch before leaving the ground."""
        if not self.isDisguised:
            if self.playingAnim == 'neutral':
                anim = 'jump-squat'
            else:
                anim = 'running-jump-squat'
            self.playingAnim = anim
            self.setPlayRate(animMultiplier, anim)
            self.play(anim)
            self.setActiveShadow(1)
def exitJumpSquat(self):
        self.stop()
        self.playingAnim = 'neutral'
def enterJumpAirborne(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: airborne loop; forceJumpIdle forces the standing
        jump-idle variant regardless of the previous anim."""
        if not self.isDisguised:
            if self.playingAnim == 'neutral' or self.forceJumpIdle:
                anim = 'jump-idle'
            else:
                anim = 'running-jump-idle'
            self.playingAnim = anim
            self.setPlayRate(animMultiplier, anim)
            self.loop(anim)
            self.setActiveShadow(1)
def exitJumpAirborne(self):
        self.stop()
        self.playingAnim = 'neutral'
def enterJumpLand(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: landing.  Plays the running landing after a running
        jump-idle, the standing landing otherwise.  Disguised toons keep
        the suit anim.

        (A never-used skipStart offset was computed here; removed.)
        """
        if not self.isDisguised:
            if self.playingAnim == 'running-jump-idle':
                anim = 'running-jump-land'
            else:
                anim = 'jump-land'
            self.playingAnim = anim
            self.setPlayRate(animMultiplier, anim)
            self.play(anim)
            self.setActiveShadow(1)
def exitJumpLand(self):
        self.stop()
        self.playingAnim = 'neutral'
def enterRun(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: unconditional run loop; body emotes are disabled
        while running."""
        self.loop('run')
        self.setPlayRate(animMultiplier, 'run')
        Emote.globalEmote.disableBody(self, 'toon, enterRun')
        self.setActiveShadow(1)
def exitRun(self):
        self.stop()
        Emote.globalEmote.releaseBody(self, 'toon, exitRun')
def enterSwim(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
Emote.globalEmote.disableAll(self, 'enterSwim')
self.playingAnim = 'swim'
self.loop('swim')
self.setPlayRate(animMultiplier, 'swim')
self.getGeomNode().setP(-89.0)
self.dropShadow.hide()
if self.isLocal():
self.useSwimControls()
self.nametag3d.setPos(0, -2, 1)
self.startBobSwimTask()
self.setActiveShadow(0)
    def enterCringe(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: loop the cringe reaction, sunk 2 units into the ground."""
        self.loop('cringe')
        self.getGeomNode().setPos(0, 0, -2)
        # NOTE(review): the rate is applied to 'swim', not 'cringe' -- looks
        # like a copy/paste slip inherited from upstream; confirm intent
        # before changing.
        self.setPlayRate(animMultiplier, 'swim')
    def exitCringe(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM cleanup: stop cringing and restore the geometry's position."""
        self.stop()
        self.getGeomNode().setPos(0, 0, 0)
        self.playingAnim = 'neutral'
        # NOTE(review): same as enterCringe -- rate applied to 'swim'.
        self.setPlayRate(animMultiplier, 'swim')
    def enterDive(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: diving -- loops the swim anim with the body lowered."""
        self.loop('swim')
        # Guard: some placeholder geom nodes may not expose setPos.
        if hasattr(self.getGeomNode(), 'setPos'):
            self.getGeomNode().setPos(0, 0, -2)
        self.setPlayRate(animMultiplier, 'swim')
        self.setActiveShadow(0)
        self.dropShadow.hide()
        self.nametag3d.setPos(0, -2, 1)
    def exitDive(self):
        """FSM cleanup: restore position, shadow and nametag after a dive."""
        self.stop()
        self.getGeomNode().setPos(0, 0, 0)
        self.playingAnim = 'neutral'
        self.dropShadow.show()
        self.nametag3d.setPos(0, 0, self.height + 0.5)
    def enterSwimHold(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: freeze on frame 55 of the swim animation (held pose)."""
        self.getGeomNode().setPos(0, 0, -2)
        self.nametag3d.setPos(0, -2, 1)
        self.pose('swim', 55)
    def exitSwimHold(self):
        """FSM cleanup: release the held swim pose and restore normal display."""
        self.stop()
        self.getGeomNode().setPos(0, 0, 0)
        self.playingAnim = 'neutral'
        self.dropShadow.show()
        self.nametag3d.setPos(0, 0, self.height + 0.5)
    def exitSwim(self):
        """FSM cleanup: undo everything enterSwim changed.

        Stops the bob task, restores upright orientation, drop shadow,
        walk controls (local avatar) and nametag height, then re-enables
        emotes.
        """
        self.stop()
        self.playingAnim = 'neutral'
        self.stopBobSwimTask()
        self.getGeomNode().setPosHpr(0, 0, 0, 0, 0, 0)
        self.dropShadow.show()
        if self.isLocal():
            self.useWalkControls()
        self.nametag3d.setPos(0, 0, self.height + 0.5)
        Emote.globalEmote.releaseAll(self, 'exitSwim')
def startBobSwimTask(self):
if getattr(self, 'swimBob', None):
self.swimBob.finish()
self.swimBob = None
self.nametag3d.setZ(5.0)
geomNode = self.getGeomNode()
geomNode.setZ(4.0)
self.swimBob = Sequence(
geomNode.posInterval(1, Point3(0, -3, 3), startPos=Point3(0, -3, 4), blendType='easeInOut'),
geomNode.posInterval(1, Point3(0, -3, 4), startPos=Point3(0, -3, 3), blendType='easeInOut'))
self.swimBob.loop()
    def stopBobSwimTask(self):
        """Finish the swim-bob interval (if any) and restore resting heights."""
        swimBob = getattr(self, 'swimBob', None)
        if swimBob:
            swimBob.finish()
        self.getGeomNode().setPos(0, 0, 0)
        self.nametag3d.setZ(1.0)
    def enterOpenBook(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the book-opening flourish (shticker book).

        Builds a parallel of 'book' ActorIntervals over the toon and each
        book prop, wrapped in a track that shows the books first.  If
        callback is given it fires when the track completes.
        """
        Emote.globalEmote.disableAll(self, 'enterOpenBook')
        self.playingAnim = 'openBook'
        self.stopLookAround()
        self.lerpLookAt(Point3(0, 1, -2))
        bookTracks = Parallel()
        for bookActor in self.getBookActors():
            bookTracks.append(ActorInterval(bookActor, 'book', startTime=1.2, endTime=1.5))
        bookTracks.append(ActorInterval(self, 'book', startTime=1.2, endTime=1.5))
        # uniqueName may be absent on non-distributed toons.
        if hasattr(self, 'uniqueName'):
            trackName = self.uniqueName('openBook')
        else:
            trackName = 'openBook'
        self.track = Sequence(Func(self.showBooks), bookTracks, Wait(0.1), name=trackName)
        if callback:
            self.track.setDoneEvent(self.track.getName())
            self.acceptOnce(self.track.getName(), callback, extraArgs)
        self.track.start(ts)
        self.setActiveShadow(0)
    def exitOpenBook(self):
        """FSM cleanup: finish the open-book track, hide the books, re-enable emotes."""
        self.playingAnim = 'neutralob'
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        self.hideBooks()
        self.startLookAround()
        Emote.globalEmote.releaseAll(self, 'exitOpenBook')
        return
    def enterReadBook(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: loop the page-flipping segment of the book animation."""
        Emote.globalEmote.disableBody(self, 'enterReadBook')
        self.playingAnim = 'readBook'
        self.stopLookAround()
        self.lerpLookAt(Point3(0, 1, -2))
        self.showBooks()
        # Ping-pong frames 38-118 so the flipping never visibly restarts.
        for bookActor in self.getBookActors():
            bookActor.pingpong('book', fromFrame=38, toFrame=118)
        self.pingpong('book', fromFrame=38, toFrame=118)
        self.setActiveShadow(0)
    def exitReadBook(self):
        """FSM cleanup: stop the page-flip loops and hide the book props."""
        self.playingAnim = 'neutralrb'
        self.hideBooks()
        for bookActor in self.getBookActors():
            bookActor.stop()
        self.startLookAround()
        Emote.globalEmote.releaseBody(self, 'exitReadBook')
    def enterCloseBook(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the book-closing flourish, then hide the books.

        Mirrors enterOpenBook but uses the closing segment (4.96-6.5s) and
        hides the books when the track finishes.
        """
        Emote.globalEmote.disableAll(self, 'enterCloseBook')
        self.playingAnim = 'closeBook'
        bookTracks = Parallel()
        for bookActor in self.getBookActors():
            bookTracks.append(ActorInterval(bookActor, 'book', startTime=4.96, endTime=6.5))
        bookTracks.append(ActorInterval(self, 'book', startTime=4.96, endTime=6.5))
        if hasattr(self, 'uniqueName'):
            trackName = self.uniqueName('closeBook')
        else:
            trackName = 'closeBook'
        self.track = Sequence(Func(self.showBooks), bookTracks, Func(self.hideBooks), name=trackName)
        if callback:
            self.track.setDoneEvent(self.track.getName())
            self.acceptOnce(self.track.getName(), callback, extraArgs)
        self.track.start(ts)
        self.setActiveShadow(0)
    def exitCloseBook(self):
        """FSM cleanup: finish the close-book track and re-enable emotes."""
        self.playingAnim = 'neutralcb'
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        Emote.globalEmote.releaseAll(self, 'exitCloseBook')
        return
def getSoundTeleport(self):
if not self.soundTeleport:
self.soundTeleport = base.loader.loadSfx('phase_3.5/audio/sfx/AV_teleport.ogg')
return self.soundTeleport
    def getTeleportOutTrack(self, autoFinishTrack = 1):
        """Build and return the teleport-out interval (toon jumps into a hole).

        The three hole actors start parented to the right hands; at 1.708s
        the first hole is thrown to the ground (reparented to the toon and
        forced into the shadow bin) and at 3.4s everything is cleaned up.
        """
        def showHoles(holes, hands):
            # Attach one hole prop to each right hand.
            for hole, hand in zip(holes, hands):
                hole.reparentTo(hand)
        def reparentHoles(holes, toon):
            # Keep only the first hole, rendered as a ground decal.
            holes[0].reparentTo(toon)
            holes[1].detachNode()
            holes[2].detachNode()
            holes[0].setBin('shadow', 0)
            holes[0].setDepthTest(0)
            holes[0].setDepthWrite(0)
        def cleanupHoles(holes):
            holes[0].detachNode()
            holes[0].clearBin()
            holes[0].clearDepthTest()
            holes[0].clearDepthWrite()
        holes = self.getHoleActors()
        hands = self.getRightHands()
        holeTrack = Track((0.0, Func(showHoles, holes, hands)), (0.5, SoundInterval(self.getSoundTeleport(), node=self)), (1.708, Func(reparentHoles, holes, self)), (3.4, Func(cleanupHoles, holes)))
        if hasattr(self, 'uniqueName'):
            trackName = self.uniqueName('teleportOut')
        else:
            trackName = 'teleportOut'
        track = Parallel(holeTrack, name=trackName, autoFinish=autoFinishTrack)
        for hole in holes:
            track.append(ActorInterval(hole, 'hole', duration=3.4))
        track.append(ActorInterval(self, 'teleport', duration=3.4))
        return track
    def startQuestMap(self):
        # Hook for subclasses; the base toon has no quest map.
        pass
    def stopQuestMap(self):
        # Hook for subclasses; the base toon has no quest map.
        pass
    def enterTeleportOut(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the teleport-out sequence (jump into a portable hole).

        Ghost-mode or disguised toons skip the animation and invoke the
        callback immediately.  A clip plane is attached so the toon's
        geometry appears to sink below the hole.
        """
        name = self.name
        if hasattr(self, 'doId'):
            name += '-' + str(self.doId)
        self.notify.debug('enterTeleportOut %s' % name)
        if self.ghostMode or self.isDisguised:
            if callback:
                callback(*extraArgs)
            return
        self.playingAnim = 'teleport'
        Emote.globalEmote.disableAll(self, 'enterTeleportOut')
        # Local avatar must control its own track lifetime (no auto-finish).
        if self.isLocal():
            autoFinishTrack = 0
        else:
            autoFinishTrack = 1
        self.track = self.getTeleportOutTrack(autoFinishTrack)
        self.track.setDoneEvent(self.track.getName())
        self.acceptOnce(self.track.getName(), self.finishTeleportOut, [callback, extraArgs])
        holeClip = PlaneNode('holeClip')
        self.holeClipPath = self.attachNewNode(holeClip)
        self.getGeomNode().setClipPlane(self.holeClipPath)
        self.nametag3d.setClipPlane(self.holeClipPath)
        self.track.start(ts)
        self.setActiveShadow(0)
    def finishTeleportOut(self, callback = None, extraArgs = []):
        """Done-event handler for the teleport-out track.

        Cleans up the track, transitions the anim FSM to 'TeleportedOut'
        and invokes the caller's callback.
        """
        name = self.name
        if hasattr(self, 'doId'):
            name += '-' + str(self.doId)
        self.notify.debug('finishTeleportOut %s' % name)
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        if hasattr(self, 'animFSM'):
            self.animFSM.request('TeleportedOut')
        if callback:
            callback(*extraArgs)
        return
    def exitTeleportOut(self):
        """FSM cleanup: finish the teleport-out track and remove the clip plane."""
        name = self.name
        if hasattr(self, 'doId'):
            name += '-' + str(self.doId)
        self.notify.debug('exitTeleportOut %s' % name)
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            self.track = None
        # Guard against nodes already destroyed during teardown.
        geomNode = self.getGeomNode()
        if geomNode and not geomNode.isEmpty():
            self.getGeomNode().clearClipPlane()
        if self.nametag3d and not self.nametag3d.isEmpty():
            self.nametag3d.clearClipPlane()
        if self.holeClipPath:
            self.holeClipPath.removeNode()
            self.holeClipPath = None
        Emote.globalEmote.releaseAll(self, 'exitTeleportOut')
        if self and not self.isEmpty():
            self.show()
        return
    def enterTeleportedOut(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: toon has fully left; just make sure no shadow is shown."""
        self.setActiveShadow(0)
    def exitTeleportedOut(self):
        # Nothing to undo; enterTeleportedOut only hid the shadow.
        pass
    def getDiedInterval(self, autoFinishTrack = 1):
        """Build and return the 'going sad' interval.

        Plays the 'lose' anim with the sad-trombone sound, shrinks the toon
        to nothing at 5.333s, detaches it, then restores scale/eyes/emotes
        so the actor is reusable.
        """
        sound = loader.loadSfx('phase_5/audio/sfx/ENC_Lose.ogg')
        if hasattr(self, 'uniqueName'):
            trackName = self.uniqueName('died')
        else:
            trackName = 'died'
        ival = Sequence(Func(Emote.globalEmote.disableBody, self), Func(self.sadEyes), Func(self.blinkEyes), Track((0, ActorInterval(self, 'lose')), (2, SoundInterval(sound, node=self)), (5.333, self.scaleInterval(1.5, VBase3(0.01, 0.01, 0.01), blendType='easeInOut'))), Func(self.detachNode), Func(self.setScale, 1, 1, 1), Func(self.normalEyes), Func(self.blinkEyes), Func(Emote.globalEmote.releaseBody, self), name=trackName, autoFinish=autoFinishTrack)
        return ival
    def enterDied(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the 'going sad' sequence.

        Ghost-mode toons skip straight to the callback; a disguised toon
        loses the suit first.  Any pending jump-land fix-up task is removed
        so it cannot clobber the 'lose' anim.
        """
        if self.ghostMode:
            if callback:
                callback(*extraArgs)
            return
        if self.isDisguised:
            self.takeOffSuit()
        self.playingAnim = 'lose'
        Emote.globalEmote.disableAll(self, 'enterDied')
        if self.isLocal():
            autoFinishTrack = 0
        else:
            autoFinishTrack = 1
        if hasattr(self, 'jumpLandAnimFixTask') and self.jumpLandAnimFixTask:
            self.jumpLandAnimFixTask.remove()
            self.jumpLandAnimFixTask = None
        self.track = self.getDiedInterval(autoFinishTrack)
        if callback:
            self.track = Sequence(self.track, Func(callback, *extraArgs), autoFinish=autoFinishTrack)
        self.track.start(ts)
        self.setActiveShadow(0)
        return
    def finishDied(self, callback = None, extraArgs = []):
        """Finish the died track early, request 'TeleportedOut' and call back."""
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        if hasattr(self, 'animFSM'):
            self.animFSM.request('TeleportedOut')
        if callback:
            callback(*extraArgs)
        return
    def exitDied(self):
        """FSM cleanup: finish the died track, re-enable emotes, reshow the toon."""
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        Emote.globalEmote.releaseAll(self, 'exitDied')
        self.show()
        return
    def getTeleportInTrack(self):
        """Build and return the teleport-in interval (toon pops out of a hole).

        The hole opens (anim played in reverse), the toon appears mid-jump,
        and the hole closes again before being restored to its idle state.
        """
        hole = self.getHoleActors()[0]
        # Render the hole as a ground decal (shadow bin, no depth).
        hole.setBin('shadow', 0)
        hole.setDepthTest(0)
        hole.setDepthWrite(0)
        holeTrack = Sequence()
        holeTrack.append(Func(hole.reparentTo, self))
        pos = Point3(0, -2.4, 0)
        holeTrack.append(Func(hole.setPos, self, pos))
        # Play the 'hole' anim backwards (3.4 -> 3.1) to open it.
        holeTrack.append(ActorInterval(hole, 'hole', startTime=3.4, endTime=3.1))
        holeTrack.append(Wait(0.6))
        holeTrack.append(ActorInterval(hole, 'hole', startTime=3.1, endTime=3.4))
        def restoreHole(hole):
            hole.setPos(0, 0, 0)
            hole.detachNode()
            hole.clearBin()
            hole.clearDepthTest()
            hole.clearDepthWrite()
        holeTrack.append(Func(restoreHole, hole))
        toonTrack = Sequence(Wait(0.3), Func(self.getGeomNode().show), Func(self.nametag3d.show), ActorInterval(self, 'jump', startTime=0.45))
        if hasattr(self, 'uniqueName'):
            trackName = self.uniqueName('teleportIn')
        else:
            trackName = 'teleportIn'
        return Parallel(holeTrack, toonTrack, name=trackName)
    def enterTeleportIn(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the teleport-in sequence.

        Ghost-mode or disguised toons skip the animation.  The toon is
        posed on the final teleport frame and hidden until the hole opens.
        """
        if self.ghostMode or self.isDisguised:
            if callback:
                callback(*extraArgs)
            return
        self.show()
        self.playingAnim = 'teleport'
        Emote.globalEmote.disableAll(self, 'enterTeleportIn')
        self.pose('teleport', self.getNumFrames('teleport') - 1)
        self.getGeomNode().hide()
        self.nametag3d.hide()
        self.track = self.getTeleportInTrack()
        if callback:
            self.track.setDoneEvent(self.track.getName())
            self.acceptOnce(self.track.getName(), callback, extraArgs)
        self.track.start(ts)
        self.setActiveShadow(0)
    def exitTeleportIn(self):
        """FSM cleanup: finish the teleport-in track and unhide the toon."""
        self.playingAnim = None
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        if not self.ghostMode and not self.isDisguised:
            self.getGeomNode().show()
            self.nametag3d.show()
        Emote.globalEmote.releaseAll(self, 'exitTeleportIn')
        return
    def enterSitStart(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the sit-down transition.

        The local avatar chains into the 'Sit' state when the transition
        anim finishes; remote toons just play the anim.
        """
        Emote.globalEmote.disableBody(self)
        self.playingAnim = 'sit-start'
        if self.isLocal():
            self.track = Sequence(ActorInterval(self, 'sit-start'), Func(self.b_setAnimState, 'Sit', animMultiplier))
        else:
            self.track = Sequence(ActorInterval(self, 'sit-start'))
        self.track.start(ts)
        self.setActiveShadow(0)
    def exitSitStart(self):
        """FSM cleanup: finish the sit-down transition track."""
        self.playingAnim = 'neutral'
        if self.track != None:
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        Emote.globalEmote.releaseBody(self)
        return
    def enterSit(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: loop the seated idle animation."""
        Emote.globalEmote.disableBody(self)
        self.playingAnim = 'sit'
        self.loop('sit')
        self.setActiveShadow(0)
    def exitSit(self):
        """FSM cleanup: leave the seated idle and re-enable body emotes."""
        self.playingAnim = 'neutral'
        Emote.globalEmote.releaseBody(self)
    def enterSleep(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: fall asleep (slowed neutral anim, closed eyes, 'Zzz' chat).

        For the local avatar, schedules the AFK timeout task that will
        eventually teleport the toon out.
        """
        self.stopLookAround()
        self.stopBlink()
        self.closeEyes()
        self.lerpLookAt(Point3(0, 1, -4))
        self.loop('neutral')
        # 40% speed gives the slow sleeping 'breathing' look.
        self.setPlayRate(animMultiplier * 0.4, 'neutral')
        self.setChatAbsolute(SLEEP_STRING, CFThought)
        if self == base.localAvatar:
            self.notify.debug('Adding timeout task to Toon.')
            taskMgr.doMethodLater(self.afkTimeout, self.__handleAfkTimeout, self.uniqueName('afkTimeout'))
        self.setActiveShadow(0)
    def __handleAfkTimeout(self, task):
        """doMethodLater handler: AFK limit reached -- teleport the toon out."""
        self.notify.debug('Handling timeout task on Toon.')
        self.ignore('wakeup')
        self.takeOffSuit()
        base.cr.playGame.getPlace().fsm.request('final')
        self.b_setAnimState('TeleportOut', 1, self.__handleAfkExitTeleport, [0])
        return Task.done
    def __handleAfkExitTeleport(self, requestStatus):
        """Teleport-out callback after an AFK timeout: close the shard."""
        self.notify.info('closing shard...')
        base.cr.gameFSM.request('closeShard', ['afkTimeout'])
    def exitSleep(self):
        """FSM cleanup: wake up -- cancel the AFK timer and clear the 'Zzz' chat."""
        taskMgr.remove(self.uniqueName('afkTimeout'))
        self.startLookAround()
        self.openEyes()
        self.startBlink()
        # stuck-sleep-fix also checks the stomped chat text so a queued
        # 'Zzz' bubble cannot survive the wake-up.
        if config.GetBool('stuck-sleep-fix', 1):
            doClear = SLEEP_STRING in (self.nametag.getChatText(), self.nametag.getStompChatText())
        else:
            doClear = self.nametag.getChatText() == SLEEP_STRING
        if doClear:
            self.clearChat()
        self.lerpLookAt(Point3(0, 1, 0), time=0.25)
        self.stop()
    def enterPush(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: loop the pushing animation (e.g. against a wall)."""
        Emote.globalEmote.disableBody(self)
        self.playingAnim = 'push'
        self.track = Sequence(ActorInterval(self, 'push'))
        self.track.loop()
        self.setActiveShadow(1)
    def exitPush(self):
        """FSM cleanup: stop the push loop and re-enable body emotes."""
        self.playingAnim = 'neutral'
        if self.track != None:
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        Emote.globalEmote.releaseBody(self)
        return
    def enterEmote(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the emote whose index is extraArgs[0].

        Resets the stand/walk/run anim table, wakes a sleeping local
        avatar (unless the emote IS the sleep emote) and starts the emote
        track via the global Emote manager.
        """
        if len(extraArgs) > 0:
            emoteIndex = extraArgs[0]
        else:
            return
        self.playingAnim = None
        self.playingRate = None
        self.standWalkRunReverse = (('neutral', 1.0),
         ('walk', 1.0),
         ('run', 1.0),
         ('walk', -1.0))
        self.setSpeed(self.forwardSpeed, self.rotateSpeed)
        if self.isLocal() and emoteIndex != Emote.globalEmote.EmoteSleepIndex:
            if self.sleepFlag:
                self.b_setAnimState('Happy', self.animMultiplier)
            self.wakeUp()
        # NOTE(review): this initial assignment is dead -- 'duration' is
        # unconditionally overwritten on the next line and never read.
        duration = 0
        self.emoteTrack, duration = Emote.globalEmote.doEmote(self, emoteIndex, ts)
        self.setActiveShadow(1)
        return
    def doEmote(self, emoteIndex, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """Play an emote outside the anim FSM (e.g. triggered remotely).

        Emotes from ignored avatars are dropped.  The local avatar is
        woken and detached from speed-tracked animation first.
        """
        if not self.isLocal():
            if base.cr.avatarFriendsManager.checkIgnored(self.doId):
                return
        duration = 0
        if self.isLocal():
            self.wakeUp()
            if self.hasTrackAnimToSpeed():
                self.trackAnimToSpeed(None)
        self.emoteTrack, duration = Emote.globalEmote.doEmote(self, emoteIndex, ts)
        return
def __returnToLastAnim(self, task):
if self.playingAnim:
self.loop(self.playingAnim)
elif self.hp > 0:
self.loop('neutral')
else:
self.loop('sad-neutral')
return Task.done
def __finishEmote(self, task):
if self.isLocal():
if self.hp > 0:
self.b_setAnimState('Happy')
else:
self.b_setAnimState('Sad')
return Task.done
    def exitEmote(self):
        """FSM cleanup: stop any running emote track and its finish task."""
        self.stop()
        if self.emoteTrack != None:
            self.emoteTrack.finish()
            self.emoteTrack = None
        taskMgr.remove(self.taskName('finishEmote'))
        return
    def enterSquish(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: squash the toon flat, then re-inflate it with a pop.

        Scales the geometry to a pancake, waits, then over-inflates and
        settles back to the original scale while playing the jump anim
        and decompress sound.
        """
        Emote.globalEmote.disableAll(self)
        sound = loader.loadSfx('phase_9/audio/sfx/toon_decompress.ogg')
        lerpTime = 0.1
        node = self.getGeomNode().getChild(0)
        origScale = node.getScale()
        self.track = Sequence(LerpScaleInterval(node, lerpTime, VBase3(2, 2, 0.025), blendType='easeInOut'), Wait(1.0), Parallel(Sequence(Wait(0.4), LerpScaleInterval(node, lerpTime, VBase3(1.4, 1.4, 1.4), blendType='easeInOut'), LerpScaleInterval(node, lerpTime / 2.0, VBase3(0.8, 0.8, 0.8), blendType='easeInOut'), LerpScaleInterval(node, lerpTime / 3.0, origScale, blendType='easeInOut')), ActorInterval(self, 'jump', startTime=0.2), SoundInterval(sound)))
        self.track.start(ts)
        self.setActiveShadow(1)
    def exitSquish(self):
        """FSM cleanup: finish the squish track and re-enable emotes."""
        self.playingAnim = 'neutral'
        if self.track != None:
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        Emote.globalEmote.releaseAll(self)
        return
    def enterFallDown(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: play the backwards-slip fall; callback fires when done."""
        self.playingAnim = 'fallDown'
        Emote.globalEmote.disableAll(self)
        self.track = Sequence(ActorInterval(self, 'slip-backward'), name='fallTrack')
        if callback:
            self.track.setDoneEvent(self.track.getName())
            self.acceptOnce(self.track.getName(), callback, extraArgs)
        self.track.start(ts)
    def exitFallDown(self):
        """FSM cleanup: finish the fall track and re-enable emotes."""
        self.playingAnim = 'neutral'
        if self.track != None:
            self.ignore(self.track.getName())
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        Emote.globalEmote.releaseAll(self)
        return
    def stunToon(self, ts = 0, callback = None, knockdown = 0):
        """Flash the toon semi-transparent to show invulnerability after a hit.

        Sets isStunned for the duration (announced via the
        'toonStunned-<doId>' message for the local avatar).  With
        knockdown set, the slip-backward anim plays alongside the flashes.
        No-op if already stunned.
        """
        if not self.isStunned:
            if self.stunTrack:
                self.stunTrack.finish()
                self.stunTrack = None
            def setStunned(stunned):
                self.isStunned = stunned
                if self == base.localAvatar:
                    messenger.send('toonStunned-' + str(self.doId), [self.isStunned])
            # NOTE(review): 'node' is never used below -- dead local.
            node = self.getGeomNode()
            lerpTime = 0.5
            down = self.doToonColorScale(VBase4(1, 1, 1, 0.6), lerpTime)
            up = self.doToonColorScale(VBase4(1, 1, 1, 0.9), lerpTime)
            clear = self.doToonColorScale(self.defaultColorScale, lerpTime)
            track = Sequence(Func(setStunned, 1), down, up, down, up, down, up, down, clear, Func(self.restoreDefaultColorScale), Func(setStunned, 0))
            if knockdown:
                self.stunTrack = Parallel(ActorInterval(self, animName='slip-backward'), track)
            else:
                self.stunTrack = track
            self.stunTrack.start()
        return
def getPieces(self, *pieces):
results = []
for lodName in self.getLODNames():
for partName, pieceNames in pieces:
part = self.getPart(partName, lodName)
if part:
if type(pieceNames) == str:
pieceNames = (pieceNames,)
for pieceName in pieceNames:
npc = part.findAllMatches('**/%s;+s' % pieceName)
for i in range(npc.getNumPaths()):
results.append(npc[i])
return results
    def applyCheesyEffect(self, effect, lerpTime = 0):
        """Transition the toon to a new cheesy effect.

        Going to/from CENormal is a single do/undo; switching between two
        non-normal effects undoes the old one in the first half of
        lerpTime and applies the new one in the second half.
        """
        if self.effectTrack != None:
            self.effectTrack.finish()
            self.effectTrack = None
        if self.cheesyEffect != effect:
            oldEffect = self.cheesyEffect
            self.cheesyEffect = effect
            if oldEffect == ToontownGlobals.CENormal:
                self.effectTrack = self.__doCheesyEffect(effect, lerpTime)
            elif effect == ToontownGlobals.CENormal:
                self.effectTrack = self.__undoCheesyEffect(oldEffect, lerpTime)
            else:
                self.effectTrack = Sequence(self.__undoCheesyEffect(oldEffect, lerpTime / 2.0), self.__doCheesyEffect(effect, lerpTime / 2.0))
            self.effectTrack.start()
        return
    def reapplyCheesyEffect(self, lerpTime = 0):
        """Re-run the current cheesy effect (e.g. after the body was rebuilt)."""
        if self.effectTrack != None:
            self.effectTrack.finish()
            self.effectTrack = None
        effect = self.cheesyEffect
        # Undo instantly, then redo over lerpTime.
        self.effectTrack = Sequence(self.__undoCheesyEffect(effect, 0), self.__doCheesyEffect(effect, lerpTime))
        self.effectTrack.start()
        return
    def clearCheesyEffect(self, lerpTime = 0):
        """Return the toon to CENormal and discard the transition track."""
        self.applyCheesyEffect(ToontownGlobals.CENormal, lerpTime=lerpTime)
        if self.effectTrack != None:
            self.effectTrack.finish()
            self.effectTrack = None
        return
    def __doHeadScale(self, scale, lerpTime):
        """Return a parallel lerp scaling every head part (None = species default)."""
        if scale == None:
            scale = ToontownGlobals.toonHeadScales[self.style.getAnimal()]
        track = Parallel()
        for hi in range(self.headParts.getNumPaths()):
            head = self.headParts[hi]
            track.append(LerpScaleInterval(head, lerpTime, scale, blendType='easeInOut'))
        return track
    def __doLegsScale(self, scale, lerpTime):
        """Return a parallel lerp scaling the legs.

        The torso is counter-scaled by 1/scale so the body above the legs
        keeps its world-space size.  None restores both to 1.
        """
        if scale == None:
            scale = 1
            invScale = 1
        else:
            invScale = 1.0 / scale
        track = Parallel()
        for li in range(self.legsParts.getNumPaths()):
            legs = self.legsParts[li]
            torso = self.torsoParts[li]
            track.append(LerpScaleInterval(legs, lerpTime, scale, blendType='easeInOut'))
            track.append(LerpScaleInterval(torso, lerpTime, invScale, blendType='easeInOut'))
        return track
    def __doToonScale(self, scale, lerpTime):
        """Return an interval scaling the whole toon (None = scale 1).

        Also lerps HPR back to zero and recomputes collision/nametag
        height afterwards via resetHeight.
        """
        if scale == None:
            scale = 1
        node = self.getGeomNode().getChild(0)
        track = Sequence(Parallel(LerpHprInterval(node, lerpTime, Vec3(0.0, 0.0, 0.0), blendType='easeInOut'), LerpScaleInterval(node, lerpTime, scale, blendType='easeInOut')), Func(self.resetHeight))
        return track
    def doToonColorScale(self, scale, lerpTime, keepDefault = 0):
        """Return an interval lerping the toon's color scale.

        scale:       VBase4 target, or None for fully opaque white.
        keepDefault: also record the scale as the new default (used by
                     restoreDefaultColorScale).
        The opaque torso caps are hidden while alpha < 1 because they
        would otherwise show through the translucent body.
        """
        if keepDefault:
            self.defaultColorScale = scale
        if scale == None:
            scale = VBase4(1, 1, 1, 1)
        node = self.getGeomNode()
        caps = self.getPieces(('torso', 'torso-bot-cap'))
        track = Sequence()
        track.append(Func(node.setTransparency, 1))
        if scale[3] != 1:
            for cap in caps:
                track.append(HideInterval(cap))
        track.append(LerpColorScaleInterval(node, lerpTime, scale, blendType='easeInOut'))
        if scale[3] == 1:
            track.append(Func(node.clearTransparency))
            for cap in caps:
                track.append(ShowInterval(cap))
        elif scale[3] == 0:
            track.append(Func(node.clearTransparency))
        return track
    def __doPumpkinHeadSwitch(self, lerpTime, toPumpkin):
        """Return an interval swapping the toon's head for a pumpkin (or back).

        A dust cloud covers the swap when lerpTime > 0.  Hidden head parts
        are tagged 'pumpkin'='enabled' so only parts hidden by this effect
        are re-shown on the way back.
        """
        node = self.getGeomNode()
        def getDustCloudIval():
            dustCloud = DustCloud.DustCloud(fBillboard=0, wantSound=1)
            dustCloud.setBillboardAxis(2.0)
            dustCloud.setZ(3)
            dustCloud.setScale(0.4)
            dustCloud.createTrack()
            return Sequence(Func(dustCloud.reparentTo, self), dustCloud.track, Func(dustCloud.destroy), name='dustCloadIval')
        dust = getDustCloudIval()
        track = Sequence()
        if toPumpkin:
            track.append(Func(self.stopBlink))
            track.append(Func(self.closeEyes))
            if lerpTime > 0.0:
                track.append(Func(dust.start))
                track.append(Wait(0.5))
            else:
                # No transition requested; discard the cloud immediately.
                dust.finish()
            def hideParts():
                self.notify.debug('hideParts')
                for head in self.headParts:
                    for p in head.getChildren():
                        if hasattr(self, 'pumpkins') and not self.pumpkins.hasPath(p):
                            p.hide()
                            p.setTag('pumpkin', 'enabled')
            track.append(Func(hideParts))
            track.append(Func(self.enablePumpkins, True))
        else:
            if lerpTime > 0.0:
                track.append(Func(dust.start))
                track.append(Wait(0.5))
            else:
                dust.finish()
            def showHiddenParts():
                self.notify.debug('showHiddenParts')
                for head in self.headParts:
                    for p in head.getChildren():
                        if not self.pumpkins.hasPath(p) and p.getTag('pumpkin') == 'enabled':
                            p.show()
                            p.setTag('pumpkin', 'disabled')
            track.append(Func(showHiddenParts))
            track.append(Func(self.enablePumpkins, False))
        track.append(Func(self.startBlink))
        return track
    def __doSnowManHeadSwitch(self, lerpTime, toSnowMan):
        """Return an interval swapping the toon's head for a snowman (or back).

        Same structure as the pumpkin switch but silent (wantSound=0) and
        tagged with 'snowman' instead of 'pumpkin'.
        """
        node = self.getGeomNode()
        def getDustCloudIval():
            dustCloud = DustCloud.DustCloud(fBillboard=0, wantSound=0)
            dustCloud.setBillboardAxis(2.0)
            dustCloud.setZ(3)
            dustCloud.setScale(0.4)
            dustCloud.createTrack()
            return Sequence(Func(dustCloud.reparentTo, self), dustCloud.track, Func(dustCloud.destroy), name='dustCloadIval')
        dust = getDustCloudIval()
        track = Sequence()
        if toSnowMan:
            track.append(Func(self.stopBlink))
            track.append(Func(self.closeEyes))
            if lerpTime > 0.0:
                track.append(Func(dust.start))
                track.append(Wait(0.5))
            else:
                dust.finish()
            def hideParts():
                self.notify.debug('HidePaths')
                for hi in range(self.headParts.getNumPaths()):
                    head = self.headParts[hi]
                    parts = head.getChildren()
                    for pi in range(parts.getNumPaths()):
                        p = parts[pi]
                        if not p.isHidden():
                            p.hide()
                            p.setTag('snowman', 'enabled')
            track.append(Func(hideParts))
            track.append(Func(self.enableSnowMen, True))
        else:
            if lerpTime > 0.0:
                track.append(Func(dust.start))
                track.append(Wait(0.5))
            else:
                dust.finish()
            def showHiddenParts():
                self.notify.debug('ShowHiddenPaths')
                for hi in range(self.headParts.getNumPaths()):
                    head = self.headParts[hi]
                    parts = head.getChildren()
                    for pi in range(parts.getNumPaths()):
                        p = parts[pi]
                        if not self.snowMen.hasPath(p) and p.getTag('snowman') == 'enabled':
                            p.show()
                            p.setTag('snowman', 'disabled')
            track.append(Func(showHiddenParts))
            track.append(Func(self.enableSnowMen, False))
        track.append(Func(self.startBlink))
        return track
    def __doGreenToon(self, lerpTime, toGreen):
        """Return an interval tinting skin, ears, muzzle and gloves green (or back).

        With lerpTime > 0 a dust cloud covers the change.  Passing None to
        the per-part helpers restores the natural colors.
        """
        track = Sequence()
        greenTrack = Parallel()
        def getDustCloudIval():
            dustCloud = DustCloud.DustCloud(fBillboard=0, wantSound=1)
            dustCloud.setBillboardAxis(2.0)
            dustCloud.setZ(3)
            dustCloud.setScale(0.4)
            dustCloud.createTrack()
            return Sequence(Func(dustCloud.reparentTo, self), dustCloud.track, Func(dustCloud.destroy), name='dustCloadIval')
        if lerpTime > 0.0:
            dust = getDustCloudIval()
            track.append(Func(dust.start))
            track.append(Wait(0.5))
        if toGreen:
            skinGreen = VBase4(76 / 255.0, 240 / 255.0, 84 / 255.0, 1)
            muzzleGreen = VBase4(4 / 255.0, 205 / 255.0, 90 / 255.0, 1)
            gloveGreen = VBase4(14 / 255.0, 173 / 255.0, 40 / 255.0, 1)
            greenTrack.append(self.__colorToonSkin(skinGreen, lerpTime))
            greenTrack.append(self.__colorToonEars(skinGreen, muzzleGreen, lerpTime))
            greenTrack.append(self.__colorScaleToonMuzzle(muzzleGreen, lerpTime))
            greenTrack.append(self.__colorToonGloves(gloveGreen, lerpTime))
        else:
            greenTrack.append(self.__colorToonSkin(None, lerpTime))
            greenTrack.append(self.__colorToonEars(None, None, lerpTime))
            greenTrack.append(self.__colorScaleToonMuzzle(None, lerpTime))
            greenTrack.append(self.__colorToonGloves(None, lerpTime))
        track.append(greenTrack)
        return track
    def __colorToonSkin(self, color, lerpTime):
        """Return an interval flat-coloring arms, legs and head (None = DNA colors)."""
        track = Sequence()
        colorTrack = Parallel()
        torsoPieces = self.getPieces(('torso', ('arms', 'neck')))
        legPieces = self.getPieces(('legs', ('legs', 'feet')))
        headPieces = self.getPieces(('head', '*head*'))
        if color == None:
            armColor = self.style.getArmColor()
            legColor = self.style.getLegColor()
            headColor = self.style.getHeadColor()
        else:
            armColor = color
            legColor = color
            headColor = color
        for piece in torsoPieces:
            colorTrack.append(Func(piece.setColor, armColor))
        for piece in legPieces:
            colorTrack.append(Func(piece.setColor, legColor))
        for piece in headPieces:
            # Accessories attached under the head keep their own colors.
            if 'hatNode' not in str(piece) and 'glassesNode' not in str(piece):
                colorTrack.append(Func(piece.setColor, headColor))
        track.append(colorTrack)
        return track
    def __colorToonEars(self, color, colorScale, lerpTime):
        """Return an interval coloring the ears.

        Ears with an explicit color get setColor (None = head DNA color);
        textured ears get setColorScale instead (None = identity scale).
        """
        track = Sequence()
        earPieces = self.getPieces(('head', '*ear*'))
        if len(earPieces) == 0:
            return track
        colorTrack = Parallel()
        if earPieces[0].hasColor():
            if color == None:
                headColor = self.style.getHeadColor()
            else:
                headColor = color
            for piece in earPieces:
                colorTrack.append(Func(piece.setColor, headColor))
        else:
            if colorScale == None:
                colorScale = VBase4(1, 1, 1, 1)
            for piece in earPieces:
                colorTrack.append(Func(piece.setColorScale, colorScale))
        track.append(colorTrack)
        return track
    def __colorScaleToonMuzzle(self, scale, lerpTime):
        """Return an interval color-scaling every muzzle piece (None = identity)."""
        track = Sequence()
        colorTrack = Parallel()
        muzzlePieces = self.getPieces(('head', '*muzzle*'))
        if scale == None:
            scale = VBase4(1, 1, 1, 1)
        for piece in muzzlePieces:
            colorTrack.append(Func(piece.setColorScale, scale))
        track.append(colorTrack)
        return track
    def __colorToonGloves(self, color, lerpTime):
        """Return an interval coloring the gloves (None = clear back to white)."""
        track = Sequence()
        colorTrack = Parallel()
        glovePieces = self.getPieces(('torso', '*hands*'))
        if color == None:
            for piece in glovePieces:
                colorTrack.append(Func(piece.clearColor))
        else:
            for piece in glovePieces:
                colorTrack.append(Func(piece.setColor, color))
        track.append(colorTrack)
        return track
def __doBigAndWhite(self, color, scale, lerpTime):
track = Parallel()
track.append(self.__doToonColor(color, lerpTime))
track.append(self.__doToonScale(scale, lerpTime))
return track
    def __doVirtual(self):
        """Return an interval giving the toon the glowing 'virtual' look.

        Tints the body blue and switches every body part to additive
        color blending (see setPartsAdd).
        """
        track = Parallel()
        track.append(self.__doToonColor(VBase4(0.25, 0.25, 1.0, 1), 0.0))
        self.setPartsAdd(self.getHeadParts())
        self.setPartsAdd(self.getTorsoParts())
        self.setPartsAdd(self.getHipsParts())
        self.setPartsAdd(self.getLegsParts())
        return track
    def __doUnVirtual(self):
        """Return an interval undoing __doVirtual: clear color, restore blending."""
        track = Parallel()
        track.append(self.__doToonColor(None, 0.0))
        self.setPartsNormal(self.getHeadParts(), 1)
        self.setPartsNormal(self.getTorsoParts(), 1)
        self.setPartsNormal(self.getHipsParts(), 1)
        self.setPartsNormal(self.getLegsParts(), 1)
        return track
    def setPartsAdd(self, parts):
        """Switch the given body parts to additive color blending (virtual glow).

        Meter/nametag attachment joints are skipped so UI stays readable.
        """
        actorCollection = parts
        for thingIndex in range(0, actorCollection.getNumPaths()):
            thing = actorCollection[thingIndex]
            if thing.getName() not in ('joint_attachMeter', 'joint_nameTag'):
                thing.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))
                thing.setDepthWrite(False)
        self.setBin('fixed', 1)
    def setPartsNormal(self, parts, alpha = 0):
        """Restore normal (non-additive) blending on the given body parts.

        NOTE(review): the trailing alpha block references 'thing' after the
        loop, so it only affects the LAST part iterated (and would raise
        NameError on an empty collection).  It looks like it was meant to
        live inside the loop -- confirm against upstream before changing.
        """
        actorCollection = parts
        for thingIndex in range(0, actorCollection.getNumPaths()):
            thing = actorCollection[thingIndex]
            if thing.getName() not in ('joint_attachMeter', 'joint_nameTag'):
                thing.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MNone))
                thing.setDepthWrite(True)
        self.setBin('default', 0)
        if alpha:
            thing.setTransparency(1)
            thing.setBin('transparent', 0)
    def __doToonGhostColorScale(self, scale, lerpTime, keepDefault = 0):
        """Return an interval like doToonColorScale, but for the ghost effect.

        Differs from doToonColorScale in that the node is shown first and
        hidden entirely once fully transparent (alpha == 0).
        """
        if keepDefault:
            self.defaultColorScale = scale
        if scale == None:
            scale = VBase4(1, 1, 1, 1)
        node = self.getGeomNode()
        caps = self.getPieces(('torso', 'torso-bot-cap'))
        track = Sequence()
        track.append(Func(node.setTransparency, 1))
        track.append(ShowInterval(node))
        if scale[3] != 1:
            for cap in caps:
                track.append(HideInterval(cap))
        track.append(LerpColorScaleInterval(node, lerpTime, scale, blendType='easeInOut'))
        if scale[3] == 1:
            track.append(Func(node.clearTransparency))
            for cap in caps:
                track.append(ShowInterval(cap))
        elif scale[3] == 0:
            track.append(Func(node.clearTransparency))
            track.append(HideInterval(node))
        return track
    def restoreDefaultColorScale(self):
        """Snap the geometry back to the recorded default color scale.

        A non-opaque default keeps transparency on; no recorded default
        clears both the color scale and transparency.
        """
        node = self.getGeomNode()
        if node:
            if self.defaultColorScale:
                node.setColorScale(self.defaultColorScale)
                if self.defaultColorScale[3] != 1:
                    node.setTransparency(1)
                else:
                    node.clearTransparency()
            else:
                node.clearColorScale()
                node.clearTransparency()
def __doToonColor(self, color, lerpTime):
node = self.getGeomNode()
if color == None:
return Func(node.clearColor)
else:
return Func(node.setColor, color, 1)
return
    def __doPartsColorScale(self, scale, lerpTime):
        """Return an interval color-scaling individual body pieces (invisible effect).

        Non-neutral muzzle variants are skipped everywhere so expression
        swaps keep working; at alpha 0 the pieces are hidden outright.
        Hat and glasses are regenerated afterwards so accessories match.
        """
        if scale == None:
            scale = VBase4(1, 1, 1, 1)
        node = self.getGeomNode()
        pieces = self.getPieces(('torso', ('arms', 'neck')), ('legs', ('legs', 'feet')), ('head', '+GeomNode'))
        track = Sequence()
        track.append(Func(node.setTransparency, 1))
        for piece in pieces:
            if piece.getName()[:7] == 'muzzle-' and piece.getName()[-8:] != '-neutral':
                continue
            track.append(ShowInterval(piece))
        p1 = Parallel()
        for piece in pieces:
            if piece.getName()[:7] == 'muzzle-' and piece.getName()[-8:] != '-neutral':
                continue
            p1.append(LerpColorScaleInterval(piece, lerpTime, scale, blendType='easeInOut'))
        track.append(p1)
        if scale[3] == 1:
            track.append(Func(node.clearTransparency))
        elif scale[3] == 0:
            track.append(Func(node.clearTransparency))
            for piece in pieces:
                if piece.getName()[:7] == 'muzzle-' and piece.getName()[-8:] != '-neutral':
                    continue
                track.append(HideInterval(piece))
        self.generateHat()
        self.generateGlasses()
        return track
    def __doCheesyEffect(self, effect, lerpTime):
        """Dispatch a cheesy-effect constant to the interval that applies it.

        Returns an empty Sequence for unknown (or CENormal) effects.
        """
        if effect == ToontownGlobals.CEBigHead:
            return self.__doHeadScale(2.5, lerpTime)
        elif effect == ToontownGlobals.CESmallHead:
            return self.__doHeadScale(0.5, lerpTime)
        elif effect == ToontownGlobals.CEBigLegs:
            return self.__doLegsScale(1.4, lerpTime)
        elif effect == ToontownGlobals.CESmallLegs:
            return self.__doLegsScale(0.6, lerpTime)
        elif effect == ToontownGlobals.CEBigToon:
            return self.__doToonScale(ToontownGlobals.BigToonScale, lerpTime)
        elif effect == ToontownGlobals.CESmallToon:
            return self.__doToonScale(ToontownGlobals.SmallToonScale, lerpTime)
        elif effect == ToontownGlobals.CEFlatPortrait:
            return self.__doToonScale(VBase3(1, 0.05, 1), lerpTime)
        elif effect == ToontownGlobals.CEFlatProfile:
            return self.__doToonScale(VBase3(0.05, 1, 1), lerpTime)
        elif effect == ToontownGlobals.CETransparent:
            return self.doToonColorScale(VBase4(1, 1, 1, 0.6), lerpTime, keepDefault=1)
        elif effect == ToontownGlobals.CENoColor:
            return self.__doToonColor(VBase4(1, 1, 1, 1), lerpTime)
        elif effect == ToontownGlobals.CEInvisible:
            return self.__doPartsColorScale(VBase4(1, 1, 1, 0), lerpTime)
        elif effect == ToontownGlobals.CEPumpkin:
            return self.__doPumpkinHeadSwitch(lerpTime, toPumpkin=True)
        elif effect == ToontownGlobals.CEBigWhite:
            return self.__doBigAndWhite(VBase4(1, 1, 1, 1), ToontownGlobals.BigToonScale, lerpTime)
        elif effect == ToontownGlobals.CESnowMan:
            return self.__doSnowManHeadSwitch(lerpTime, toSnowMan=True)
        elif effect == ToontownGlobals.CEGreenToon:
            return self.__doGreenToon(lerpTime, toGreen=True)
        elif effect == ToontownGlobals.CEVirtual:
            return self.__doVirtual()
        elif effect == ToontownGlobals.CEGhost:
            # Fully transparent ghost (Toontown Online behavior; Toontown
            # Infinite used alpha 0.25 instead).
            alpha = 0 # TTO
            # Staff with higher admin access still can't be seen by lesser toons.
            if base.localAvatar.getAdminAccess() < self.adminAccess:
                alpha = 0
            return Sequence(self.__doToonGhostColorScale(VBase4(1, 1, 1, alpha), lerpTime, keepDefault=1), Func(self.nametag3d.hide))
        return Sequence()
def __undoCheesyEffect(self, effect, lerpTime):
if effect == ToontownGlobals.CEBigHead:
return self.__doHeadScale(None, lerpTime)
elif effect == ToontownGlobals.CESmallHead:
return self.__doHeadScale(None, lerpTime)
if effect == ToontownGlobals.CEBigLegs:
return self.__doLegsScale(None, lerpTime)
elif effect == ToontownGlobals.CESmallLegs:
return self.__doLegsScale(None, lerpTime)
elif effect == ToontownGlobals.CEBigToon:
return self.__doToonScale(None, lerpTime)
elif effect == ToontownGlobals.CESmallToon:
return self.__doToonScale(None, lerpTime)
elif effect == ToontownGlobals.CEFlatPortrait:
return self.__doToonScale(None, lerpTime)
elif effect == ToontownGlobals.CEFlatProfile:
return self.__doToonScale(None, lerpTime)
elif effect == ToontownGlobals.CETransparent:
return self.doToonColorScale(None, lerpTime, keepDefault=1)
elif effect == ToontownGlobals.CENoColor:
return self.__doToonColor(None, lerpTime)
elif effect == ToontownGlobals.CEInvisible:
return self.__doPartsColorScale(None, lerpTime)
elif effect == ToontownGlobals.CEPumpkin:
return self.__doPumpkinHeadSwitch(lerpTime, toPumpkin=False)
elif effect == ToontownGlobals.CEBigWhite:
return self.__doBigAndWhite(None, None, lerpTime)
elif effect == ToontownGlobals.CESnowMan:
return self.__doSnowManHeadSwitch(lerpTime, toSnowMan=False)
elif effect == ToontownGlobals.CEGreenToon:
return self.__doGreenToon(lerpTime, toGreen=False)
elif effect == ToontownGlobals.CEVirtual:
return self.__doUnVirtual()
elif effect == ToontownGlobals.CEGhost:
return Sequence(Func(self.nametag3d.show), self.__doToonGhostColorScale(None, lerpTime, keepDefault=1))
return Sequence()
    def putOnSuit(self, suitType, setDisplayName = True, rental = False):
        """Dress this toon in a cog disguise.

        suitType: a SuitDNA suit code, or (when rental=True) a department
            index into SuitDNA.suitDepts.
        setDisplayName: also switch the nametag to the cog-style name/level.
        rental: use the department's low-tier rental suit body.
        """
        # Swap disguises cleanly if one is already worn.
        if self.isDisguised:
            self.takeOffSuit()
        # Suit assets live in phase 5; bail out if not yet downloaded.
        if launcher and not launcher.getPhaseComplete(5):
            return
        from toontown.suit import Suit
        # For rentals the caller passed a department index; remember it
        # before suitType is overwritten with a concrete suit code below.
        deptIndex = suitType
        suit = Suit.Suit()
        dna = SuitDNA.SuitDNA()
        if rental == True:
            # Map each department letter to its rental suit body.
            if SuitDNA.suitDepts[deptIndex] == 's':
                suitType = 'cc'
            elif SuitDNA.suitDepts[deptIndex] == 'm':
                suitType = 'sc'
            elif SuitDNA.suitDepts[deptIndex] == 'l':
                suitType = 'bf'
            elif SuitDNA.suitDepts[deptIndex] == 'c':
                suitType = 'f'
            else:
                self.notify.warning('Suspicious: Incorrect rental suit department requested')
                suitType = 'cc'
        dna.newSuit(suitType)
        suit.setStyle(dna)
        suit.isDisguised = 1
        suit.generateSuit()
        suit.initializeDropShadow()
        suit.setPos(self.getPos())
        suit.setHpr(self.getHpr())
        # Hide the cog's own head; the toon's head is grafted on below.
        for part in suit.getHeadParts():
            part.hide()
        suitHeadNull = suit.find('**/joint_head')
        toonHead = self.getPart('head', '1000')
        Emote.globalEmote.disableAll(self)
        # Hide the toon body and reparent the toon head onto the suit
        # skeleton, preserving its world-space scale.
        toonGeom = self.getGeomNode()
        toonGeom.hide()
        worldScale = toonHead.getScale(render)
        self.headOrigScale = toonHead.getScale()
        headPosNode = hidden.attachNewNode('headPos')
        toonHead.reparentTo(headPosNode)
        toonHead.setPos(0, 0, 0.2)
        headPosNode.reparentTo(suitHeadNull)
        headPosNode.setScale(render, worldScale)
        suitGeom = suit.getGeomNode()
        suitGeom.reparentTo(self)
        if rental == True:
            suit.makeRentalSuit(SuitDNA.suitDepts[deptIndex])
        self.suit = suit
        self.suitGeom = suitGeom
        self.setHeight(suit.getHeight())
        self.nametag3d.setPos(0, 0, self.height + 1.3)
        if self.isLocal():
            # Local avatar only: lock the shticker book, slow movement to
            # suit walking speed, disable jumping and add the cog phrase
            # menu to SpeedChat.  Old speeds are stashed for takeOffSuit.
            if hasattr(self, 'book'):
                self.book.obscureButton(1)
            self.oldForward = ToontownGlobals.ToonForwardSpeed
            self.oldReverse = ToontownGlobals.ToonReverseSpeed
            self.oldRotate = ToontownGlobals.ToonRotateSpeed
            ToontownGlobals.ToonForwardSpeed = ToontownGlobals.SuitWalkSpeed
            ToontownGlobals.ToonReverseSpeed = ToontownGlobals.SuitWalkSpeed
            ToontownGlobals.ToonRotateSpeed = ToontownGlobals.ToonRotateSlowSpeed
            if self.hasTrackAnimToSpeed():
                # Restart the speed-tracking task so it picks up the new speeds.
                self.stopTrackAnimToSpeed()
                self.startTrackAnimToSpeed()
            self.controlManager.disableAvatarJump()
            indices = list(range(OTPLocalizer.SCMenuCommonCogIndices[0], OTPLocalizer.SCMenuCommonCogIndices[1] + 1))
            customIndices = OTPLocalizer.SCMenuCustomCogIndices[suitType]
            indices += list(range(customIndices[0], customIndices[1] + 1))
            self.chatMgr.chatInputSpeedChat.addCogMenu(indices)
        self.suit.loop('neutral')
        self.isDisguised = 1
        self.setFont(ToontownGlobals.getSuitFont())
        if setDisplayName:
            if hasattr(base, 'idTags') and base.idTags:
                name = self.getAvIdName()
            else:
                name = self.getName()
            suitDept = SuitDNA.suitDepts.index(SuitDNA.getSuitDept(suitType))
            suitName = SuitBattleGlobals.SuitAttributes[suitType]['name']
            self.nametag.setText(TTLocalizer.SuitBaseNameWithLevel % {'name': name,
             'dept': suitName,
             'level': self.cogLevels[suitDept] + 1})
            self.nametag.setWordWrap(9.0)
    def takeOffSuit(self):
        """Reverse putOnSuit: restore the toon body, nametag, movement
        speeds and SpeedChat menu, then destroy the disguise suit."""
        if not self.isDisguised:
            return
        # suitType is currently unused below; kept as-is.
        suitType = self.suit.style.name
        # Re-attach the toon head to its own skeleton at its original scale.
        toonHeadNull = self.find('**/1000/**/def_head')
        if not toonHeadNull:
            toonHeadNull = self.find('**/1000/**/joint_head')
        toonHead = self.getPart('head', '1000')
        toonHead.reparentTo(toonHeadNull)
        toonHead.setScale(self.headOrigScale)
        toonHead.setPos(0, 0, 0)
        # Drop the helper node putOnSuit created and give the geometry back
        # to the suit actor so suit.delete() can clean it up.
        headPosNode = self.suitGeom.find('**/headPos')
        headPosNode.removeNode()
        self.suitGeom.reparentTo(self.suit)
        self.resetHeight()
        self.nametag3d.setPos(0, 0, self.height + 0.5)
        toonGeom = self.getGeomNode()
        toonGeom.show()
        Emote.globalEmote.releaseAll(self)
        self.isDisguised = 0
        self.setFont(ToontownGlobals.getToonFont())
        self.nametag.setWordWrap(None)
        if hasattr(base, 'idTags') and base.idTags:
            name = self.getAvIdName()
        else:
            name = self.getName()
        self.setDisplayName(name)
        if self.isLocal():
            # Undo the local-avatar tweaks made in putOnSuit.
            if hasattr(self, 'book'):
                self.book.obscureButton(0)
            ToontownGlobals.ToonForwardSpeed = self.oldForward
            ToontownGlobals.ToonReverseSpeed = self.oldReverse
            ToontownGlobals.ToonRotateSpeed = self.oldRotate
            if self.hasTrackAnimToSpeed():
                self.stopTrackAnimToSpeed()
                self.startTrackAnimToSpeed()
            del self.oldForward
            del self.oldReverse
            del self.oldRotate
            self.controlManager.enableAvatarJump()
            self.chatMgr.chatInputSpeedChat.removeCogMenu()
        self.suit.delete()
        del self.suit
        del self.suitGeom
def makeWaiter(self):
if not self.isDisguised:
return
self.suit.makeWaiter(self.suitGeom)
def getPieModel(self):
from toontown.toonbase import ToontownBattleGlobals
from toontown.battle import BattleProps
if self.pieModel != None and self.__pieModelType != self.pieType:
self.pieModel.detachNode()
self.pieModel = None
if self.pieModel == None:
self.__pieModelType = self.pieType
pieName = ToontownBattleGlobals.pieNames[self.pieType]
self.pieModel = BattleProps.globalPropPool.getProp(pieName)
self.pieScale = self.pieModel.getScale()
return self.pieModel
    def getPresentPieInterval(self, x, y, z, h):
        """Build the 'present the pie' interval: place the toon at (x, y, z)
        facing heading h, put the pie in the right hand, scale it up during
        the wind-up (frames 0-31 of 'throw'), then ping-pong frames 32-47 so
        toon and pie idle holding the pose.
        """
        from toontown.toonbase import ToontownBattleGlobals
        from toontown.battle import BattleProps
        from toontown.battle import MovieUtil
        pie = self.getPieModel()
        pieName = ToontownBattleGlobals.pieNames[self.pieType]
        pieType = BattleProps.globalPropPool.getPropType(pieName)
        # Animated ('actor') props get their own synced intervals; static
        # props use empty no-op sequences.
        animPie = Sequence()
        pingpongPie = Sequence()
        if pieType == 'actor':
            animPie = ActorInterval(pie, pieName, startFrame=0, endFrame=31)
            pingpongPie = Func(pie.pingpong, pieName, fromFrame=32, toFrame=47)
        track = Sequence(Func(self.setPosHpr, x, y, z, h, 0, 0), Func(pie.reparentTo, self.rightHand), Func(pie.setPosHpr, 0, 0, 0, 0, 0, 0), Parallel(pie.scaleInterval(1, self.pieScale, startScale=MovieUtil.PNT3_NEARZERO), ActorInterval(self, 'throw', startFrame=0, endFrame=31), animPie), Func(self.pingpong, 'throw', fromFrame=32, toFrame=47), pingpongPie)
        return track
    def getTossPieInterval(self, x, y, z, h, power, throwType, beginFlyIval = Sequence()):
        """Build the pie-throw animation.

        Returns (toss, fly, flyPie): the toon's throw track, the in-flight
        pie track, and the flying pie node (the caller must clean it up).
        power (0-100) scales throw distance/velocity; throwType selects an
        arcing lob vs. a straight throw.

        NOTE(review): relVel is only bound in the Arc/Linear branches; any
        other throwType would raise NameError at the getVelocity def below -
        confirm callers only ever pass those two values.  The shared default
        beginFlyIval=Sequence() is also preserved as-is.
        """
        from toontown.toonbase import ToontownBattleGlobals
        from toontown.battle import BattleProps
        pie = self.getPieModel()
        # Separate copy for the in-flight pie so the hand pie can simply be
        # detached at release time.
        flyPie = pie.copyTo(NodePath('a'))
        pieName = ToontownBattleGlobals.pieNames[self.pieType]
        pieType = BattleProps.globalPropPool.getPropType(pieName)
        animPie = Sequence()
        if pieType == 'actor':
            animPie = ActorInterval(pie, pieName, startFrame=48)
        sound = loader.loadSfx('phase_3.5/audio/sfx/AA_pie_throw_only.ogg')
        if throwType == ToontownGlobals.PieThrowArc:
            # Higher power = shorter, quicker lob (distance 100 down to 30).
            t = power / 100.0
            dist = 100 - 70 * t
            time = 1 + 0.5 * t
            proj = ProjectileInterval(None, startPos=Point3(0, 0, 0),
                                      endPos=Point3(0, dist, 0), duration=time)
            relVel = proj.startVel
        elif throwType == ToontownGlobals.PieThrowLinear:
            # Straight throw: fixed direction, speed scaled by power.
            magnitude = power / 2. + 25
            relVel = Vec3(0, 1, 0.25)
            relVel.normalize()
            relVel *= magnitude
        def getVelocity(toon = self, relVel = relVel):
            # Evaluated at fly time: convert the toon-relative launch
            # velocity into world space.
            return render.getRelativeVector(toon, relVel)
        toss = Track((0, Sequence(Func(self.setPosHpr, x, y, z, h, 0, 0), Func(pie.reparentTo, self.rightHand), Func(pie.setPosHpr, 0, 0, 0, 0, 0, 0), Parallel(ActorInterval(self, 'throw', startFrame=48), animPie), Func(self.loop, 'neutral'))), (16.0 / 24.0, Func(pie.detachNode)))
        fly = Track((14.0 / 24.0, SoundInterval(sound, node=self)), (16.0 / 24.0, Sequence(Func(flyPie.reparentTo, render), Func(flyPie.setScale, self.pieScale), Func(flyPie.setPosHpr, self, 0.52, 0.97, 2.24, 89.42, -10.56, 87.94), beginFlyIval, ProjectileInterval(flyPie, startVel=getVelocity, duration=3), Func(flyPie.detachNode))))
        return (toss, fly, flyPie)
def getPieSplatInterval(self, x, y, z, pieCode):
from toontown.toonbase import ToontownBattleGlobals
from toontown.battle import BattleProps
pieName = ToontownBattleGlobals.pieNames[self.pieType]
splatName = 'splat-%s' % pieName
if pieName == 'lawbook':
splatName = 'dust'
splat = BattleProps.globalPropPool.getProp(splatName)
splat.setBillboardPointWorld(2)
color = ToontownGlobals.PieCodeColors.get(pieCode)
if color:
splat.setColor(*color)
vol = 1.0
if pieName == 'lawbook':
sound = loader.loadSfx('phase_11/audio/sfx/LB_evidence_miss.ogg')
vol = 0.25
else:
sound = loader.loadSfx('phase_4/audio/sfx/AA_wholepie_only.ogg')
ival = Parallel(Func(splat.reparentTo, render), Func(splat.setPos, x, y, z), SoundInterval(sound, node=splat, volume=vol), Sequence(ActorInterval(splat, splatName), Func(splat.detachNode)))
return ival
def cleanupPieModel(self):
if self.pieModel != None:
self.pieModel.detachNode()
self.pieModel = None
return
    def getFeedPetIval(self):
        """Interval: play the feed-pet animation, then return to neutral."""
        return Sequence(ActorInterval(self, 'feedPet'), Func(self.animFSM.request, 'neutral'))
    def getScratchPetIval(self):
        """Interval: full pet-scratching cycle (wind-up, loop, wind-down)."""
        return Sequence(ActorInterval(self, 'pet-start'), ActorInterval(self, 'pet-loop'), ActorInterval(self, 'pet-end'))
    def getCallPetIval(self):
        """Interval: play the call-pet animation once."""
        return ActorInterval(self, 'callPet')
    # --- Golf minigame FSM states ---------------------------------------
    # Each enter* state loops the matching putt animation; each exit* stops
    # it.  The (animMultiplier, ts, callback, extraArgs) signature is the
    # standard animation-FSM state signature and is unused by these states.
    def enterGolfPuttLoop(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('loop-putt')
    def exitGolfPuttLoop(self):
        self.stop()
    def enterGolfRotateLeft(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('rotateL-putt')
    def exitGolfRotateLeft(self):
        self.stop()
    def enterGolfRotateRight(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('rotateR-putt')
    def exitGolfRotateRight(self):
        self.stop()
    def enterGolfPuttSwing(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('swing-putt')
    def exitGolfPuttSwing(self):
        self.stop()
    def enterGolfGoodPutt(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        # restart=0: don't rewind if the animation is already playing.
        self.loop('good-putt', restart=0)
    def exitGolfGoodPutt(self):
        self.stop()
    def enterGolfBadPutt(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('badloop-putt', restart=0)
    def exitGolfBadPutt(self):
        self.stop()
    def enterFlattened(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state: squash the toon flat (wide and nearly zero height)."""
        Emote.globalEmote.disableAll(self)
        # NOTE(review): 'sound' is loaded but never played or attached to the
        # track below - confirm whether the decompress sfx was intended here.
        sound = loader.loadSfx('phase_9/audio/sfx/toon_decompress.ogg')
        lerpTime = 0.1
        node = self.getGeomNode().getChild(0)
        # Remember the pre-squash scale so exitFlattened can restore it.
        self.origScale = node.getScale()
        self.track = Sequence(LerpScaleInterval(node, lerpTime, VBase3(2, 2, 0.025), blendType='easeInOut'))
        self.track.start(ts)
        self.setActiveShadow(1)
    def exitFlattened(self):
        """Leave the flattened state: finish the squash track and restore
        the original geometry scale."""
        self.playingAnim = 'neutral'
        if self.track != None:
            self.track.finish()
            DelayDelete.cleanupDelayDeletes(self.track)
            self.track = None
        node = self.getGeomNode().getChild(0)
        node.setScale(self.origScale)
        Emote.globalEmote.releaseAll(self)
        return
    def enterCogThiefRunning(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        """FSM state for the cog-thief minigame: drive the run animation
        from the toon's actual movement (stand / run / run reversed)."""
        self.playingAnim = None
        self.playingRate = None
        # (anim, playrate) pairs consumed by the speed-tracking task:
        # standing, walking, running, reversing.
        self.standWalkRunReverse = (('neutral', 1.0),
         ('run', 1.0),
         ('run', 1.0),
         ('run', -1.0))
        self.setSpeed(self.forwardSpeed, self.rotateSpeed)
        self.setActiveShadow(1)
        return
    def exitCogThiefRunning(self):
        # Tear down speed-driven animation and stop the motion handler.
        self.standWalkRunReverse = None
        self.stop()
        self.motion.exit()
        return
    # --- Scientist NPC FSM states ----------------------------------------
    # Looping scientist animations used by cutscene NPCs; the state
    # signature is the standard animation-FSM signature and is unused here.
    def enterScientistJealous(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('scientistJealous')
        # Some scientist toons carry a prop; show it when the hook exists.
        if hasattr(self, 'showScientistProp'):
            self.showScientistProp()
    def exitScientistJealous(self):
        self.stop()
    def enterScientistEmcee(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('scientistEmcee')
    def exitScientistEmcee(self):
        self.stop()
    def enterScientistWork(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('scientistWork')
    def exitScientistWork(self):
        self.stop()
    def enterScientistLessWork(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        # Plays only frames 319-619 of the full work animation.
        self.loop('scientistWork', fromFrame=319, toFrame=619)
    def exitScientistLessWork(self):
        self.stop()
    def enterScientistPlay(self, animMultiplier = 1, ts = 0, callback = None, extraArgs = []):
        self.loop('scientistGame')
        if hasattr(self, 'scientistPlay'):
            self.scientistPlay()
    def exitScientistPlay(self):
        self.stop()
# Module initialization: load the shared toon models and compile the global
# animation table once at import time.
loadModels()
compileGlobalAnimList()
| [
"nathanielfuhr@gmail.com"
] | nathanielfuhr@gmail.com |
cf5eff96017123e59bba07e49d5d94addf5b3000 | dd8363acd9a028d9b6432936d72e7a5344077c20 | /plugins/modules/s3_logging.py | 16561ce89703400d898a79597cd3ab3f8a659732 | [] | no_license | gundalow-collections/amazon | 5d437c41af5e3cfa73731c9cd2c08b66c7480b43 | 23c743b63f58ba97960479e230b462fb1c90cc95 | refs/heads/master | 2020-08-28T04:53:02.641829 | 2019-10-25T19:40:04 | 2019-10-25T19:40:04 | 217,595,855 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''author: Rob White (@wimnat)
description:
- Manage logging facility of an s3 bucket in AWS
extends_documentation_fragment:
- ansible.amazon.aws
- ansible.amazon.ec2
module: s3_logging
options:
name:
description:
- Name of the s3 bucket.
required: true
state:
choices:
- present
- absent
default: present
description:
- Enable or disable logging.
target_bucket:
description:
- The bucket to log to. Required when state=present.
target_prefix:
default: ''
description:
- The prefix that should be prepended to the generated log files written to the
target_bucket.
short_description: Manage logging facility of an s3 bucket in AWS
version_added: '2.0'
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
s3_logging:
name: mywebsite.com
target_bucket: mylogs
target_prefix: logs/mywebsite.com
state: present
- name: Remove logging on an s3 bucket
s3_logging:
name: mywebsite.com
state: absent
'''
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.exception import S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.amazon.plugins.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
def compare_bucket_logging(bucket, target_bucket, target_prefix):
    """Return True when *bucket*'s current logging status already points at
    *target_bucket* with *target_prefix*, False otherwise.

    Passing target_bucket=target_prefix=None checks that logging is disabled
    (boto reports both fields as None in that case).
    """
    bucket_log_obj = bucket.get_logging_status()
    # Idiom fix: return the boolean expression directly instead of an
    # if/else that returns literal True/False.
    return (bucket_log_obj.target == target_bucket and
            bucket_log_obj.prefix == target_prefix)
def enable_bucket_logging(connection, module):
    """Enable server-access logging on the named bucket, pointing at
    target_bucket/target_prefix.

    Exits the Ansible module with changed=True only when the logging
    configuration actually had to be modified; fails the module on any
    S3 error.
    """
    bucket_name = module.params.get("name")
    target_bucket = module.params.get("target_bucket")
    target_prefix = module.params.get("target_prefix")
    changed = False
    try:
        bucket = connection.get_bucket(bucket_name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)
    try:
        if not compare_bucket_logging(bucket, target_bucket, target_prefix):
            # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
            try:
                target_bucket_obj = connection.get_bucket(target_bucket)
            except S3ResponseError as e:
                # A 301 means the target bucket lives in a different region,
                # which S3 does not allow for logging targets.
                if e.status == 301:
                    module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
                else:
                    module.fail_json(msg=e.message)
            target_bucket_obj.set_as_logging_target()
            bucket.enable_logging(target_bucket, target_prefix)
            changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)
    module.exit_json(changed=changed)
def disable_bucket_logging(connection, module):
    """Turn off server-access logging on the named bucket.

    Exits the Ansible module with changed=True only when logging was
    actually enabled beforehand; fails the module on any S3 error.
    """
    changed = False
    try:
        bucket = connection.get_bucket(module.params.get("name"))
        already_disabled = compare_bucket_logging(bucket, None, None)
        if not already_disabled:
            bucket.disable_logging()
            changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)
    module.exit_json(changed=changed)
def main():
    """Ansible module entry point: parse arguments, connect to S3 and
    enable or disable bucket logging according to 'state'."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            target_bucket=dict(required=False, default=None),
            target_prefix=dict(required=False, default=""),
            state=dict(required=False, default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    state = module.params.get("state")
    # Each branch exits the module via exit_json/fail_json.
    if state == 'present':
        enable_bucket_logging(connection, module)
    elif state == 'absent':
        disable_bucket_logging(connection, module)
# Ansible executes modules as scripts, so run main() directly.
if __name__ == '__main__':
    main()
| [
"brian.coca+git@gmail.com"
] | brian.coca+git@gmail.com |
1dc1bf27b4946f2a8920cea5731c6958148fa159 | a54d5a5ae5ba352963f1166a29e1bb6c867157ab | /python/divides_evenly.py | 2b4eec4e705a704904ffcb8479439ddadd25217b | [] | no_license | alephist/edabit-coding-challenges | 06f573e90ffbd13bc54ecbdaa8e6a225aa44f5d8 | 35f1fc84848fc44e184aae1ae231a36319c1c81e | refs/heads/main | 2023-07-30T22:39:37.468756 | 2021-09-18T07:47:02 | 2021-09-18T07:47:02 | 341,467,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | """
Divides Evenly
Given two integers, a and b, return True if a can be divided evenly by b. Return False otherwise.
https://edabit.com/challenge/NRxWszQRw5JqSDmQS
"""
def divides_evenly(a: int, b: int) -> bool:
    """Return True when *a* is a whole-number multiple of *b*."""
    remainder = a % b
    return remainder == 0
| [
"justin.necesito@gmail.com"
] | justin.necesito@gmail.com |
ebdf44d0b8071754b1394cf7bba558ebb4b9144c | c69e2b05b709a030c6f35244986df889d544adbf | /slackbot_ce/code_em/patrick/slacklib.py | 6770528924553d0a47a23fcf377fe93c8ed72d8b | [
"MIT"
] | permissive | wray/code_em | af325787d3f7a6dad68d28c72990c28e5baab4dd | 1500141828e0db8f4e13b0507398a65c2e0642cd | refs/heads/master | 2021-04-30T23:00:46.659505 | 2018-06-25T23:15:05 | 2018-06-25T23:15:05 | 68,224,253 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | # Put your commands here
COMMAND1 = "~~"


# Your handling code goes in this function
def handle_command(command):
    """Return the bot's reply to *command*.

    Replies "Huh?" whenever the command contains the COMMAND1 trigger,
    otherwise returns an empty string (no response).
    """
    return "Huh?" if COMMAND1 in command else ""
"joe@techemstudios.com"
] | joe@techemstudios.com |
7de3feab7305a15bf87af61ebc2b63f2601479a7 | a111639e451d54e3284363e2ad2e3e328f711a94 | /structural/bridge/free_web_page.py | 6baede3f34d1508f259c2565d60a7eabee1044b3 | [] | no_license | TheProrok29/design_patterns_in_python | 33bc24c8853d3ffe13711b06ea11e8bbe4a5f391 | a68930ca60c431f93be596b7e4440daf7f4bc319 | refs/heads/master | 2021-02-18T14:31:05.465004 | 2020-04-03T10:07:40 | 2020-04-03T10:07:40 | 245,205,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from structural.bridge.web_page import WebPage
class FreeWebPage(WebPage):
    """Free-tier page in the bridge pattern: renders the reduced content
    (snippet + image) plus ads and an upsell link to the full version."""

    def show_page(self):
        # Fetch in the same order as before (image first), then print in
        # display order: snippet, image, ads, upsell.
        fetcher = self.fetcher
        image = fetcher.get_image()
        snippet = fetcher.get_snippet()
        ads = fetcher.get_ads()
        upsell = fetcher.go_to_full_version()
        for fragment in (snippet, image, ads, upsell):
            print(fragment)
| [
"tomaszdbogacki@gmail.com"
] | tomaszdbogacki@gmail.com |
276da94c564d9de0962c4d7f29d8230ac4925eb8 | 9aa85999021da96ce0a7d76789c1298d174d1835 | /meet/migrations/0003_auto_20200128_1727.py | e4d4f1cf789a13902adda66a4e334e527bcc0455 | [] | no_license | m0bi5/ISTE-NITK_Website | 20b83a3a629836c33c7478c0af834f6f57e0e907 | 2e186bb1ba457c930f9b691cc5a5584b8e3c270c | refs/heads/master | 2022-11-24T03:02:49.354491 | 2020-07-24T15:43:44 | 2020-07-24T15:43:44 | 184,452,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.2.4 on 2020-01-28 17:27
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`.  The datetime defaults below were
    # captured once at generation time; they are static values, not
    # "now at migration time".
    dependencies = [
        ('meet', '0002_auto_20200128_1714'),
    ]
    operations = [
        migrations.AlterField(
            model_name='meet',
            name='end_time',
            field=models.TimeField(default=datetime.time(17, 27, 7, 937731)),
        ),
        migrations.AlterField(
            model_name='meet',
            name='start_time',
            field=models.TimeField(default=datetime.time(17, 27, 7, 937701)),
        ),
    ]
| [
"amodhshenoy@gmail.com"
] | amodhshenoy@gmail.com |
3598c364d465f9de29b3133708e16f1b6e8a21f9 | e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6 | /venv/Lib/site-packages/pybrain/structure/modules/samplelayer.py | a3096be9947029eb16c8a2d254b083c9143ce84e | [
"MIT"
] | permissive | ishatserka/MachineLearningAndDataAnalysisCoursera | cdf0f23a58617e17d6b938e3a9df17daae8585e4 | e82e772df2f4aec162cb34ac6127df10d14a625a | refs/heads/master | 2021-09-11T01:39:26.228392 | 2018-04-05T14:33:39 | 2018-04-05T14:33:39 | 117,153,454 | 0 | 0 | MIT | 2018-03-27T05:20:37 | 2018-01-11T21:05:33 | Python | UTF-8 | Python | false | false | 577 | py | #! /usr/bin/env python2.5
# -*- coding: utf-8 -*-
__author__ = ('Christian Osendorfer, osendorf@in.tum.de; '
'Justin S Bayer, bayerj@in.tum.de')
from scipy import random
from pybrain.structure.modules.neuronlayer import NeuronLayer
class SampleLayer(NeuronLayer):
    """Baseclass for all layers that have stochastic output depending on the
    incoming weight."""
class BernoulliLayer(SampleLayer):
    # Stochastic binary layer: each output is 0 or 1 sampled against a
    # uniform random draw per unit.
    def _forwardImplementation(self, inbuf, outbuf):
        # NOTE(review): unit i is set to 1 when inbuf[i] <= U(0,1), i.e.
        # active with probability 1 - inbuf[i]; confirm this direction is
        # the intended Bernoulli sampling convention for this codebase.
        outbuf[:] = inbuf <= random.random(inbuf.shape)
| [
"shatserka@gmail.com"
] | shatserka@gmail.com |
1c3496dc7a944fe3caf17d00732d0c8b872fe292 | a836aa9db79f26ac238368e35b765e55efdfbc08 | /set length.py | d2c6bebc60a4a1bb7ae4ee5675068528290dc57b | [] | no_license | abhisek08/python-sets | 8e3bc968500fd67ee72512acb1e643ff281eb7df | c48d780276deff6dee8fc2e58a1480696c73fa24 | refs/heads/master | 2022-10-13T07:41:32.226636 | 2020-06-08T05:36:30 | 2020-06-08T05:36:30 | 270,542,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | '''
Write a Python program to find the length of a set.
'''
s={1,2,3,4,5}
print(len(s)) | [
"abhisek.bhunia08@gmail.com"
] | abhisek.bhunia08@gmail.com |
a6a7cf557aeeff0dfc96557ca96fa2ecf6f6a35d | a9c43c4b1a640841f1c9b13b63e39422c4fc47c2 | /test/tests/set_more.py | 3283c3eae471a229fed147eedfc564e625324313 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | lovejavaee/pyston | be5bd8393462be17259bcc40bf8f745e157d9793 | e8f0d9667c35db043add2f07a0ea7d23e290dd80 | refs/heads/master | 2023-05-01T17:42:35.616499 | 2015-04-07T08:10:44 | 2015-04-07T08:10:44 | 33,535,295 | 0 | 0 | NOASSERTION | 2023-04-14T02:16:28 | 2015-04-07T09:53:36 | Python | UTF-8 | Python | false | false | 225 | py | # expected: fail
print hasattr(set, "__ior__")
print hasattr(set, "__isub__")
print hasattr(set, "__iand__")
print hasattr(set, "__ixor__")
s1 = set() | set(range(3))
s2 = set(range(1, 5))
s3 = s1
s1 -= s2
print s1, s2, s3
| [
"kmod@dropbox.com"
] | kmod@dropbox.com |
4f16d28884277d1d267297d0bcd2663fcaf9a841 | 4c61666c08f3564459b2e9de65f1cef50ef7ce0a | /packages/vaex-server/vaex/server/service.py | 1732c9555f41448c83fd571e7be1f3a913fcc7a7 | [
"MIT"
] | permissive | nemochina2008/vaex | 558b42e9b65455228eb95a3c96e66c9aff39a4d6 | afdaea5568fd3b8f414ab6084d0bb53c5319b968 | refs/heads/master | 2022-05-20T16:39:14.959035 | 2020-04-13T06:30:47 | 2020-04-13T06:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | import concurrent.futures
import logging
import threading
import vaex
logger = logging.getLogger("vaex.server.service")
class Service:
    """In-process vaex service: exposes a named set of DataFrames so a
    server can list them, invoke methods on them and execute task lists."""

    def __init__(self, df_map):
        # Mapping of public dataset name -> vaex DataFrame.
        self.df_map = df_map
    def stop(self):
        # Nothing to shut down for the synchronous in-process service.
        pass
    def __getitem__(self, item):
        return self.df_map[item]
    def list(self):
        """Return a dict with dataframe information"""
        # NOTE(review): the inner dtype comprehension reuses the variable
        # `name`, shadowing the outer dataset name - works, but confusing.
        return {name: {
            'length_original': df.length_original(),
            'column_names': df.get_column_names(strings=True),
            'dtypes': {name: str("str" if df.dtype(name) == vaex.column.str_type else df.dtype(name)) for name in df.get_column_names(strings=True)},
            'state': df.state_get()
        } for name, df in self.df_map.items()
        }
    def _rmi(self, df, methodname, args, kwargs):
        # Remote-method-invocation helper: call df.<methodname>(*args, **kwargs).
        method = getattr(df, methodname)
        return method(*args, **kwargs)
    def execute(self, df, tasks):
        """Schedule *tasks* on df's executor, run them, and return their
        results in order."""
        # The executor must be idle before new tasks are scheduled.
        assert df.executor.task_queue == []
        for task in tasks:
            df.executor.schedule(task)
        df.execute()
        return [task.get() for task in tasks]
class Proxy:
    """Thin wrapper that forwards all service calls; base class for
    decorators that change how execution happens (e.g. AsyncThreadedService
    overrides execute())."""

    def __init__(self, service):
        self.service = service
    def __getitem__(self, item):
        return self.service[item]
    def stop(self):
        return self.service.stop()
    def list(self):
        return self.service.list()
    def _rmi(self, df, methodname, args, kwargs):
        return self.service._rmi(df, methodname, args, kwargs)
class AsyncThreadedService(Proxy):
    """Proxy that runs execute() jobs asynchronously on a thread pool; each
    worker thread lazily builds its own vaex executor backed by a private
    inner pool of threads_per_job workers."""

    def __init__(self, service, thread_count, threads_per_job):
        super().__init__(service)
        self.threads_per_job = threads_per_job
        # Outer pool: one slot per concurrently running execute() call.
        self.thread_pool = concurrent.futures.ThreadPoolExecutor(thread_count)
        # Per-worker-thread (executor, inner pool) storage.
        self.thread_local = threading.local()
        # References to every inner pool so stop() can shut them all down.
        self.thread_pools = []
    def stop(self):
        self.thread_pool.shutdown()
        for thread_pool in self.thread_pools:
            thread_pool.shutdown()
    def execute(self, df, tasks, progress=None):
        """Submit the task list for execution; returns a Future yielding the
        task results.  When given, *progress* is connected to the executor's
        progress signal for the duration of the job."""
        def execute():
            # First job on this worker thread: build its private executor.
            if not hasattr(self.thread_local, "executor"):
                logger.debug("creating thread pool and executor")
                self.thread_local.thread_pool = vaex.multithreading.ThreadPoolIndex(max_workers=self.threads_per_job)
                self.thread_local.executor = vaex.execution.Executor(thread_pool=self.thread_local.thread_pool)
                self.thread_pools.append(self.thread_local.thread_pool)
            executor = self.thread_local.executor
            try:
                if progress:
                    executor.signal_progress.connect(progress)
                # Point the dataframe at this thread's executor before running.
                df.executor = executor
                return self.service.execute(df, tasks)
            finally:
                # Always disconnect so the callback does not leak into the
                # next job that reuses this executor.
                if progress:
                    executor.signal_progress.disconnect(progress)
        return self.thread_pool.submit(execute)
| [
"maartenbreddels@gmail.com"
] | maartenbreddels@gmail.com |
d6cef3773f219850c2d45b864a285ebd437b6090 | 2e26bf9c44f349ee308e63e067d93da654daf69d | /python-scripts/scripts/10_find_files_recursively.py | 7952c3841a6d2c43fc75392b73d25433959ad049 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | RelativeTech/PYTHON_PRAC | 034e44484d63d50a9c4295aa7e1dc63ef786fb37 | 7fa145dece99089706460466a89901e00eef9d28 | refs/heads/master | 2023-06-04T18:59:45.059403 | 2021-06-07T19:40:10 | 2021-06-07T19:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | import fnmatch
import os
# constants
PATH = './'
PATTERN = '*.md'
def get_file_names(filepath, pattern):
    """Recursively search *filepath* for files matching the glob *pattern*
    and print a summary: a count plus each matching file name, or an error
    message when nothing matched / the path does not exist.
    """
    matches = []
    if os.path.exists(filepath):
        # Walk the tree; only the file names are needed, so the root and
        # directory lists are deliberately unused.
        for _root, _dirnames, filenames in os.walk(filepath):
            for filename in fnmatch.filter(filenames, pattern):
                # Collect just the file name; use os.path.join(_root, filename)
                # here instead if full paths are wanted.
                matches.append(filename)
        if matches:
            print("Found {} files:".format(len(matches)))
            output_files(matches)
        else:
            print("No files found.")
    else:
        print("Sorry that path does not exist. Try again.")


def output_files(list_of_files):
    """Print each file name on its own line."""
    for filename in list_of_files:
        print(filename)
if __name__ == '__main__':
    # Demo: list every .py file under the current directory.
    get_file_names('./', '*.py')
# 05:09:47|bryan@LAPTOP-9LGJ3JGS:[scripts] scripts_exitstatus:1[╗__________________________________________________________o>
#
# python3 10_find_files_recursively.py
# Found 31 files:
# 02_find_all_links.py
# 03_simple_twitter_manager.py
# 04_rename_with_slice.py
# 05_load_json_without_dupes.py
# 06_execution_time.py
# 07_benchmark_permissions_loading_django.py
# 08_basic_email_web_crawler.py
# 09_basic_link_web_crawler.py
# 10_find_files_recursively.py
# 11_optimize_images_with_wand.py
# 12_csv_split.py
# 13_random_name_generator.py
# 15_check_my_environment.py
# 16_jinja_quick_load.py
# 18_zipper.py
# 19_tsv-to-csv.py
# 20_restore_file_from_git.py
# 21_twitter_bot.py
# 22_git_tag.py
# 23_flask_session_test.py
# 24_sql2csv.py
# 25_ip2geolocation.py
# 26_stock_scraper.py
# 27_send_sms.py
# 28_income_tax_calculator.py
# 29_json_to_yaml.py
# 30_fullcontact.py
# 31_youtube_sentiment.py
# 32_stock_scraper.py
# 33_country_code.py
# 34_git_all_repos.py
# |05:10:14|bryan@LAPTOP-9LGJ3JGS:[scripts] scripts_exitstatus:0[╗__________________________________________________________o>
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
91d11f83078c6f58b08135d0c5f716c6a00ddffd | 5bf88a21ad382f75ee94cf98a481df162d519304 | /functional_testing/tutorial/tests.py | f372351b7f8ebf180bda99ed038156fe8c3ca1e8 | [] | no_license | VladyslavHnatchenko/pyramid_projects | 43dd3181599c822109f0f5e39f05c7393c721f7c | 2a727a7799845231f4ba61a8129d710938880f46 | refs/heads/master | 2022-06-10T22:41:01.483199 | 2020-05-08T05:44:10 | 2020-05-08T05:44:10 | 261,702,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import unittest
from pyramid import testing
class TutorialViewTests(unittest.TestCase):
    """Unit tests: call the view callable directly with a dummy request."""
    def setUp(self):
        # Set up a minimal Pyramid testing registry for each test.
        self.config = testing.setUp()
    def tearDown(self):
        testing.tearDown()
    def test_hello_world(self):
        from tutorial import hello_world
        request = testing.DummyRequest()
        response = hello_world(request)
        self.assertEqual(response.status_code, 200)
class TutorialFunctionalTests(unittest.TestCase):
    """Functional tests: run the whole WSGI app through WebTest."""
    def setUp(self):
        from tutorial import main
        app = main({})
        from webtest import TestApp
        self.testapp = TestApp(app)
    def test_hello_world(self):
        # status=200 makes WebTest assert the HTTP status itself.
        res = self.testapp.get('/', status=200)
        self.assertIn(b'<h1>Whats up, Man!</h1>', res.body)
| [
"hnatchenko.vladyslav@gmail.com"
] | hnatchenko.vladyslav@gmail.com |
c284d3231f39051b27c7db6e856ea8c8fa9de65a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/b946e078bb904b47bf67863586198872.py | 8a22ddab6ef5cc2e22d14498cd235ee04d34eb67 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 661 | py | # -*- coding: utf-8 -*-
import re
"""
Bob answers 'Sure.' if you ask him a question.
He answers 'Woah, chill out!' if you yell at him.
He says 'Fine. Be that way!' if you address him without actually saying
anything.
He answers 'Whatever.' to anything else.
"""
def hey(text):
    """Return Bob's reply to *text*.

    'Fine. Be that way!' for silence (empty/whitespace input),
    'Woah, chill out!' for shouting (all-uppercase with letters),
    'Sure.' for a question, and 'Whatever.' for everything else.
    Shouting takes precedence over questioning.
    """
    stripped = text.strip()
    if not stripped:
        return 'Fine. Be that way!'
    if stripped.isupper():
        return 'Woah, chill out!'
    return 'Sure.' if stripped.endswith("?") else 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a07065dd91263b9df6c81b2273dd18dce7104ca2 | a86e5de1a4a732172e4447d54fb96f62471fa450 | /internet/getfilegui_ftp.py | 308b819986ee148ae282c24d08b9f7ae287509c6 | [] | no_license | flwwsg/learnpy | 14c769ee5b59e62a9cd02095f00541ad5393517d | 915d71fbb53927f7063dd344f327f95cc0b25322 | refs/heads/master | 2020-06-30T23:34:19.134225 | 2017-01-18T13:43:54 | 2017-01-18T13:43:54 | 74,344,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | #!/usr/bin/env python3
from tkinter import Tk, mainloop
from tkinter.messagebox import showinfo
import getfile_ftp, os, sys, _thread
from form import Form
class FtpForm(Form):
    """Tkinter entry form for FTP transfer parameters.

    Collects server/directory/file/credential fields and runs each transfer
    in a background thread so the GUI stays responsive.  Subclasses supply
    ``title``, ``mode``, and ``do_transfer``.
    """
    def __init__(self):
        root = Tk()
        root.title(self.title)
        labels = ['Server Name', 'Remote Dir', 'File Name',
                  'Local Dir', 'User Name?', 'Password?']
        Form.__init__(self, labels, root)
        # self.threads counts in-flight transfers; guarded by self.mutex
        # because worker threads decrement it concurrently with the GUI thread.
        self.mutex = _thread.allocate_lock()
        self.threads = 0
    def transfer(self, filename, servername, remotedir, userinfo):
        """Worker-thread entry point: run one transfer and report the result."""
        try:
            self.do_transfer(filename, servername, remotedir, userinfo)
            print('%s of "%s" successful' % (self.mode, filename))
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; catch only real errors and report them.
            print('%s of "%s" has failed:' % (self.mode, filename), end=' ')
            print(sys.exc_info()[0], sys.exc_info()[1])
        finally:
            # Always decrement the live-thread count, even if reporting fails;
            # `with` releases the lock on any exception.
            with self.mutex:
                self.threads -= 1
    def onSubmit(self):
        """Read the form fields and launch a transfer thread."""
        Form.onSubmit(self)
        localdir = self.content['Local Dir'].get()
        remotedir = self.content['Remote Dir'].get()
        servername = self.content['Server Name'].get()
        filename = self.content['File Name'].get()
        username = self.content['User Name?'].get()
        password = self.content['Password?'].get()
        userinfo = ()
        if username and password:
            userinfo = (username, password)
        if localdir:
            os.chdir(localdir)
        with self.mutex:
            self.threads += 1
        ftpargs = (filename, servername, remotedir, userinfo)
        _thread.start_new_thread(self.transfer, ftpargs)
        showinfo(self.title, '%s of "%s" started' % (self.mode, filename))
    def onCancel(self):
        """Exit the GUI, but refuse while transfers are still running."""
        if self.threads == 0:
            # quit() on any Tk instance stops the shared mainloop -- presumably
            # intentional here rather than quitting the original root; confirm.
            Tk().quit()
        else:
            showinfo(self.title,
                     'Cannot exit: %d threads running' % self.threads)
class FtpGetfileForm(FtpForm):
    """Concrete FtpForm that downloads a file via the getfile_ftp module."""
    title = 'FtpGetfileGui'   # window title shown by the base form
    mode = 'Download'         # verb used in status/info messages
    def do_transfer(self, filename, servername, remotedir, userinfo):
        # Non-verbose download; refetch=True forces re-download even if the
        # file already exists locally.
        getfile_ftp.getfile(filename, servername, remotedir,
                            userinfo, verbose=False, refetch=True)
if __name__ == '__main__':
    # Self-test: build the download form and enter the Tk event loop.
    FtpGetfileForm()
    mainloop()
"2319406132@qq.com"
] | 2319406132@qq.com |
548909c5d39880b2a0537f2e32cc3d0a8d52bf53 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ATM-FORUM-SRVC-REG.py | cc7455d72d5db4bb2f28214f1984998e900acb87 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 4,777 | py | #
# PySNMP MIB module ATM-FORUM-SRVC-REG (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ATM-FORUM-SRVC-REG
# Produced by pysmi-0.3.4 at Wed May 1 11:31:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint")
atmForumAdmin, atmForumUni = mibBuilder.importSymbols("ATM-FORUM-TC-MIB", "atmForumAdmin", "atmForumUni")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, Counter32, Gauge32, TimeTicks, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ObjectIdentity, MibIdentifier, iso, ModuleIdentity, NotificationType, Bits, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "Gauge32", "TimeTicks", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ObjectIdentity", "MibIdentifier", "iso", "ModuleIdentity", "NotificationType", "Bits", "IpAddress")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class AtmAddress(OctetString):
    # Generated textual convention: an ATM address is either 8 octets
    # (native E.164) or 20 octets (NSAP-format), hence the size union.
    subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(20, 20), )
atmfSrvcRegistryGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 2, 8))
atmfSrvcRegTypes = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 1, 5))
atmfSrvcRegLecs = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 1, 5, 1))
atmfSrvcRegTable = MibTable((1, 3, 6, 1, 4, 1, 353, 2, 8, 1), )
if mibBuilder.loadTexts: atmfSrvcRegTable.setStatus('mandatory')
if mibBuilder.loadTexts: atmfSrvcRegTable.setDescription('The table implemented by the UNI Management Entity on the network side of the ATM UNI port contains all of the services that are available to the user-side of the UNI indexed by service identifier.')
atmfSrvcRegEntry = MibTableRow((1, 3, 6, 1, 4, 1, 353, 2, 8, 1, 1), ).setIndexNames((0, "ATM-FORUM-SRVC-REG", "atmfSrvcRegPort"), (0, "ATM-FORUM-SRVC-REG", "atmfSrvcRegServiceID"), (0, "ATM-FORUM-SRVC-REG", "atmfSrvcAddressIndex"))
if mibBuilder.loadTexts: atmfSrvcRegEntry.setStatus('mandatory')
if mibBuilder.loadTexts: atmfSrvcRegEntry.setDescription('Information about a single service provider that is available to the user-side of the ATM UNI port.')
atmfSrvcRegPort = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 2, 8, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: atmfSrvcRegPort.setStatus('mandatory')
if mibBuilder.loadTexts: atmfSrvcRegPort.setDescription('A unique value which identifies the UNI port for which the service provider is available to the user-side. The value of 0 has the special meaning of identifying the local UNI.')
atmfSrvcRegServiceID = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 2, 8, 1, 1, 2), ObjectIdentifier())
if mibBuilder.loadTexts: atmfSrvcRegServiceID.setStatus('mandatory')
if mibBuilder.loadTexts: atmfSrvcRegServiceID.setDescription('This is the service identifier which uniquely identifies the type of service at the address provided in the table.')
atmfSrvcRegATMAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 2, 8, 1, 1, 3), AtmAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmfSrvcRegATMAddress.setStatus('mandatory')
if mibBuilder.loadTexts: atmfSrvcRegATMAddress.setDescription('This is the full address of the service. The user-side ATM UNI port may use this address to establish a connection with the service.')
atmfSrvcRegAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 2, 8, 1, 1, 4), Integer32())
if mibBuilder.loadTexts: atmfSrvcRegAddressIndex.setStatus('mandatory')
if mibBuilder.loadTexts: atmfSrvcRegAddressIndex.setDescription('An arbitrary integer to differentiate multiple rows containing different ATM addresses for the same service on the same port.')
mibBuilder.exportSymbols("ATM-FORUM-SRVC-REG", atmfSrvcRegTypes=atmfSrvcRegTypes, atmfSrvcRegServiceID=atmfSrvcRegServiceID, atmfSrvcRegAddressIndex=atmfSrvcRegAddressIndex, atmfSrvcRegTable=atmfSrvcRegTable, AtmAddress=AtmAddress, atmfSrvcRegistryGroup=atmfSrvcRegistryGroup, atmfSrvcRegEntry=atmfSrvcRegEntry, atmfSrvcRegLecs=atmfSrvcRegLecs, atmfSrvcRegPort=atmfSrvcRegPort, atmfSrvcRegATMAddress=atmfSrvcRegATMAddress)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
a4af67979baf1f782211359a2f6236ff18ffee73 | cc2d37a71eac8422b0722533766b3ee95b5b5d1a | /taxonomy/db/models/region.py | b9fae4165d37ce5d6283593edbcfd1d4793bca2b | [] | no_license | JelleZijlstra/taxonomy | 88018a1a0ec114875c45cf87ffc825957fc3e870 | 56aac782e2cbbd084a14d2ad1b1572729ba387be | refs/heads/master | 2023-08-17T03:20:13.576928 | 2023-08-07T00:47:41 | 2023-08-07T00:47:41 | 10,962,492 | 0 | 0 | null | 2023-08-17T04:53:54 | 2013-06-26T08:21:35 | Python | UTF-8 | Python | false | false | 11,467 | py | from __future__ import annotations
import collections
import sys
from collections.abc import Iterable
from typing import IO, Any
from peewee import CharField, ForeignKeyField
from taxonomy.apis.cloud_search import SearchField, SearchFieldType
from ... import events, getinput
from .. import constants, models
from ..derived_data import DerivedField
from .base import BaseModel, EnumField, get_tag_based_derived_field
class Region(BaseModel):
    """A geographic region (country, state, continent, ...) in the database.

    Regions form a tree through ``parent``; most helpers aggregate over the
    whole subtree (e.g. ``has_collections`` is True if any descendant region
    has a collection).
    """

    creation_event = events.Event["Region"]()
    save_event = events.Event["Region"]()
    label_field = "name"
    call_sign = "R"

    name = CharField()
    comment = CharField(null=True)
    # Self-referential FK: the containing region (null for top-level regions).
    parent = ForeignKeyField(
        "self", related_name="children", db_column="parent_id", null=True
    )
    kind = EnumField(constants.RegionKind)

    # Precomputed per-region flags/lists exposed through the derived-data layer.
    derived_fields = [
        DerivedField("has_collections", bool, lambda region: region.has_collections()),
        DerivedField(
            "has_citation_groups", bool, lambda region: region.has_citation_groups()
        ),
        DerivedField("has_locations", bool, lambda region: region.has_locations()),
        DerivedField("has_periods", bool, lambda region: region.has_periods()),
        DerivedField("has_type_localities", bool, lambda region: not region.is_empty()),
        DerivedField(
            "has_associated_people", bool, lambda region: region.has_associated_people()
        ),
        get_tag_based_derived_field(
            "associated_people",
            lambda: models.Person,
            "tags",
            lambda: models.tags.PersonTag.ActiveRegion,
            1,
        ),
    ]
    search_fields = [
        SearchField(SearchFieldType.text, "name"),
        SearchField(SearchFieldType.literal, "kind"),
    ]

    def get_search_dicts(self) -> list[dict[str, Any]]:
        """Return the search-index documents for this region."""
        return [{"name": self.name, "kind": self.kind.name}]

    @classmethod
    def make(
        cls, name: str, kind: constants.RegionKind, parent: Region | None = None
    ) -> Region:
        """Create a region plus its matching Location in the Recent period."""
        region = cls.create(name=name, kind=kind, parent=parent)
        models.Location.make(
            name=name,
            period=models.Period.filter(models.Period.name == "Recent").get(),
            region=region,
        )
        return region

    def __repr__(self) -> str:
        # e.g. "Texas, United States (state)"
        out = self.name
        if self.parent:
            out += f", {self.parent.name}"
        out += f" ({self.kind.name})"
        return out

    def get_adt_callbacks(self) -> getinput.CallbackMap:
        """Extra interactive-shell commands available on a Region."""
        return {
            **super().get_adt_callbacks(),
            "display_collections": self.display_collections,
            "display_citation_groups": self.display_citation_groups,
            "display_periods": self.display_periods,
            "display_type_localities": lambda: self.display(full=False, locations=True),
        }

    def get_general_localities(self) -> list[models.Location]:
        """Locations that stand for the region as a whole.

        Matches "<name>", "<name> Pleistocene", "<name> fossil", and any
        location whose name ends in "(<name>)".
        """
        name_field = models.Location.name
        my_name = self.name
        return models.Location.bfind(
            models.Location.region == self,
            (name_field == my_name)
            | (name_field == f"{my_name} Pleistocene")
            | (name_field == f"{my_name} fossil")
            | (name_field.endswith(f"({my_name})")),
        )

    def rename(self, new_name: str | None = None) -> None:
        """Rename the region and rewrite its general localities to match.

        Prompts for the new name when *new_name* is None.  Locations whose
        name does not follow one of the recognized patterns are skipped
        (with a message) rather than renamed.
        """
        old_name = self.name
        if new_name is None:
            new_name = self.getter("name").get_one_key(
                default=old_name, allow_empty=False
            )
        for loc in self.get_general_localities():
            if loc.name.endswith(f"({old_name})"):
                loc_name = loc.name.replace(f"({old_name})", f"({new_name})")
            elif loc.name == old_name:
                loc_name = new_name
            elif loc.name == f"{old_name} fossil":
                loc_name = f"{new_name} fossil"
            elif loc.name == f"{old_name} Pleistocene":
                loc_name = f"{new_name} Pleistocene"
            else:
                print("Skipping unrecognized name", loc.name)
                continue
            print(f"Renaming {loc.name!r} -> {loc_name!r}")
            loc.name = loc_name
        self.name = new_name

    def display(
        self,
        full: bool = False,
        depth: int = 0,
        file: IO[str] = sys.stdout,
        children: bool = False,
        skip_empty: bool = True,
        locations: bool = False,
    ) -> None:
        """Print this region (and optionally its locations/children) as a tree.

        *depth* controls indentation; *skip_empty* suppresses regions and
        locations without type localities.
        """
        if skip_empty and self.is_empty():
            return
        getinput.flush()
        file.write("{}{}\n".format(" " * (depth + 4), repr(self)))
        if self.comment:
            file.write("{}Comment: {}\n".format(" " * (depth + 12), self.comment))
        if locations or full:
            for location in self.sorted_locations():
                if skip_empty and location.type_localities.count() == 0:
                    continue
                location.display(full=full, depth=depth + 4, file=file)
        if children or full:
            for child in self.sorted_children():
                child.display(
                    full=full,
                    depth=depth + 4,
                    file=file,
                    skip_empty=skip_empty,
                    locations=locations,
                )

    def display_without_stratigraphy(
        self,
        full: bool = False,
        depth: int = 0,
        file: IO[str] = sys.stdout,
        skip_empty: bool = False,
    ) -> None:
        """Print only this region's non-stratigraphic, non-General locations."""
        for location in self.sorted_locations():
            if skip_empty and location.type_localities.count() == 0:
                continue
            if location.stratigraphic_unit is not None:
                continue
            if location.has_tag(models.location.LocationTag.General):
                continue
            location.display(full=full, depth=depth + 4, file=file)

    def is_empty(self) -> bool:
        """True if no location in this region's subtree has type localities."""
        # `!= True` is a peewee SQL expression, not Python boolean style.
        for loc in self.locations.filter(models.Location.deleted != True):
            if loc.type_localities.count() > 0:
                return False
        for child in self.children:
            if not child.is_empty():
                return False
        return True

    def sorted_children(self) -> list[Region]:
        """Child regions sorted alphabetically by name."""
        return sorted(self.children, key=lambda c: c.name)

    def sorted_locations(self) -> list[models.Location]:
        """Non-deleted locations in this region, in Location sort order."""
        return sorted(
            self.locations.filter(models.Location.deleted != True),
            key=models.Location.sort_key,
        )

    def get_location(self) -> models.Location:
        """Returns the corresponding Recent Location."""
        return models.Location.get(region=self, name=self.name, deleted=False)

    def all_parents(self) -> Iterable[Region]:
        """Returns all parent regions of this region."""
        if self.parent is not None:
            yield self.parent
            yield from self.parent.all_parents()

    def parent_of_kind(self, kind: constants.RegionKind) -> Region | None:
        """Nearest ancestor (or self) of the given kind, or None."""
        if self.kind is kind:
            return self
        for parent in self.all_parents():
            if parent.kind is kind:
                return parent
        return None

    def all_citation_groups(self) -> Iterable[models.CitationGroup]:
        """Yield citation groups of this region and all descendants."""
        yield from self.citation_groups
        for child in self.children:
            yield from child.all_citation_groups()

    def has_citation_groups(self, type: constants.ArticleType | None = None) -> bool:
        """True if the subtree has any citation group (optionally of *type*)."""
        for cg in self.citation_groups:
            if type is None or cg.type is type:
                return True
        return any(child.has_citation_groups(type) for child in self.children)

    def display_citation_groups(
        self,
        full: bool = False,
        only_nonempty: bool = True,
        depth: int = 0,
        type: constants.ArticleType | None = None,
    ) -> None:
        """Print the subtree's citation groups, grouped by article type."""
        if only_nonempty and not self.has_citation_groups(type=type):
            return
        print(" " * depth + self.name)
        by_type: dict[constants.ArticleType, list[models.CitationGroup]] = (
            collections.defaultdict(list)
        )
        for group in sorted(self.citation_groups, key=lambda cg: cg.name):
            if type is not None and group.type is not type:
                continue
            by_type[group.type].append(group)
        for typ, groups in sorted(by_type.items(), key=lambda pair: pair[0].name):
            # When filtering by a single type, the type header is redundant.
            if type is None:
                print(f"{' ' * (depth + 4)}{typ.name}")
            for group in groups:
                if not group.deleted:
                    group.display(full=full, include_articles=full, depth=depth + 8)
        for child in self.sorted_children():
            child.display_citation_groups(
                full=full, only_nonempty=only_nonempty, depth=depth + 4, type=type
            )

    def has_collections(self) -> bool:
        """True if this region or any descendant has a collection."""
        for _ in self.collections:
            return True
        return any(child.has_collections() for child in self.children)

    def display_collections(
        self, full: bool = False, only_nonempty: bool = True, depth: int = 0
    ) -> None:
        """Print the subtree's collections, grouped by city when cities exist."""
        if only_nonempty and not self.has_collections():
            return
        print(" " * depth + self.name)
        by_city: dict[str, list[models.Collection]] = collections.defaultdict(list)
        cities = set()
        for collection in sorted(self.collections, key=lambda c: c.label):
            by_city[collection.city or ""].append(collection)
            cities.add(collection.city)
        # If no collection has a city at all, skip the city grouping level.
        if cities == {None}:
            for collection in by_city[""]:
                collection.display(full=full, depth=depth + 4)
        else:
            for city, colls in sorted(by_city.items()):
                print(" " * (depth + 4) + city)
                for collection in colls:
                    collection.display(full=full, depth=depth + 8)
        for child in self.sorted_children():
            child.display_collections(
                full=full, only_nonempty=only_nonempty, depth=depth + 4
            )

    def has_locations(self) -> bool:
        """True if this region or any descendant has a location."""
        for _ in self.locations:
            return True
        return any(child.has_locations() for child in self.children)

    def has_associated_people(self) -> bool:
        """True if any person in the subtree lists this as an active region."""
        if self.get_raw_derived_field("associated_people"):
            return True
        return any(child.has_associated_people() for child in self.children)

    def has_periods(self) -> bool:
        """True if this region or any descendant has a stratigraphic period."""
        for _ in self.periods:
            return True
        return any(child.has_periods() for child in self.children)

    def display_periods(self, full: bool = False, depth: int = 0) -> None:
        """Print the subtree's periods (names only unless *full*)."""
        if not self.has_periods():
            return
        print(" " * depth + self.name)
        for period in sorted(self.periods, key=lambda p: p.name):
            if full:
                period.display(depth=depth + 4)
            else:
                print(" " * (depth + 4) + period.name)
        for child in self.sorted_children():
            child.display_periods(full=full, depth=depth + 4)

    def add_cities(self) -> None:
        """Interactively fill in missing cities for collections in the subtree."""
        # `== None` is a peewee SQL null check, not Python identity style.
        for collection in self.collections.filter(models.Collection.city == None):
            collection.display()
            collection.fill_field("city")
        for child in self.children:
            child.add_cities()

    def has_parent(self, parent: Region) -> bool:
        """True if *parent* is this region itself or one of its ancestors."""
        if self == parent:
            return True
        elif self.parent is None:
            return False
        else:
            return self.parent.has_parent(parent)
| [
"jelle.zijlstra@gmail.com"
] | jelle.zijlstra@gmail.com |
becc258774d0ac676d932c75db50604c7e761c3e | b25182d0034468e5e545c6c72e5a2cdd3c43a484 | /.PyCharm2017.2/config/fileTemplates/internal/Python Script.py | 4a7e3a73b9fa90a9603b16a68691ed30f3fc57b0 | [] | no_license | lovewula/config | f9ac16b30082c04be7733969d5359ee6c7258db6 | c0720e5bfd49f579a52f83de36de40c76996ebf6 | refs/heads/master | 2021-08-19T19:31:44.088218 | 2017-11-27T08:04:06 | 2017-11-27T08:04:06 | 111,974,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | # python -- which | [
"lovewula1314@gmail.com"
] | lovewula1314@gmail.com |
963865d264c997cda532d2ed72b29f073ba51061 | 3ffeeae8a9a3245d8998d94aa08f680f00056cad | /226.翻转二叉树.py | c06af9247a396860bcc143a341de6a944f56d89f | [] | no_license | Ezi4Zy/leetcode | 6e293e5c07a7d8c3e38f9445ff24330134ef6c48 | 9d394cd2862703cfb7a7b505b35deda7450a692e | refs/heads/master | 2022-04-09T14:11:36.957861 | 2022-03-09T10:30:30 | 2022-03-09T10:30:30 | 57,290,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | #
# @lc app=leetcode.cn id=226 lang=python
#
# [226] 翻转二叉树
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def invertTree(self, root):
        """Mirror a binary tree in place and return its root.

        Iterative version: visit every node with an explicit stack and
        swap its children.

        :type root: TreeNode
        :rtype: TreeNode
        """
        pending = [root]
        while pending:
            node = pending.pop()
            if node is None:
                continue
            node.left, node.right = node.right, node.left
            pending.append(node.left)
            pending.append(node.right)
        return root
# @lc code=end
| [
"Ezi4zy@163.com"
] | Ezi4zy@163.com |
85451990d7639637c9028004b7716a90604a4d5e | 41d1e085dc3ec6c329b8d6443035e1e8a1c93bcc | /gridded/tests/test_ugrid/test_grid_manipulation.py | 61772aa3fa37c4e63d59b1f2e94ce2ec0254dcef | [
"Unlicense"
] | permissive | Ocean1125/gridded | 9252d3d89ecacc55c59a0ecf6fd60fe6ac0afd6e | 90cca5edf4c8d9a47914c2b6d6f78180d9c280a5 | refs/heads/master | 2023-05-15T13:21:34.144583 | 2021-06-03T21:50:01 | 2021-06-03T21:50:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | #!/usr/bin/env python
"""
Testing of various utilities to manipulate the grid.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import pytest
from .utilities import two_triangles, twenty_one_triangles
def test_build_face_face_connectivity_small(two_triangles):
    """The two faces should reference each other as neighbors; -1 = no neighbor."""
    ugrid = two_triangles
    ugrid.build_face_face_connectivity()
    face_face = ugrid.face_face_connectivity
    assert np.array_equal(face_face[0], [-1, 1, -1])
    assert np.array_equal(face_face[1], [-1, -1, 0])
def test_build_face_face_connectivity_big(twenty_one_triangles):
    """Spot-check neighbor rows on the larger 21-triangle fixture."""
    ugrid = twenty_one_triangles
    ugrid.build_face_face_connectivity()
    face_face = ugrid.face_face_connectivity
    assert face_face[0].tolist() == [-1, 3, 2]
    assert face_face[9].tolist() == [8, 10, 7]
    assert face_face[8].tolist() == [-1, 9, 6]
    assert face_face[15].tolist() == [14, 16, 13]
    assert face_face[20].tolist() == [19, -1, -1]
def test_build_edges(two_triangles):
    """build_edges should yield the five unique edges of the two triangles."""
    ugrid = two_triangles
    ugrid.build_edges()
    edges = ugrid.edges
    # Sort so the comparison is independent of construction order.
    edges.sort(axis=0)
    assert np.array_equal(edges, [[0, 1], [0, 2], [1, 2], [1, 3], [2, 3]])
def test_build_face_coordinates(two_triangles):
    """Face coordinates should be one (x, y) point per face."""
    grid = two_triangles
    grid.build_face_coordinates()
    coords = grid.face_coordinates
    assert coords.shape == (2, 2)
    assert np.allclose(coords, [(1.1, 0.76666667),
                                (2.1, 1.43333333)])
def test_build_edge_coordinates(two_triangles):
    """Edge coordinates should be one (x, y) point per edge."""
    grid = two_triangles
    grid.build_edge_coordinates()
    coords = grid.edge_coordinates
    assert coords.shape == (5, 2)
    assert np.allclose(coords, [[1.1, 0.1],
                                [2.6, 1.1],
                                [2.1, 2.1],
                                [0.6, 1.1],
                                [1.6, 1.1]])
def test_build_boundary_coordinates(two_triangles):
    """Boundary coordinates should be one (x, y) point per boundary segment."""
    grid = two_triangles
    # Boundaries are set explicitly here rather than computed.
    grid.boundaries = [(0, 1), (0, 2), (2, 3), (1, 3)]
    grid.build_boundary_coordinates()
    coords = grid.boundary_coordinates
    assert coords.shape == (4, 2)
    assert np.allclose(coords, [[1.1, 0.1],
                                [0.6, 1.1],
                                [2.1, 2.1],
                                [2.6, 1.1]])
def test_build_boundaries_small(two_triangles):
    """build_boundaries should find the outer edges of the two-triangle grid."""
    ugrid = two_triangles
    # Boundary detection requires face-face connectivity to exist first.
    ugrid.build_face_face_connectivity()
    ugrid.build_boundaries()
    boundaries = sorted(ugrid.boundaries.tolist())
    expected_boundaries = [[0, 1], [1, 3], [2, 0], [3, 2]]
    assert boundaries == expected_boundaries
def test_build_boundaries_big(twenty_one_triangles):
    """build_boundaries should find the outer edges of the 21-triangle grid."""
    ugrid = twenty_one_triangles
    # Boundary detection requires face-face connectivity to exist first.
    ugrid.build_face_face_connectivity()
    ugrid.build_boundaries()
    boundaries = sorted(ugrid.boundaries.tolist())
    expected_boundaries = [[0, 1], [1, 5], [2, 0], [3, 6], [4, 3], [5, 11],
                           [6, 9], [7, 2], [9, 10], [10, 4], [11, 14], [12, 7],
                           [13, 12], [14, 16], [15, 13], [16, 18], [17, 15],
                           [18, 19], [19, 17]]
    assert boundaries == expected_boundaries
| [
"Chris.Barker@noaa.gov"
] | Chris.Barker@noaa.gov |
14eb64cd3a3bd8cedfcded8bc29dfff5f452c7c8 | b59372692c912ba17ec2e6812983663a6deccdaf | /.history/bsServer/models_20200502170425.py | 683a93190755a6adc35736cedb98242f994d2e94 | [] | no_license | nanjigirl/bs-server-project | 2d7c240ddf21983ed0439829a7995bde94082467 | 7863aed279b233d359c540c71fdd08ce8633976b | refs/heads/master | 2022-08-02T17:33:48.201967 | 2020-05-25T15:18:34 | 2020-05-25T15:18:34 | 261,204,713 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from django.db import models
# Create your models here.
# Create the model class
class User(models.Model):
    """Django model describing a user record.

    Fields: explicit auto-increment primary key, name (max 30 chars),
    integer age, and sex (max 2 chars).
    """
    # This field can be omitted; Django adds an auto primary key by default.
    id = models.AutoField(primary_key = True)
    name = models.CharField(max_length = 30)
    age = models.IntegerField()
    sex = models.CharField(max_length = 2)
    def __str__(self):
        # Fixed: method was misspelled `_str_` (never invoked by Python), and
        # the old body used a broken `\(self)` continuation that tried to call
        # the string literal. Now formats the instance's own field values.
        return "<User:{id=%s,name=%s,age=%s,sex=%s}>" % (self.id, self.name, self.age, self.sex)
"chenxueb@yonyou.com"
] | chenxueb@yonyou.com |
160c19d7410b6667479572a9c770f85c8b7fc851 | 718203f44f80436001eb50ed9a85408b4ea8d778 | /venv/Lib/site-packages/facebook_business/adobjects/campaign.py | bdf9830876b8ecd5927d1e6d5bedcfe14ffb4e05 | [
"ISC"
] | permissive | ShashkinRoman/fb_importads_for_kans | 52cc73ae7ca315647a08b1c424ee27885a35a104 | 41184710a0a8bae5b342573a0dd25adc64751409 | refs/heads/master | 2020-12-22T07:00:20.488197 | 2020-01-28T10:03:28 | 2020-01-28T10:03:28 | 236,703,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,979 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
from facebook_business.mixins import HasAdLabels
from facebook_business.mixins import CanValidate
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class Campaign(
AbstractCrudObject,
HasAdLabels,
CanValidate,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isCampaign = True
super(Campaign, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
account_id = 'account_id'
adlabels = 'adlabels'
bid_strategy = 'bid_strategy'
boosted_object_id = 'boosted_object_id'
brand_lift_studies = 'brand_lift_studies'
budget_rebalance_flag = 'budget_rebalance_flag'
budget_remaining = 'budget_remaining'
buying_type = 'buying_type'
can_create_brand_lift_study = 'can_create_brand_lift_study'
can_use_spend_cap = 'can_use_spend_cap'
configured_status = 'configured_status'
created_time = 'created_time'
daily_budget = 'daily_budget'
effective_status = 'effective_status'
id = 'id'
issues_info = 'issues_info'
last_budget_toggling_time = 'last_budget_toggling_time'
lifetime_budget = 'lifetime_budget'
name = 'name'
objective = 'objective'
pacing_type = 'pacing_type'
promoted_object = 'promoted_object'
recommendations = 'recommendations'
source_campaign = 'source_campaign'
source_campaign_id = 'source_campaign_id'
special_ad_category = 'special_ad_category'
spend_cap = 'spend_cap'
start_time = 'start_time'
status = 'status'
stop_time = 'stop_time'
topline_id = 'topline_id'
updated_time = 'updated_time'
adbatch = 'adbatch'
execution_options = 'execution_options'
iterative_split_test_configs = 'iterative_split_test_configs'
upstream_events = 'upstream_events'
class BidStrategy:
lowest_cost_without_cap = 'LOWEST_COST_WITHOUT_CAP'
lowest_cost_with_bid_cap = 'LOWEST_COST_WITH_BID_CAP'
target_cost = 'TARGET_COST'
class ConfiguredStatus:
active = 'ACTIVE'
archived = 'ARCHIVED'
deleted = 'DELETED'
paused = 'PAUSED'
class EffectiveStatus:
active = 'ACTIVE'
archived = 'ARCHIVED'
deleted = 'DELETED'
in_process = 'IN_PROCESS'
paused = 'PAUSED'
with_issues = 'WITH_ISSUES'
class Status:
active = 'ACTIVE'
archived = 'ARCHIVED'
deleted = 'DELETED'
paused = 'PAUSED'
class DatePreset:
last_14d = 'last_14d'
last_28d = 'last_28d'
last_30d = 'last_30d'
last_3d = 'last_3d'
last_7d = 'last_7d'
last_90d = 'last_90d'
last_month = 'last_month'
last_quarter = 'last_quarter'
last_week_mon_sun = 'last_week_mon_sun'
last_week_sun_sat = 'last_week_sun_sat'
last_year = 'last_year'
lifetime = 'lifetime'
this_month = 'this_month'
this_quarter = 'this_quarter'
this_week_mon_today = 'this_week_mon_today'
this_week_sun_today = 'this_week_sun_today'
this_year = 'this_year'
today = 'today'
yesterday = 'yesterday'
class ExecutionOptions:
include_recommendations = 'include_recommendations'
validate_only = 'validate_only'
class Objective:
app_installs = 'APP_INSTALLS'
brand_awareness = 'BRAND_AWARENESS'
conversions = 'CONVERSIONS'
event_responses = 'EVENT_RESPONSES'
lead_generation = 'LEAD_GENERATION'
link_clicks = 'LINK_CLICKS'
local_awareness = 'LOCAL_AWARENESS'
messages = 'MESSAGES'
offer_claims = 'OFFER_CLAIMS'
page_likes = 'PAGE_LIKES'
post_engagement = 'POST_ENGAGEMENT'
product_catalog_sales = 'PRODUCT_CATALOG_SALES'
reach = 'REACH'
video_views = 'VIDEO_VIEWS'
class SpecialAdCategory:
credit = 'CREDIT'
employment = 'EMPLOYMENT'
housing = 'HOUSING'
none = 'NONE'
class Operator:
all = 'ALL'
any = 'ANY'
class StatusOption:
active = 'ACTIVE'
inherited_from_source = 'INHERITED_FROM_SOURCE'
paused = 'PAUSED'
# @deprecated get_endpoint function is deprecated
@classmethod
def get_endpoint(cls):
return 'campaigns'
# @deprecated api_create is being deprecated
def api_create(self, parent_id, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.adobjects.adaccount import AdAccount
return AdAccount(api=self._api, fbid=parent_id).create_campaign(fields, params, batch, success, failure, pending)
def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='DELETE',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'am_call_tags': 'map',
'date_preset': 'date_preset_enum',
'from_adtable': 'bool',
'time_range': 'Object',
}
enums = {
'date_preset_enum': [
'last_14d',
'last_28d',
'last_30d',
'last_3d',
'last_7d',
'last_90d',
'last_month',
'last_quarter',
'last_week_mon_sun',
'last_week_sun_sat',
'last_year',
'lifetime',
'this_month',
'this_quarter',
'this_week_mon_today',
'this_week_sun_today',
'this_year',
'today',
'yesterday',
],
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Campaign,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'adlabels': 'list<Object>',
'adset_bid_amounts': 'map',
'adset_budgets': 'list<map>',
'bid_strategy': 'bid_strategy_enum',
'budget_rebalance_flag': 'bool',
'daily_budget': 'unsigned int',
'execution_options': 'list<execution_options_enum>',
'iterative_split_test_configs': 'list<Object>',
'lifetime_budget': 'unsigned int',
'name': 'string',
'objective': 'objective_enum',
'pacing_type': 'list<string>',
'promoted_object': 'Object',
'special_ad_category': 'special_ad_category_enum',
'spend_cap': 'unsigned int',
'status': 'status_enum',
'upstream_events': 'map',
}
enums = {
'bid_strategy_enum': Campaign.BidStrategy.__dict__.values(),
'execution_options_enum': Campaign.ExecutionOptions.__dict__.values(),
'objective_enum': Campaign.Objective.__dict__.values(),
'special_ad_category_enum': Campaign.SpecialAdCategory.__dict__.values(),
'status_enum': Campaign.Status.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Campaign,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_ad_studies(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.adstudy import AdStudy
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/ad_studies',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdStudy,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdStudy, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def delete_ad_labels(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'adlabels': 'list<Object>',
'execution_options': 'list<execution_options_enum>',
}
enums = {
'execution_options_enum': Campaign.ExecutionOptions.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='DELETE',
endpoint='/adlabels',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_ad_label(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'adlabels': 'list<Object>',
'execution_options': 'list<execution_options_enum>',
}
enums = {
'execution_options_enum': Campaign.ExecutionOptions.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/adlabels',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Campaign,
api_type='EDGE',
response_parser=ObjectParser(target_class=Campaign, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_ad_rules_governed(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.adrule import AdRule
param_types = {
'pass_evaluation': 'bool',
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/adrules_governed',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdRule,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdRule, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_ads(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.ad import Ad
param_types = {
'ad_draft_id': 'string',
'date_preset': 'date_preset_enum',
'effective_status': 'list<string>',
'include_deleted': 'bool',
'include_drafts': 'bool',
'time_range': 'Object',
'updated_since': 'int',
}
enums = {
'date_preset_enum': Ad.DatePreset.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/ads',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Ad,
api_type='EDGE',
response_parser=ObjectParser(target_class=Ad, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_ad_sets(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.adset import AdSet
param_types = {
'ad_draft_id': 'string',
'date_preset': 'date_preset_enum',
'effective_status': 'list<effective_status_enum>',
'include_drafts': 'bool',
'is_completed': 'bool',
'time_range': 'Object',
}
enums = {
'date_preset_enum': AdSet.DatePreset.__dict__.values(),
'effective_status_enum': AdSet.EffectiveStatus.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/adsets',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdSet,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdSet, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_content_delivery_report(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.contentdeliveryreport import ContentDeliveryReport
param_types = {
'end_date': 'datetime',
'platform': 'platform_enum',
'position': 'position_enum',
'start_date': 'datetime',
'summary': 'bool',
}
enums = {
'platform_enum': ContentDeliveryReport.Platform.__dict__.values(),
'position_enum': ContentDeliveryReport.Position.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/content_delivery_report',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=ContentDeliveryReport,
api_type='EDGE',
response_parser=ObjectParser(target_class=ContentDeliveryReport, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_copies(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'date_preset': 'date_preset_enum',
'effective_status': 'list<effective_status_enum>',
'is_completed': 'bool',
'time_range': 'Object',
}
enums = {
'date_preset_enum': Campaign.DatePreset.__dict__.values(),
'effective_status_enum': Campaign.EffectiveStatus.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/copies',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Campaign,
api_type='EDGE',
response_parser=ObjectParser(target_class=Campaign, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def create_copy(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
'deep_copy': 'bool',
'end_time': 'datetime',
'rename_options': 'Object',
'start_time': 'datetime',
'status_option': 'status_option_enum',
}
enums = {
'status_option_enum': Campaign.StatusOption.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/copies',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=Campaign,
api_type='EDGE',
response_parser=ObjectParser(target_class=Campaign, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_insights(self, fields=None, params=None, is_async=False, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.adsinsights import AdsInsights
if is_async:
return self.get_insights_async(fields, params, batch, success, failure, pending)
param_types = {
'action_attribution_windows': 'list<action_attribution_windows_enum>',
'action_breakdowns': 'list<action_breakdowns_enum>',
'action_report_time': 'action_report_time_enum',
'breakdowns': 'list<breakdowns_enum>',
'date_preset': 'date_preset_enum',
'default_summary': 'bool',
'export_columns': 'list<string>',
'export_format': 'string',
'export_name': 'string',
'fields': 'list<string>',
'filtering': 'list<Object>',
'level': 'level_enum',
'product_id_limit': 'int',
'sort': 'list<string>',
'summary': 'list<string>',
'summary_action_breakdowns': 'list<summary_action_breakdowns_enum>',
'time_increment': 'string',
'time_range': 'Object',
'time_ranges': 'list<Object>',
'use_account_attribution_setting': 'bool',
}
enums = {
'action_attribution_windows_enum': AdsInsights.ActionAttributionWindows.__dict__.values(),
'action_breakdowns_enum': AdsInsights.ActionBreakdowns.__dict__.values(),
'action_report_time_enum': AdsInsights.ActionReportTime.__dict__.values(),
'breakdowns_enum': AdsInsights.Breakdowns.__dict__.values(),
'date_preset_enum': AdsInsights.DatePreset.__dict__.values(),
'level_enum': AdsInsights.Level.__dict__.values(),
'summary_action_breakdowns_enum': AdsInsights.SummaryActionBreakdowns.__dict__.values(),
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/insights',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdsInsights,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdsInsights, api=self._api),
include_summary=False,
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_insights_async(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.adreportrun import AdReportRun
from facebook_business.adobjects.adsinsights import AdsInsights
param_types = {
'action_attribution_windows': 'list<action_attribution_windows_enum>',
'action_breakdowns': 'list<action_breakdowns_enum>',
'action_report_time': 'action_report_time_enum',
'breakdowns': 'list<breakdowns_enum>',
'date_preset': 'date_preset_enum',
'default_summary': 'bool',
'export_columns': 'list<string>',
'export_format': 'string',
'export_name': 'string',
'fields': 'list<string>',
'filtering': 'list<Object>',
'level': 'level_enum',
'product_id_limit': 'int',
'sort': 'list<string>',
'summary': 'list<string>',
'summary_action_breakdowns': 'list<summary_action_breakdowns_enum>',
'time_increment': 'string',
'time_range': 'Object',
'time_ranges': 'list<Object>',
'use_account_attribution_setting': 'bool',
}
enums = {
'action_attribution_windows_enum': AdsInsights.ActionAttributionWindows.__dict__.values(),
'action_breakdowns_enum': AdsInsights.ActionBreakdowns.__dict__.values(),
'action_report_time_enum': AdsInsights.ActionReportTime.__dict__.values(),
'breakdowns_enum': AdsInsights.Breakdowns.__dict__.values(),
'date_preset_enum': AdsInsights.DatePreset.__dict__.values(),
'level_enum': AdsInsights.Level.__dict__.values(),
'summary_action_breakdowns_enum': AdsInsights.SummaryActionBreakdowns.__dict__.values(),
}
if fields is not None:
params['fields'] = params.get('fields') if params.get('fields') is not None else list()
params['fields'].extend(fields)
request = FacebookRequest(
node_id=self['id'],
method='POST',
endpoint='/insights',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AdReportRun,
api_type='EDGE',
response_parser=ObjectParser(target_class=AdReportRun, api=self._api),
include_summary=False,
)
request.add_params(params)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'account_id': 'string',
'adlabels': 'list<AdLabel>',
'bid_strategy': 'BidStrategy',
'boosted_object_id': 'string',
'brand_lift_studies': 'list<AdStudy>',
'budget_rebalance_flag': 'bool',
'budget_remaining': 'string',
'buying_type': 'string',
'can_create_brand_lift_study': 'bool',
'can_use_spend_cap': 'bool',
'configured_status': 'ConfiguredStatus',
'created_time': 'datetime',
'daily_budget': 'string',
'effective_status': 'EffectiveStatus',
'id': 'string',
'issues_info': 'list<AdCampaignIssuesInfo>',
'last_budget_toggling_time': 'datetime',
'lifetime_budget': 'string',
'name': 'string',
'objective': 'string',
'pacing_type': 'list<string>',
'promoted_object': 'AdPromotedObject',
'recommendations': 'list<AdRecommendation>',
'source_campaign': 'Campaign',
'source_campaign_id': 'string',
'special_ad_category': 'string',
'spend_cap': 'string',
'start_time': 'datetime',
'status': 'Status',
'stop_time': 'datetime',
'topline_id': 'string',
'updated_time': 'datetime',
'adbatch': 'list<Object>',
'execution_options': 'list<ExecutionOptions>',
'iterative_split_test_configs': 'list<Object>',
'upstream_events': 'map',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['BidStrategy'] = Campaign.BidStrategy.__dict__.values()
field_enum_info['ConfiguredStatus'] = Campaign.ConfiguredStatus.__dict__.values()
field_enum_info['EffectiveStatus'] = Campaign.EffectiveStatus.__dict__.values()
field_enum_info['Status'] = Campaign.Status.__dict__.values()
field_enum_info['DatePreset'] = Campaign.DatePreset.__dict__.values()
field_enum_info['ExecutionOptions'] = Campaign.ExecutionOptions.__dict__.values()
field_enum_info['Objective'] = Campaign.Objective.__dict__.values()
field_enum_info['SpecialAdCategory'] = Campaign.SpecialAdCategory.__dict__.values()
field_enum_info['Operator'] = Campaign.Operator.__dict__.values()
field_enum_info['StatusOption'] = Campaign.StatusOption.__dict__.values()
return field_enum_info
| [
"romanshashkin@mail.ru"
] | romanshashkin@mail.ru |
4eee6f4fb6f52da831716c694e832f567a110bd2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1481.py | 60894a78cec6553a3757bede617c8af1cbf45e05 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | f = open('/home/cse/btech/cs1130260/workfile.txt')
sumit = open("/home/cse/btech/cs1130260/output.txt", "w")
# Number of test cases; ``f`` (the input file) is opened above.
a = int(f.readline())
x = 1  # 1-based case counter for the output lines
while a > 0:
    # Round one: the volunteer's chosen row number, then the 4x4 grid.
    firstanswer = int(f.readline())
    firstrow1 = f.readline()
    firstrow2 = f.readline()
    firstrow3 = f.readline()
    firstrow4 = f.readline()
    # Round two: same layout after the re-deal.
    secondanswer = int(f.readline())
    secondrow1 = f.readline()
    secondrow2 = f.readline()
    secondrow3 = f.readline()
    secondrow4 = f.readline()
    # Pick the row the volunteer pointed at in each round.
    if firstanswer == 1:
        b = firstrow1.split()
    elif firstanswer == 2:
        b = firstrow2.split()
    elif firstanswer == 3:
        b = firstrow3.split()
    elif firstanswer == 4:
        b = firstrow4.split()
    if secondanswer == 1:
        c = secondrow1.split()
    elif secondanswer == 2:
        c = secondrow2.split()
    elif secondanswer == 3:
        c = secondrow3.split()
    elif secondanswer == 4:
        c = secondrow4.split()
    # Count cards common to both chosen rows, stopping once two matches
    # are seen (more than one match means the trick cannot be resolved).
    matches = 0
    match_col = 0
    row = 0
    while row <= 3 and matches < 2:
        col = 0
        while col <= 3 and matches < 2:
            if b[row] == c[col]:
                match_col = col
                matches = matches + 1
            col = col + 1
        row = row + 1
    if matches == 1:
        sumit.write("Case #%s: %s\n" % (x, int(c[match_col])))
    if matches == 2:
        sumit.write("Case #%s: Bad Magician!\n" % (x))
    if matches == 0:
        sumit.write("Case #%s: Volunteer cheated!\n" % (x))
    x = x + 1
    a = a - 1
sumit.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4f98552f0e559f617ec311f4d3261eceae59e4d2 | dfcbc3d04adda5925ff36f0708173cb7baa10e7e | /leetcode/145.py | 9993c2e7a9a0e8d47052b9fd01540bec850fad61 | [] | no_license | yanggelinux/algorithm-data-structure | 66a6fe9acdcacce226b9dbb85e6236776e7206e9 | 3b13b36f37eb364410b3b5b4f10a1808d8b1111e | refs/heads/master | 2021-07-25T14:14:18.521174 | 2020-09-04T08:20:24 | 2020-09-04T08:20:24 | 215,970,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | # -*- coding: utf8 -*-
"""
Given a binary tree, return its post-order traversal.
Example:
Input: [1,null,2,3]
   1
    \
     2
    /
   3
Output: [3,2,1]
Follow-up: the recursive solution is trivial — can you do it iteratively?
Post-order traversal: for any node, visit its left subtree first, then its right subtree, and finally the node itself.
"""
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x       # payload stored at this node
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution(object):
    """Post-order traversal (left, right, root) of a binary tree,
    implemented three ways: recursion, two stacks, and color marking."""

    def postorderTraversal(self, root):
        """Recursive solution.

        :type root: TreeNode
        :rtype: List[int]
        """
        if root is None:
            return []
        out = []
        out.extend(self.postorderTraversal(root.left))
        out.extend(self.postorderTraversal(root.right))
        out.append(root.val)
        return out

    def postorderTraversal2(self, root):
        """Iterative solution using two stacks.

        The first stack visits nodes in root-right-left order; popping the
        second stack reverses that into left-right-root.
        :type root: TreeNode
        :rtype: List[int]
        """
        if root is None:
            return []
        pending = [root]
        visited = []
        while pending:
            node = pending.pop()
            # Push left before right so right is processed first.
            if node.left is not None:
                pending.append(node.left)
            if node.right is not None:
                pending.append(node.right)
            visited.append(node)
        out = []
        while visited:
            out.append(visited.pop().val)
        return out

    def postorderTraversal3(self, root):
        """Iterative solution with color marking.

        WHITE marks an unexpanded node (children pushed first), GREY marks
        a node whose value is ready to emit; pushes mirror the reverse of
        the recursive visiting order.
        :type root: TreeNode
        :rtype: List[int]
        """
        WHITE, GREY = 0, 1
        if root is None:
            return []
        out = []
        stack = [(WHITE, root)]
        while stack:
            color, node = stack.pop()
            if node is None:
                continue
            if color == WHITE:
                stack.append((GREY, node))
                stack.append((WHITE, node.right))
                stack.append((WHITE, node.left))
            else:
                out.append(node.val)
        return out
if __name__ == '__main__':
    # Build the sample tree [1, null, 2, 3] from the problem statement and
    # print its post-order traversal; expected output: [3, 2, 1].
    slt = Solution()
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(slt.postorderTraversal3(root))
"yangyang@ishansong.com"
] | yangyang@ishansong.com |
c75cb1f819a7c2b1caff1b2ab17f1b923f229602 | a2812fad2ff72d4769d136a4a79c320749bffe72 | /jesusanaya_blog/providers/blog_post.py | 99b6551e5d10f12053d069a0f7159931da5f8821 | [] | no_license | JesusAnaya/jesusanaya_blog | 7976ab2c3b1edc773d5c2e04674f865464033566 | fecc91c479caf8e7c0514fcdb85bcb9ad34f1308 | refs/heads/master | 2021-05-04T10:02:20.308204 | 2016-10-14T09:03:44 | 2016-10-14T09:03:44 | 51,033,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from jesusanaya_blog.services.storage import StorageService
from .base import Provider
class BlogPostProvider(Provider):
    """Provider for blog posts; wires up storage on top of the base Provider."""

    def __init__(self, dbsession, settings):
        """Initialize the base provider and attach a StorageService."""
        super(BlogPostProvider, self).__init__(dbsession, settings)
        self.storage = StorageService(settings)

    def create(self, post_data):
        """Create a blog post from *post_data* (not implemented yet)."""
        pass
| [
"jesus.anaya.dev@gmail.com"
] | jesus.anaya.dev@gmail.com |
fc6c2a31ee7db5f9532e1462ad34bd6736bc43dc | 08cef372f61ba96b05e88a44a8528ac506633c51 | /Pantera/Tools/BLAST/__init__.py | 90ac553468320b5640596f8eea68f82f81856a4f | [] | no_license | xubeisi/Pantera | 322603575db54ff5e63c2fa3b4ef6b6c80376940 | b1315bbf8362ae514971b86824a144ef6b696d71 | refs/heads/master | 2021-06-22T21:58:43.048214 | 2017-08-27T23:23:29 | 2017-08-27T23:23:29 | 105,809,057 | 1 | 0 | null | 2017-10-04T19:25:27 | 2017-10-04T19:25:27 | null | UTF-8 | Python | false | false | 297 | py | __author__ = 'mahajrod'
from Pantera.Tools.BLAST.BLAST import *
from Pantera.Tools.BLAST.BLASTPlus import *
# Instantiate module-level singleton tool wrappers. Each assignment
# deliberately shadows the class imported above with a ready-to-use
# instance, so callers do `from ... import BLASTn` and invoke it directly.
BLAST = BLAST()
BLASTn = BLASTn()
BLASTp = BLASTp()
BLASTPlus = BLASTPlus()
DustMasker = DustMasker()
BLASTDbCmd = BLASTDbCmd()
MakeBLASTDb = MakeBLASTDb()
Windowmasker = Windowmasker()
| [
"mahajrod@gmail.com"
] | mahajrod@gmail.com |
06978625b54d51d9560dc5732e5211749a1fa1fd | d1c67f2031d657902acef4411877d75b992eab91 | /test/test_drip_stat_integration.py | d8752d86bfdd76e55e14d2fec9e4dfbf4bd27a58 | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.drip_stat_integration import DripStatIntegration # noqa: E501
from swagger_client.rest import ApiException
class TestDripStatIntegration(unittest.TestCase):
    """Unit-test stubs for the DripStatIntegration model."""

    def setUp(self):
        """No fixtures required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testDripStatIntegration(self):
        """Placeholder test for DripStatIntegration.

        FIXME: construct the model with example values for its mandatory
        attributes, e.g.
        ``swagger_client.models.drip_stat_integration.DripStatIntegration()``.
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly via the unittest CLI.
    unittest.main()
| [
"john@oram.ca"
] | john@oram.ca |
78476fd94dbd2f8efaad66cfcdfdc067311dd1c7 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200212/example_monogusa2/01fake_stream.py | 2dabee38e314697abf9b966624a9316bbb5a7e37 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 1,017 | py | import logging
from monogusa.events import EventParser, subscription
from monogusa.events import Message
logger = logging.getLogger(__name__)
@subscription.subscribe(Message)
def echo(ev: Message) -> None:
    """Print the content of every incoming ``Message`` event."""
    print("!", ev.content)
def read():
    """Feed events from stdin (when piped) or a built-in demo string
    through the event parser, dispatching each parsed event."""
    import typing as t
    import sys
    import os
    import io

    def stream(default_or_io: t.Union[str, t.IO[str]]):
        # When stdin is not a TTY (i.e. input is piped in), read from it.
        if not os.isatty(sys.stdin.fileno()):
            return sys.stdin
        # Otherwise fall back to the supplied default.
        if isinstance(default_or_io, io.StringIO):
            return default_or_io
        buf = io.StringIO()
        buf.write(default_or_io)
        if not buf.getvalue().endswith("\n"):
            buf.write("\n")
        buf.seek(0)
        return buf

    parser = EventParser(sep=",")
    for raw in stream("Message, hello\nMessage, byebye"):
        entry = raw.strip()
        # Skip blank lines and comments.
        if not entry or entry.startswith("#"):
            continue
        logger.debug("<- %r", entry)
        ev = parser.parse(entry)
        subscription(ev)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
43c1a434653a699d34d6d91683a508e50c615182 | aa3b7c6a81a323d2e17a1be7cb7ce90a20d6099a | /cproject/donation_form/api.py | 48cec882a58d24ee8f22fe9be7e58654cc1de176 | [] | no_license | gadia-aayush/sample | fdf00a4a890af6e4380b133cc64d7df89c1defff | 145b83206f9fb0972d19bef9229da0c1bf0aede0 | refs/heads/master | 2022-12-22T16:54:50.228277 | 2020-08-18T20:26:05 | 2020-08-18T20:26:05 | 288,516,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from donation_form.models import Donationreqs
from rest_framework import viewsets,permissions
from .serializers import DonationRquestsSerializers
#Donation_Request Viewset
class DonationRequestViewSet(viewsets.ModelViewSet):
queryset = Donationreqs.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = DonationRquestsSerializers | [
"gadia.aayush@gmail.com"
] | gadia.aayush@gmail.com |
0de1d75746071df0018fe7e5eb5ddb319207be71 | 11cd15da4be0eba77aee8edc64dbfcacd9f418c4 | /probleme22.py | 8fb7376bcba8d81369815d8f8a79c41b63d82ee5 | [] | no_license | mines-nancy-tcss5ac-2018/td1-YonasHassnaoui | 1bdc1af9872a6faacda5ba0479f8eafddc0ba8ee | 96c3e030fe1345429dd6d17af50e19d8f6383ea4 | refs/heads/master | 2020-03-30T23:38:42.027520 | 2018-10-05T11:00:08 | 2018-10-05T11:00:08 | 151,707,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | def alphabet(lettre):
res=0
al=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
for i in range(len(al)):
if al[i]==lettre:
res=i+1
return res
# Read the comma-separated name list; the Euler data file is a single
# line, so after this loop the global ``p`` holds every quoted name.
fichier=open('p022_names.txt','r')
for i in fichier:
    print(i)
    p=i.split(',')
    print(p)
def solve():
    """Project Euler 22: total of name_score * alphabetical_position.

    Uses the module-global ``p`` (names parsed above). Fixes three bugs in
    the original: it scored the literal character 'x' instead of each
    letter, used 0-based positions instead of the required 1-based ones,
    and never sorted / unquoted / lowercased the names.
    """
    res = 0
    # Strip the surrounding quotes and sort alphabetically, as required.
    noms = sorted(nom.strip().strip('"') for nom in p)
    for k in range(len(noms)):
        somme = 0
        for x in noms[k]:
            # alphabet() expects lowercase; was alphabet('x') — a literal.
            somme = somme + alphabet(x.lower())
        res = res + somme * (k + 1)
    return res
print(solve())
"noreply@github.com"
] | mines-nancy-tcss5ac-2018.noreply@github.com |
b6c6c2429c61e883c2c8c7455645825a069486cf | 5ff9dbbae3bd9675936c6e952657cba08a2aecef | /build/lib/lfm_flow/kernels/kernels.py | e93fcdc76d7212ce6d3ef4d1c9191d0e8b9cdaab | [] | no_license | danieljtait/lfm_flow | 9e4dc892fc2405e8390d5e3861cd3514c9eb03eb | 50e36c3b5bb9692d69e3d47e42438fdfdd00cc3c | refs/heads/master | 2020-04-27T12:38:20.843760 | 2019-03-14T13:06:58 | 2019-03-14T13:06:58 | 174,338,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py |
class Kernel:
pass
| [
"tait.djk@gmail.com"
] | tait.djk@gmail.com |
32e84c96e90c012be5b6f4123a13f29014248c3a | 051c3ee44478265c4510530888335335ec9f7fdf | /ML_Applications/SVM/Mutants/code/SVM_rbf/DigitRecognitionApp_8.py | 3932c672a9a2b7163e2eba4970f01104b676b8ec | [] | no_license | PinjiaHe/VerifyML | b581c016012c62d8439adfce0caef4f098b36d5e | 3bd7c49e45720c1cdfe0af4ac7dd35b201056e65 | refs/heads/master | 2020-03-25T19:40:39.996370 | 2018-01-30T08:58:58 | 2018-01-30T08:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | """
Created on Fri May 26 15:20:01 2017
#Digit Recognition for V & V
#Following note added by RR
Note:
1. The actual digits data from the http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits is different than the one referred in this sklearn example
2. For more info, refer this link http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html and the above one.
3. The digits data referred by this Sklearn example can be downloaded from the following link.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/data/digits.csv.gz
"""
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
import numpy as np
import _pickle as cPickle
# Load the 8x8 handwritten-digit training set (last column = label).
digits = np.loadtxt('digits_Train.csv', delimiter=',')
digits_images_flat = digits[:,:(-1)]
digits_images = digits_images_flat.view()
digits_images.shape = ((-1), 8, 8)
# NOTE(review): training labels are taken from column index +1 here, while
# the test labels below use the LAST column (-1). Given the "Mutants" path
# of this file, this looks like a deliberately seeded fault — confirm
# before reusing this script as a reference implementation.
digits_target = digits[:,(+1)].astype(np.int)
# Load the held-out test set; labels correctly come from the last column.
digits_test = np.loadtxt('digits_Test.csv', delimiter=',')
digits_test_images_flat = digits_test[:,:(-1)]
digits_test_images = digits_test_images_flat.view()
digits_test_images.shape = ((-1), 8, 8)
digits_test_target = digits_test[:,(-1)].astype(np.int)
images_and_labels = list(zip(digits_images, digits_target))
n_samples = len(digits_images)
# Train an RBF-kernel SVM on the flattened pixel vectors.
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits_images_flat, digits_target)
# Evaluate on the test set and report standard metrics.
expected = digits_test_target
predicted = classifier.predict(digits_test_images_flat)
print('Classification report for classifier %s:\n%s\n' % (
    classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))
print("accuracy:", metrics.accuracy_score(expected, predicted))
images_and_predictions = list(zip(digits_test_images, predicted))
# Persist decision scores, the confusion matrix, and the trained model.
np.savetxt('output.txt', classifier.decision_function(digits_test_images_flat))
outputData = {'data_array': metrics.confusion_matrix(expected, predicted)}
with open('output.pkl', 'wb') as outputFile:
    cPickle.dump(outputData, outputFile)
with open('model.pkl', 'wb') as modelFile:
    cPickle.dump(classifier, modelFile)
"anurag.bms@gmail.com"
] | anurag.bms@gmail.com |
d620033debfba7430ebb3d0748e29d104a5f7713 | 764696896b3a0595f83dc7d108d79d23700d4573 | /pytest_relaxed/trap.py | 8f6baba442e4e1b42c6d3515273cda866a17ba12 | [
"BSD-2-Clause"
] | permissive | bitprophet/pytest-relaxed | 57f35b65c54f0ac86032050ac4f841772c755d1a | 5c18490316cbeebe9a4650a26218c10740950d8a | refs/heads/main | 2023-05-27T05:45:06.488119 | 2023-05-23T14:53:28 | 2023-05-23T14:53:28 | 87,468,490 | 31 | 9 | BSD-2-Clause | 2022-12-31T22:25:32 | 2017-04-06T19:45:24 | Python | UTF-8 | Python | false | false | 2,260 | py | """
Test decorator for capturing stdout/stderr/both.
Based on original code from Fabric 1.x, specifically:
* fabric/tests/utils.py
* as of Git SHA 62abc4e17aab0124bf41f9c5f9c4bc86cc7d9412
Though modifications have been made since.
"""
import io
import sys
from functools import wraps
class CarbonCopy(io.BytesIO):
    """A BytesIO that mirrors every write onto additional writer objects."""

    def __init__(self, buffer=b"", cc=None):
        """
        ``cc`` may be a single file-like object or an iterable of them;
        every write to this instance is duplicated onto each of them.
        """
        super().__init__(buffer)
        if cc is None:
            targets = []
        elif hasattr(cc, "write"):
            targets = [cc]
        else:
            targets = cc
        self.cc = targets

    def write(self, s):
        # Normalize to bytes so both str and bytes payloads are accepted.
        data = s.encode("utf-8") if isinstance(s, str) else s
        super().write(data)
        for sink in self.cc:
            sink.write(data)

    @property
    def buffer(self):
        # The real sys.std(out|err) objects expose a .buffer attribute in
        # some situations; mimic that by handing back ourselves.
        return self

    def getvalue(self):
        # Always hand back text, decoding the captured bytes if needed.
        value = super().getvalue()
        return value.decode("utf-8") if isinstance(value, bytes) else value
def trap(func):
    """
    Replace sys.std(out|err) with a wrapper during execution, restored after.
    In addition, a new combined-streams output (another wrapper) will appear at
    ``sys.stdall``. This stream will resemble what a user sees at a terminal,
    i.e. both out/err streams intermingled.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Use another CarbonCopy even though we're not cc'ing; for our "write
        # bytes, return strings" behavior. Meh.
        sys.stdall = CarbonCopy()
        # Swap the real streams for capturing ones, keeping the originals so
        # they can be restored afterwards. Each capture also CCs to stdall.
        my_stdout, sys.stdout = sys.stdout, CarbonCopy(cc=sys.stdall)
        my_stderr, sys.stderr = sys.stderr, CarbonCopy(cc=sys.stdall)
        try:
            return func(*args, **kwargs)
        finally:
            # Always restore the genuine streams, even if func raised.
            sys.stdout = my_stdout
            sys.stderr = my_stderr
            del sys.stdall
    return wrapper
| [
"jeff@bitprophet.org"
] | jeff@bitprophet.org |
b38c5460f365eaedac5a460595d58f26f950814c | f0d521aad290086063c8928ebc064e021d63aa0c | /mlib/Mobigen/Common/Log/__init__.py | af22ed78642e24c02c685db408f255d5baa5012f | [] | no_license | mobigen/MSF_V2 | 9817cc97c2598c3f69b1192d4b186c3bc1761932 | 8a32adc3969d77455464a07523ced68564eb10af | refs/heads/master | 2023-04-27T20:14:16.020536 | 2022-02-18T08:17:10 | 2022-02-18T08:17:10 | 134,684,685 | 3 | 2 | null | 2023-04-18T22:47:33 | 2018-05-24T08:24:01 | Roff | UHC | Python | false | false | 2,938 | py | # -*- coding: cp949 -*-
import sys
import types
from traceback import *
from DummyLog import CDummyLog
from StandardLog import CStandardLog
from StandardErrorLog import CStandardErrorLog
from RotatingLog import CRotatingLog
from PipeLog import CPipeLog
from UDPLog import CUDPLog
__VERSION__ = "Release 2 (2005/10/21)"
# Modified so that the module also works under psyco.full() mode.
#__VERSION__ = "Release 1 (2005/10/11)"
# Resolved the psyco conflict problem.
# Module-level logger instance; set up by Init(), None until then.
__LOG__ = None
#def Init(**args) :
def Init(userDefine = None) :
    """Initialise the module-level __LOG__ logger.

    If userDefine is given it is used as the logger; otherwise a default
    standard-output logger is created. The logger is then published into
    __main__ and into every loaded module that imports Log.
    """
    # Inspect the call stack to learn how this module was imported.
    impStepList = extract_stack()
    if(len(impStepList)==0) :
        # An empty stack means psyco.full() is active; recover the frame
        # through psyco's emulation API instead.
        import psyco
        frame = psyco._getemulframe()
        impStepList = frame.f_code.co_names
    # When imported from somewhere other than __main__, install a temporary
    # dummy logger so that __LOG__ is usable before the real initialisation.
    if(len(impStepList)!=2) :
        curModule = __GetParentModule__()
        if(curModule==None) :
            sys.modules['__main__'].__dict__["__LOG__"] = CDummyLog()
            return
        if(curModule.__name__ != "__main__" and not curModule.__dict__.has_key("__LOG__")) :
            curModule.__dict__["__LOG__"] = CDummyLog()
            return
    # Create the real __LOG__ (user-supplied object or the default logger).
    global __LOG__
    if(userDefine != None) : __LOG__ = userDefine
    else : __LOG__ = __InitMain__()
    sys.modules["__main__"].__LOG__ = __LOG__
    for subModuleName in sys.modules :
        subModule = sys.modules[subModuleName]
        if(type(subModule) == types.NoneType) : continue
        if(not "Log" in subModule.__dict__) : continue
        if(subModuleName == "__main__") : continue
        # Register __LOG__ in each submodule so it is usable there as well.
        subModule.__LOG__ = __LOG__
def __Exception__(type, value, tb):
    # Custom sys.excepthook: fall back to the default hook for interactive
    # sessions (sys.ps1 set), redirected stderr, or syntax errors; otherwise
    # route the traceback through the module logger.
    if hasattr(sys, 'ps1') or not sys.stderr.isatty() or type == SyntaxError:
        sys.__excepthook__(type, value, tb)
    else:
        if(__LOG__) :
            __LOG__.PrintException(type, value, tb)
def AutoException() :
    # Install __Exception__ as the global excepthook (debug builds only;
    # __debug__ is False when Python runs with -O).
    if __debug__:
        sys.excepthook = __Exception__
def SetLevel(level) :
    # Forward a log-level change to the active logger, if one exists.
    global __LOG__
    if(__LOG__) : __LOG__.SetLevel(level)
def __InitMain__() :
    # Default logger factory: log to standard output.
    return CStandardLog()
def __GetParentModule__(Test = 0) :
    """Return the module object that imported Log.py, or None."""
    # impStepList[0] : the function that called __GetParentModule__
    # impStepList[1] : Log.py
    # impStepList[2] : the module that imported Log.py
    try :
        impStepList = extract_stack()
        impStepList.reverse()
        parentModulePath = impStepList[2][0]
    except :
        # Stack extraction failed; assume psyco is active and use its
        # emulated frame to recover the caller's filename.
        import psyco
        frame = psyco._getemulframe(2)
        parentModulePath = frame.f_code.co_filename
    parentModule = None
    for name in sys.modules :
        moduleInfo = str(sys.modules[name])
        if (moduleInfo.find(parentModulePath) != -1) :
            parentModule = sys.modules[name] # parent module found
            break
        elif (moduleInfo.find("__main__") != -1 and \
            moduleInfo.find("<frozen>") != -1) :
            # The case of code compiled with freeze...
            parentModule = sys.modules[name] # parent module found
            break
    return parentModule
def Version() :
    # Expose the module version string.
    return __VERSION__
| [
"cheerupdi@gmail.com"
] | cheerupdi@gmail.com |
926854e5ba9d587446f3978075fb9e0f6484759c | 9633fb1796269d049aad814efc46ac1545c3b88d | /tetris.py | 239dce1c1bc3fe5fd9d561ba450374c4861dac61 | [] | no_license | Ajax12345/pygame | 90fea131736c87778965a11f4b8ed1fcce729576 | 0bfe055d3a1e3e16c5a1bed01b41044dd0671746 | refs/heads/master | 2021-01-21T08:24:07.355205 | 2017-12-04T14:06:57 | 2017-12-04T14:06:57 | 91,626,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,984 | py | import pygame
import random
import collections
import time
import sys
# Map colour names to the RGB tuples used when filling sprite surfaces.
converter = {"tercoise":(0,238,238), 'yellow':(255,215,0), 'purple':(191,62,255), 'green':(127,255,0), 'red':(255,0,0), 'blue':(0, 0, 255), 'brown':(255,127,36)}
class Block(pygame.sprite.Sprite):
    """A single 40x40 square cell sprite at (x, y) in the given colour."""
    def __init__(self, x, y, color):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([40, 40])
        self.image.fill(converter[color])
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x
class Rectangle(pygame.sprite.Sprite):
    """A 100x40 horizontal bar sprite at (x, y) in the given colour."""
    def __init__(self, x, y, color):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([100, 40])
        self.image.fill(converter[color])
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x =x
class LittleRectangle(pygame.sprite.Sprite):
    """An 80x40 horizontal bar sprite at (x, y) in the given colour."""
    def __init__(self, x, y, color):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([80, 40])
        self.image.fill(converter[color])
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x =x
class I:
    """I tetromino: a single 100x40 bar (turquoise)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.main_block = Rectangle(400, 30, 'tercoise')
        self.group.add(self.main_block)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class O:
    """O tetromino: two stacked 80x40 bars (yellow)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = LittleRectangle(400, 30, 'yellow')
        self.bottom = LittleRectangle(400, 70, 'yellow')
        # Add only the sprite attributes (top/bottom) to the group.
        for a, b in self.__dict__.items():
            if a in ['top', 'bottom']:
                self.group.add(b)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class T:
    """T tetromino: a block centred above a 100x40 bar (purple)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = Block(430, 30, 'purple')
        self.bottom = Rectangle(400, 70, 'purple')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class S:
    """S tetromino: two 80x40 bars offset right-over-left (green)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = LittleRectangle(440, 30, 'green')
        self.bottom = LittleRectangle(400, 70, 'green')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class Z:
    """Z tetromino: two 80x40 bars offset left-over-right (red)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = LittleRectangle(400, 30, 'red')
        self.bottom = LittleRectangle(440, 70, 'red')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class J:
    """J tetromino: a block at the left end above a bar (blue)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = Block(400, 30, 'blue')
        self.bottom = Rectangle(400, 70, 'blue')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class L:
    """L tetromino: a block at the right end above a bar (brown)."""
    def __init__(self):
        self.group = pygame.sprite.Group()
        self.top = Block(460, 30, 'brown')
        self.bottom = Rectangle(400, 70, 'brown')
        self.group.add(self.top)
        self.group.add(self.bottom)
    def __iter__(self):
        # Yield each sprite belonging to this piece.
        for sprite in self.group:
            yield sprite
class MainGame:
    """Owns the pygame window, piece queue and the main game loop.

    NOTE: this file is Python 2 (print statements) and depends on pygame
    plus a hard-coded background image path.
    """
    def __init__(self):
        # Background image and 1100x900 window.
        self.image = pygame.image.load('/Users/jamespetullo/Desktop/maxresdefault.jpg')
        self.screen = pygame.display.set_mode((1100, 900))
        self.quit = False
        self.first_group = pygame.sprite.Group()
        # Factory table: piece letter -> piece class.
        self.block_types = {'I':I, 'O':O, 'T':T, 'S':S, 'Z':Z, 'J':J, 'L':L}
        self.game_clock = 1
        self.navigation_y = 0
        self.navigation_x = 0  # -1 left, +1 right, 0 no horizontal motion
        self.current_block = pygame.sprite.Group()
        self.current_block = L().group
        self.future_block = pygame.sprite.Group()
        self.final_blocks = pygame.sprite.Group()  # landed, immovable sprites
        self.prioraty = collections.deque()  # queue of upcoming piece groups
        self.flag = False
        self.current_time = time.time()
    def play(self):
        """Run the event/update/draw loop until the player quits."""
        pygame.init()
        self.screen.fill((255, 255, 255))
        while not self.quit:
            # --- input handling ---
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.quit = True
                if event.type == pygame.KEYDOWN:
                    print 'here111'
                    if event.key == pygame.K_RIGHT:
                        print "here"
                        self.navigation_x = 1
                    if event.key == pygame.K_LEFT:
                        print "here"
                        self.navigation_x = -1
            # --- gravity + horizontal motion for the active piece ---
            for sprite in self.current_block:
                sprite.rect.y += 3
                sprite.rect.x += 10*self.navigation_x
            for sprite in self.future_block:
                sprite.rect.y += 3
            # Queued pieces fall too.
            for group in self.prioraty:
                for sprite in group:
                    sprite.rect.y += 3
            self.navigation_x = 0  # horizontal input applies for one frame
            # Spawn a new random piece every 70 frames.
            if self.game_clock%70 == 0:
                new_group = self.block_types[random.choice(self.block_types.keys())]()
                self.prioraty.append(new_group.group)
            '''
            for sprite in self.future_block:
                if any(pygame.sprite.collide_rect(i, sprite2) for sprite2 in self.final_blocks for i in self.future_block):
                    for s
            '''
            # Queued pieces that touch landed blocks become landed.
            for group in self.prioraty:
                if any(pygame.sprite.collide_rect(i, sprite2) for sprite2 in self.final_blocks for i in group):
                    for sprite in group:
                        self.final_blocks.add(sprite)
            # Land the active piece on floor contact (y >= 700) or collision,
            # then promote the next queued piece; empty queue ends the game.
            for sprite in self.current_block:
                if sprite.rect.y >= 700 or any(pygame.sprite.collide_rect(i, sprite2) for sprite2 in self.final_blocks for i in self.current_block):
                    for sprite in self.current_block:
                        self.final_blocks.add(sprite)
                    try:
                        self.current_block = self.prioraty.popleft()
                    except IndexError:
                        print "Congradulations! Game time was {} minutes".format(round(abs(self.current_time-time.time())/60, 2))
                        sys.exit()
                    break
            # --- drawing ---
            self.screen.blit(self.image, (0, 0))
            self.current_block.update()
            self.current_block.draw(self.screen)
            '''
            self.future_block.update()
            self.future_block.draw(self.screen)
            '''
            for group in self.prioraty:
                group.update()
                group.draw(self.screen)
            self.final_blocks.update()
            self.final_blocks.draw(self.screen)
            self.game_clock += 1
            pygame.display.flip()
if __name__ == '__main__':
    # Entry point: build the game object and run the main loop.
    tetris = MainGame()
    tetris.play()
| [
"noreply@github.com"
] | Ajax12345.noreply@github.com |
e7bbafa17d265718d8a42bc0251991aeb3da2de2 | 4fe1dc7170d2d44e2c9988c71b08f66d469ee4b8 | /Unit7/ej7.28.py | f48af0397e694f0f8fdcb886a9e279d206aa6a27 | [] | no_license | ftorresi/PythonLearning | 53c0689a6f3e7e219a6314a673a318b25cda82d1 | f2aeb5f81d9090a5a5aa69a8d1203688e9f01adf | refs/heads/master | 2023-01-12T00:40:05.806774 | 2020-11-13T14:33:08 | 2020-11-13T14:33:08 | 267,460,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py |
### Polynomials
#import numpy
class Polynomial:
    """A sparse polynomial: dict mapping power (int) -> coefficient."""

    def __init__(self, coefficients):
        # coefficients: dict {power: coefficient}; absent powers are zero.
        self.coeff = coefficients

    def __call__(self, x):
        """Evaluate the polynomial at x."""
        return sum(coeff * x**power for power, coeff in self.coeff.items())

    def _combine(self, other, sign):
        """Return self + sign*other as a Polynomial, dropping zero terms.

        Shared implementation for __add__ (sign=+1) and __sub__ (sign=-1),
        which previously duplicated this logic line for line.
        """
        result = self.coeff.copy()
        for power, coeff in other.coeff.items():
            result[power] = result.get(power, 0) + sign * coeff
        # Delete terms whose coefficient is (or cancelled to) exactly zero,
        # matching the original pruning behaviour of __add__/__sub__.
        return Polynomial({p: c for p, c in result.items() if c != 0})

    def __add__(self, other):
        """Return self + other as Polynomial object."""
        return self._combine(other, 1)

    def __sub__(self, other):
        """Return self - other as Polynomial object."""
        return self._combine(other, -1)

    def __mul__(self, other):
        """Return self * other as Polynomial object.

        Note: zero coefficients are intentionally NOT pruned here, matching
        the historical behaviour (only +/- prune zero terms).
        """
        result = {}
        for i, ci in self.coeff.items():
            for j, cj in other.coeff.items():
                result[i + j] = result.get(i + j, 0) + ci * cj
        return Polynomial(result)
def test_Polynomial():
    """Verify Polynomial evaluation, +, -, and * against known results."""
    p1 = Polynomial({4: 1, 2: -2, 0: 3})
    p2 = Polynomial({0: 4, 1: 3})
    # Bug fix: the original comparisons were `(p1(2)-11) < 1e-14`, which any
    # arbitrarily large *negative* deviation also satisfies, so wrong values
    # could pass. Wrap the differences in abs() so both directions are caught.
    success = abs(p1(2) - 11) < 1e-14
    assert success, "Bug in evaluating values"
    success = abs(p1(-10) - 9803) < 1e-14
    assert success, "Bug in evaluating values"
    success = abs(p2(2) - 10) < 1e-14
    assert success, "Bug in evaluating values"
    success = abs(p2(-10) + 26) < 1e-14
    assert success, "Bug in evaluating values"
    p3 = p1 + p2
    p3_exact = Polynomial({0: 7, 1: 3, 2: -2, 4: 1})
    msg = 'p1 = %s, p2 = %s\np3=p1+p2 = %s\nbut wrong p3 = %s' % (p1, p2, p3_exact, p3)
    assert p3.coeff == p3_exact.coeff, msg
    ## Coefficients are integers here, so == on the dicts is exact and
    ## not subject to round-off errors.
    p4 = p1 * p2
    p4_exact = Polynomial({0: 12, 1: 9, 2: -8, 3: -6, 4: 4, 5: 3})
    msg = 'p1 = %s, p2 = %s\np4=p1*p2 = %s\ngot wrong p4 = %s' % (p1, p2, p4_exact, p4)
    assert p4.coeff == p4_exact.coeff, msg
    p5 = p1 - p2
    p5_exact = Polynomial({0: -1, 1: -3, 2: -2, 4: 1})
    msg = 'p1 = %s, p2 = %s\np5=p1-p2 = %s\nbut wrong p5 = %s' % (p1, p2, p5_exact, p5)
    assert p5.coeff == p5_exact.coeff, msg
    p6 = p2 - p1
    p6_exact = Polynomial({0: 1, 1: 3, 2: 2, 4: -1})
    msg = 'p1 = %s, p2 = %s\np6=p2-p1 = %s\nbut wrong p6 = %s' % (p1, p2, p6_exact, p6)
    assert p6.coeff == p6_exact.coeff, msg
if __name__ == '__main__':
    import sys
    # Run the self-test only when invoked as: python ej7.28.py verify
    if len(sys.argv) >= 2 and sys.argv[1] == 'verify':
        test_Polynomial()
| [
"noreply@github.com"
] | ftorresi.noreply@github.com |
7cda5cdd83a59146eb5c1d6c5eb8be5261114aaf | c4e05230949efbd1ef858839850520ee94a87a58 | /musicbingo/gui/dialogbase.py | 0fb81331348d5a628a6d5e765f7eb7ed4df5e60c | [] | no_license | asrashley/music-bingo | bd33b883da9b6f88df506860475861daea63c6fb | f49d26900a10593a6f993b82d8d782b2e7367f84 | refs/heads/main | 2023-07-20T11:15:47.696132 | 2023-06-29T09:59:51 | 2023-07-05T16:48:41 | 125,717,777 | 1 | 1 | null | 2023-08-28T17:28:04 | 2018-03-18T11:26:17 | Python | UTF-8 | Python | false | false | 3,461 | py | """
A base class for creating dialog boxes
This class is based upon the code from:
http://effbot.org/tkinterbook/tkinter-dialog-windows.htm
"""
from abc import ABC, abstractmethod
from typing import Any, Optional, Protocol, Union
import tkinter as tk # pylint: disable=import-error
from .panel import Panel
class Focusable(Protocol):
    """
    Structural interface for any object that supports ``focus_set``.
    """
    def focus_set(self) -> None:
        """
        Give this object the keyboard focus.
        """
class DialogBase(tk.Toplevel, ABC):
    """
    Base class for dialog boxes.

    Subclasses implement body() to build the dialog contents and apply()
    to consume the entered values. Construction blocks (via wait_window)
    until the dialog is closed.
    """
    NORMAL_BACKGROUND = '#FFF'
    ALTERNATE_BACKGROUND = '#BBB'
    NORMAL_FOREGROUND = "#343434"
    ALTERNATE_FOREGROUND = "#505024"
    TYPEFACE = Panel.TYPEFACE
    def __init__(self, parent: tk.Tk, title: str, height: Union[str, float] = 0,
                 width: Union[str, float] = 0):
        super().__init__(parent, width=width, height=height)
        # Keep the dialog on top of, and minimised with, its parent.
        self.transient(parent)
        self.title(title)
        self.parent = parent
        self.result: Optional[Any] = None
        if height and width:
            self.geometry(f"{width}x{height}")
            body = tk.Frame(self, height=height, width=width)
        else:
            body = tk.Frame(self)
        # Let the subclass populate the body; it may return the widget that
        # should receive initial focus.
        focus = self.body(body)
        if focus:
            self.initial_focus = focus
        else:
            self.initial_focus = self
        body.pack(padx=5, pady=5)
        self.buttonbox()
        # Make the dialog modal.
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # Offset the dialog slightly from the parent's top-left corner.
        self.geometry(f"+{parent.winfo_rootx()+50}+{parent.winfo_rooty()+50}")
        self.initial_focus.focus_set()
        # Block here until the dialog window is destroyed.
        self.wait_window(self)
    @abstractmethod
    def body(self, frame: tk.Frame) -> Optional[Focusable]:
        """
        create dialog body. return widget that should have
        initial focus.
        """
        return None
    def buttonbox(self):
        """
        add standard button box.
        override if you don't want the standard buttons
        """
        box = tk.Frame(self)
        btn = tk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
        btn.pack(side=tk.LEFT, padx=5, pady=5)
        btn = tk.Button(box, text="Cancel", width=10, command=self.cancel)
        btn.pack(side=tk.LEFT, padx=5, pady=5)
        # Keyboard shortcuts mirroring the two buttons.
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()
    # pylint: disable=invalid-name, unused-argument
    def ok(self, event=None):
        """
        called when ok button is pressed
        """
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        self.withdraw()
        self.update_idletasks()
        self.apply()
        self.cancel()
    # pylint: disable=unused-argument
    def cancel(self, event=None):
        """
        called when ok or cancel buttons are pressed, or window is closed
        """
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
    def validate(self) -> bool:
        """
        Validate the fields in this dialog box
        """
        return True
    @abstractmethod
    def apply(self) -> None:
        """
        Called when OK button is pressed just before the dialog is
        closed. Used to make use of the fields in the dialog before it
        is closed.
        """
        # pylint: disable=unnecessary-pass
        pass
| [
"alex@ashley-family.net"
] | alex@ashley-family.net |
94806089d8e2bf6b6faa81a9841e2b0dfc88baf2 | cbedb18df0aaac810aeea87a2273edb15c1cf899 | /mixed bag/day9-10/394. Decode String (straight sol, but not easy to write).py | cb739c9f859f998c3e95e8efa8d9fe182117b893 | [] | no_license | kanglicheng/CodeBreakersCode | 71b833bb9f4c96d520c26f0044365dc62137a940 | 31f7f730227a0e10951e7468bad1b995cf2eafcb | refs/heads/master | 2023-08-07T20:32:05.267695 | 2020-09-14T14:36:25 | 2020-09-14T14:36:25 | 265,978,034 | 0 | 0 | null | 2020-05-22T00:05:29 | 2020-05-22T00:05:29 | null | UTF-8 | Python | false | false | 1,326 | py | class Solution:
def decodeString(self, s: str) -> str:
def rec(s, l, r, pair):
res = ""
i = l
num = 0
while i < r:
curChar = s[i]
if curChar.isnumeric():
j = i
numStr = ""
while s[j].isnumeric():
numStr += s[j]
j += 1
i = j
num = int(numStr)
elif s[i] == '[':
subStr = rec(s, i + 1, pair[i], pair)
for j in range(0, num):
res += subStr
i = pair[i] + 1
else:
res += s[i]
i += 1
return res
n = len(s)
stack, pair = [], dict()
for i in range(0, n):
if s[i] == '[':
stack.append(i)
elif s[i] == ']':
pair[stack.pop()] = i
return rec(s, 0, n, pair)
| [
"56766457+Wei-LiHuang@users.noreply.github.com"
] | 56766457+Wei-LiHuang@users.noreply.github.com |
92595caed1dd7131190d5a6ceabcea4944d68c2e | b4c6013f346e178222cc579ede4da019c7f8c221 | /src/main/python/idlelib/paragraph.py | f11bdaeb77ac38beb420366597c89ff90ac062fe | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"GPL-1.0-or-later",
"LicenseRef-scancode-unicode"
] | permissive | cafebabepy/cafebabepy | e69248c4f3d9bab00e93ee749d273bc2c9244f8d | 4ab0e67b8cd79f2ca7cab6281bc811d3b9bc69c1 | refs/heads/develop | 2022-12-09T21:14:56.651792 | 2019-07-01T09:05:23 | 2019-07-01T09:05:23 | 90,854,936 | 9 | 1 | BSD-3-Clause | 2018-01-02T02:13:51 | 2017-05-10T11:05:11 | Java | UTF-8 | Python | false | false | 7,277 | py | """Extension to format a paragraph or selection to a max width.
Does basic, standard text formatting, and also understands Python
comment blocks. Thus, for editing Python source code, this
extension is really only suitable for reformatting these comment
blocks or triple-quoted strings.
Known problems with comment reformatting:
* If there is a selection marked, and the first line of the
selection is not complete, the block will probably not be detected
as comments, and will have the normal "text formatting" rules
applied.
* If a comment block has leading whitespace that mixes tabs and
spaces, they will not be considered part of the same block.
* Fancy comments, like this bulleted list, aren't handled :-)
"""
import re
from idlelib.config import idleConf
class FormatParagraph:
    """IDLE extension: reformat the selection or enclosing paragraph."""
    menudefs = [
        ('format', [ # /s/edit/format dscherer@cmu.edu
            ('Format Paragraph', '<<format-paragraph>>'),
        ])
    ]
    def __init__(self, editwin):
        # editwin: the editor window this extension is attached to.
        self.editwin = editwin
    def close(self):
        # Drop the editor reference so the window can be garbage-collected.
        self.editwin = None
    def format_paragraph_event(self, event, limit=None):
        """Formats paragraph to a max width specified in idleConf.
        If text is selected, format_paragraph_event will start breaking lines
        at the max width, starting from the beginning selection.
        If no text is selected, format_paragraph_event uses the current
        cursor location to determine the paragraph (lines of text surrounded
        by blank lines) and formats it.
        The length limit parameter is for testing with a known value.
        """
        if limit is None:
            # The default length limit is that defined by pep8
            limit = idleConf.GetOption(
                'extensions', 'FormatParagraph', 'max-width',
                type='int', default=72)
        text = self.editwin.text
        first, last = self.editwin.get_selection_indices()
        if first and last:
            # Use the selection as the paragraph.
            data = text.get(first, last)
            comment_header = get_comment_header(data)
        else:
            # No selection: locate the paragraph around the insert cursor.
            first, last, comment_header, data = \
                find_paragraph(text, text.index("insert"))
        if comment_header:
            newdata = reformat_comment(data, limit, comment_header)
        else:
            newdata = reformat_paragraph(data, limit)
        text.tag_remove("sel", "1.0", "end")
        # Replace the text only if reformatting actually changed it, so the
        # undo stack is not polluted with no-op edits.
        if newdata != data:
            text.mark_set("insert", first)
            text.undo_block_start()
            text.delete(first, last)
            text.insert(first, newdata)
            text.undo_block_stop()
        else:
            text.mark_set("insert", last)
        text.see("insert")
        return "break"
def find_paragraph(text, mark):
    """Returns the start/stop indices enclosing the paragraph that mark is in.

    Also returns the comment format string, if any, and paragraph of text
    between the start/stop indices.

    text is a Tk Text widget; mark is a "line.col" index string into it.
    """
    lineno, col = map(int, mark.split("."))
    line = text.get("%d.0" % lineno, "%d.end" % lineno)
    # Look for start of next paragraph if the index passed in is a blank line
    while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.end" % lineno)
    first_lineno = lineno
    comment_header = get_comment_header(line)
    comment_header_len = len(comment_header)
    # Once start line found, search for end of paragraph (a blank line)
    while get_comment_header(line)==comment_header and \
              not is_all_white(line[comment_header_len:]):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.end" % lineno)
    last = "%d.0" % lineno
    # Search back to beginning of paragraph (first blank line before)
    lineno = first_lineno - 1
    line = text.get("%d.0" % lineno, "%d.end" % lineno)
    while lineno > 0 and \
           get_comment_header(line)==comment_header and \
           not is_all_white(line[comment_header_len:]):
        lineno = lineno - 1
        line = text.get("%d.0" % lineno, "%d.end" % lineno)
    first = "%d.0" % (lineno+1)
    return first, last, comment_header, text.get(first, last)
# This should perhaps be replaced with textwrap.wrap
# This should perhaps be replaced with textwrap.wrap
def reformat_paragraph(data, limit):
    """Return data reformatted to specified width (limit)."""
    lines = data.split("\n")
    i = 0
    n = len(lines)
    # Skip leading blank lines; they are preserved verbatim via lines[:i].
    while i < n and is_all_white(lines[i]):
        i = i+1
    if i >= n:
        return data
    # indent1 is the first line's indent; indent2 (taken from the second
    # line, if any) is used for all continuation lines.
    indent1 = get_indent(lines[i])
    if i+1 < n and not is_all_white(lines[i+1]):
        indent2 = get_indent(lines[i+1])
    else:
        indent2 = indent1
    new = lines[:i]
    partial = indent1
    while i < n and not is_all_white(lines[i]):
        # XXX Should take double space after period (etc.) into account
        # re.split with a capturing group keeps the separators: words are at
        # even indices, the whitespace runs between them at odd indices.
        words = re.split(r"(\s+)", lines[i])
        for j in range(0, len(words), 2):
            word = words[j]
            if not word:
                continue # Can happen when line ends in whitespace
            # Start a new output line when this word would overflow the
            # limit (unless the line is still empty apart from its indent).
            if len((partial + word).expandtabs()) > limit and \
                   partial != indent1:
                new.append(partial.rstrip())
                partial = indent2
            partial = partial + word + " "
            # Keep wider-than-single spacing from the source: if the
            # original separator was not exactly one space, add another.
            if j+1 < len(words) and words[j+1] != " ":
                partial = partial + " "
        i = i+1
    new.append(partial.rstrip())
    # XXX Should reformat remaining paragraphs as well
    new.extend(lines[i:])
    return "\n".join(new)
def reformat_comment(data, limit, comment_header):
    """Return data reformatted to specified width with comment header."""
    # Remove header from the comment lines
    lc = len(comment_header)
    data = "\n".join(line[lc:] for line in data.split("\n"))
    # Reformat to maxformatwidth chars or a 20 char width,
    # whichever is greater.
    format_width = max(limit - len(comment_header), 20)
    newdata = reformat_paragraph(data, format_width)
    # re-split and re-insert the comment header.
    newdata = newdata.split("\n")
    # If the block ends in a \n, we dont want the comment prefix
    # inserted after it. (Im not sure it makes sense to reformat a
    # comment block that is not made of complete lines, but whatever!)
    # Can't think of a clean solution, so we hack away
    block_suffix = ""
    if not newdata[-1]:
        block_suffix = "\n"
        newdata = newdata[:-1]
    return '\n'.join(comment_header+line for line in newdata) + block_suffix
def is_all_white(line):
    """Return True if line is empty or all whitespace."""
    # Stripping all whitespace leaves nothing exactly when the line is
    # blank, which is what the original regex ^\s*$ tested.
    return line.strip() == ""
def get_indent(line):
    """Return the initial space or tab indent of line."""
    # Equivalent to matching ^([ \t]*): the indent is whatever lstrip
    # of spaces/tabs removes from the front.
    stripped = line.lstrip(" \t")
    return line[:len(line) - len(stripped)]
def get_comment_header(line):
    """Return string with leading whitespace and '#' from line or ''.

    A null return indicates that the line is not a comment line. A non-
    null return, such as '    #', will be used to find the other lines of
    a comment block with the same indent.
    """
    # Scan the prefix the original regex ^([ \t]*#*) would capture:
    # first any run of spaces/tabs, then any run of '#' characters.
    end = 0
    length = len(line)
    while end < length and line[end] in " \t":
        end += 1
    while end < length and line[end] == "#":
        end += 1
    return line[:end]
if __name__ == "__main__":
    # Run this module's unit tests when executed directly.
    import unittest
    unittest.main('idlelib.idle_test.test_paragraph',
                  verbosity=2, exit=False)
| [
"zh1bvtan1@gmail.com"
] | zh1bvtan1@gmail.com |
312dd6aaf8ba2a988707f09b3851cb85fa7bc812 | 2387cd7657d82b3102e7f8361496307c5f49a534 | /设计模式/python/singleton.py | b0ed87bf24fba431ef90934fc234f64cfcf4ba84 | [] | no_license | kelele67/ReadBooks | da36b25b01d008b2732ad4673e6b676ac7f25027 | 1cf5fc42e1a9edc32971fbcadb64b4b7a84481ad | refs/heads/master | 2021-01-19T14:48:56.932194 | 2017-10-16T10:11:35 | 2017-10-16T10:11:35 | 100,925,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # import threading
# class Singleton(object):
# """Singleton"""
# instance = None
# lock = threading.RLock()
# @classmethod
# def __new__(cls):
# if cls.instance is None:
# cls.lock.acquire()
# if cls.instance is None:
# cls.instance = super(Singleton, cls).__new__(cls)
# cls.lock.release()
# return cls.instance
# if __name__ == '__main__':
# instance1 = Singleton()
# instance2 = Singleton()
# print (id(instance1) == id(instance2))
# class Singleton(object):
# def __new__(cls):
# if not hasattr(cls, '_instance'):
# cls._instance = super(Singleton, cls).__new__(cls)
# return cls._instance
# if __name__ == '__main__':
# class A(Singleton):
# def __init__(self, s):
# self.s = s
# a = A('apple')
# b = A('banana')
# print (id(a), a.s)
# print (id(b), b.s)
| [
"kingand67@outlook.com"
] | kingand67@outlook.com |
06c38aa52a024c092191fe02628c564ccab24845 | e92a3d0fb77120be99de6040cb6cd34eda0a95f4 | /Ветки в Git, словари, кортежи и множества/code/delete_all-words.py | 0f30acdcfd066b2f9022bb81ba363d691eac67d1 | [] | no_license | Python18Academy/python_first_level | 495f85631f5afc737aa156ef8ca0ea307340c322 | 9ce490da3108474b135a17086f4d11f2a3bbbe55 | refs/heads/master | 2023-09-04T17:00:36.920987 | 2021-03-31T18:44:37 | 2021-03-31T18:44:37 | 331,934,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # я знаю что вы люите подобные списки
spisok = ['skovorodka', 'stolick', 'istoria', 'skovorodka', 'stena', 'kartina']
while 'skovorodka' in spisok:
spisok.remove('skovorodka')
print(spisok)
# выведет только обрезанный список
| [
"isakura313@gmail.com"
] | isakura313@gmail.com |
6b5e7cbb93508bf3bf78ec609a82b00d5bd0e6c9 | 742f1c8301264d4f06fc1d389157613c57614b7f | /web_test/web.py | 3d455f2d3970d34edb9732ad9174c6b8a2f4bab2 | [
"MIT"
] | permissive | DieMyDarling/python-web-test | 93fe9be2fb372545346621a0a6bd5b3961eb9af4 | 31a2beb82c8ad50df481a7ce0541813a2088a2b6 | refs/heads/master | 2022-11-23T10:34:44.072430 | 2020-07-30T18:12:46 | 2020-07-30T18:12:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | from web_test.pages.duckduckgo import Duckduckgo
from web_test.pages.ecosia import Ecosia
from web_test.pages.github import Github
from web_test.pages.google import Google
"""
This module is optional.
Usually it makes sense to call it `app.py`,
but in the context of this template project, our app is "all web",
and the word "web" is already a good name describing exactly what we want.
The idea is to provide a one entry point to all PageObjects
So you can import just this entry point in your test:
from web_test.pages import web
and then fluently access any page:
web.ecosia
# ...
web.searchencrypt
# ...
web.duckduckgo
instead of direct import:
from web_test.pages.ecosia import ecosia
from web_test.pages.searchencrypt import searchencrypt
from web_test.pages.duckduckgo import duckduckgo
ecosia
# ...
searchencrypt
# ...
duckduckgo
Probably instead of:
web_test/web.py
you can use any of:
web_test/pages/web.py
web_test/pages/__init__.py
we type hint variables below to allow better IDE support,
e.g. for Quick Fix feature...
"""
# Module-level page-object singletons; the explicit type hints give the
# IDE better support (e.g. for the Quick Fix feature, per the module docs).
duckduckgo: Duckduckgo = Duckduckgo()
ecosia: Ecosia = Ecosia()
google: Google = Google()
"""
searchencrypt is "PageModule" not "PageObject"
that's we don't have to introduce a new variable for page's object
just an import is enough
There is one nuance though...
If we want the IDE in case of "quick fixing imports" to
show for us ability to directly import searchencrypt from web.py
then we have to do something like this:
from web_test.pages import searchencrypt as _searchencrypt
searchencrypt = _searchencrypt
But probably you will never need it;)
Hence keep things simple;)
"""
from web_test.pages import searchencrypt
github: Github = Github()
| [
"yashaka@gmail.com"
] | yashaka@gmail.com |
5e71698c0e44077c937447562f247b876aac103f | 625a3b84b86df1b0a61a7088373094e481b7502e | /simple_shop/wsgi.py | 8e65b23f6d90e12dc890866495e35847af8f8601 | [] | no_license | muremwa/simple-shop-api | 868dc4747fbdb0e6b3eca18969c373c2dd056b4f | ab96e04030de4f2833a4ede8834c4bce393e4528 | refs/heads/master | 2022-05-09T23:43:28.587469 | 2020-04-16T04:57:03 | 2020-04-16T04:57:03 | 256,277,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for simple_shop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (unless already configured) and
# expose the module-level WSGI callable expected by WSGI servers.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simple_shop.settings')
application = get_wsgi_application()
| [
"danmburu254@gmail.com"
] | danmburu254@gmail.com |
17f12258192b8825a2fe6d30320ad544966d0f2b | 085488720112922ff3aed15f99f3c93911425c4a | /vesper/old_bird/old_bird_detector_redux_1_0.py | 624dd8d95f553d5433cceece3e7c4f84b8249de6 | [
"MIT"
] | permissive | HaroldMills/Vesper | 0b61d18bc241af22bfc251088fc87d72add6367b | ec92fe5231f54336499db189a3bbc6cb08a19e61 | refs/heads/master | 2023-07-05T22:45:27.316498 | 2023-07-04T11:58:14 | 2023-07-04T11:58:14 | 19,112,486 | 49 | 6 | MIT | 2023-02-14T16:09:19 | 2014-04-24T14:55:34 | Python | UTF-8 | Python | false | false | 25,097 | py | """
Module containing reimplementations of Old Bird Tseep and Thrush detectors.
The original detectors were implemented in the late 1990's by Steve Mitchell
and Bill Evans using the MathWorks' Simulink and Real-Time Workshop. The
reimplementations are writen in Python and make use of NumPy and SciPy.
The original detectors ran only on Windows, could process only input
sampled at 22050 hertz, and could process only one file at a time on a
given computer. The reimplementations remove all of these restrictions.
"""
import math
import numpy as np
import scipy.linalg as linalg
import scipy.signal as signal
from vesper.util.bunch import Bunch
_OLD_FS = 22050.
"""
the fixed sample rate of the original Old Bird detectors, in hertz.
The reimplemented detectors can operate on input of a variety of sample
rates. We use the sample rate of the original detectors only to convert
detector settings that were originally specified in units of sample
periods to units of seconds.
"""
# Settings reproducing the original Old Bird Tseep detector. Durations
# expressed as `<count> / _OLD_FS` were originally given in samples.
_TSEEP_SETTINGS = Bunch(
    filter_f0=6000, # hertz
    filter_f1=10000, # hertz
    filter_bw=100, # hertz
    filter_length=100, # taps
    integration_time=2000 / _OLD_FS, # samples
    ratio_delay=.02, # seconds
    ratio_threshold=2, # dimensionless
    min_duration=.100, # seconds
    max_duration=.400, # seconds
    initial_padding=3000 / _OLD_FS, # seconds
    final_padding=0 / _OLD_FS, # seconds
    suppressor_count_threshold=15, # clips
    suppressor_period=20 # seconds
)
# Settings reproducing the original Old Bird Thrush detector.
_THRUSH_SETTINGS = Bunch(
    filter_f0=2800, # hertz
    filter_f1=5000, # hertz
    filter_bw=100, # hertz
    filter_length=100, # taps
    integration_time=4000 / _OLD_FS, # samples
    ratio_delay=.02, # seconds
    ratio_threshold=1.3, # dimensionless
    min_duration=.100, # seconds
    max_duration=.400, # seconds
    initial_padding=5000 / _OLD_FS, # seconds
    final_padding=0 / _OLD_FS, # seconds
    suppressor_count_threshold=10, # clips
    suppressor_period=20 # seconds
)
class _Detector:

    """
    Reimplementation of the Old Bird transient detector.

    The original Old Bird Tseep and Thrush detectors were implemented in
    the late 1990's by Steve Mitchell and Bill Evans using the MathWorks'
    Simulink and Real-Time Workshop. The detectors were based on a transient
    detection algorithm developed in 1994 by Harold Mills at the Bioacoustics
    Research Program of the Cornell Lab of Ornithology. The Tseep and Thrush
    detectors used the same basic transient detection algorithm, but with
    different settings.

    An instance of this class operates on a single audio channel. Its
    `detect` method takes a NumPy array of samples and can be called
    repeatedly with consecutive sample arrays. The `complete_detection`
    method should be called after the final call to the `detect` method.

    During detection, the detector notifies a listener each time it detects
    a clip. The listener must have a `process_clip` method that accepts two
    arguments, a clip start index and length.

    See the `_TSEEP_SETTINGS` and `_THRUSH_SETTINGS` objects above for
    settings that make a `_Detector` behave essentially like the original
    Old Bird Tseep and Thrush detectors (slight differences in the number
    and extent of detected clips remain). The `TseepDetector` and
    `ThrushDetector` subclasses below fix those settings. This
    reimplementation was developed and tested initially in the GitHub
    repository https://github.com/HaroldMills/Vesper-Tseep-Thrush; see
    its README for more about the reimplementation effort.
    """


    def __init__(self, settings, sample_rate, listener):
        """
        settings: a `Bunch` of detector settings (see `_TSEEP_SETTINGS`).
        sample_rate: input sample rate in hertz.
        listener: object whose `process_clip(start_index, length)` method
        is called for each detected clip.
        """

        self._settings = settings
        self._sample_rate = sample_rate
        self._listener = listener

        self._signal_processor = self._create_signal_processor()
        self._series_processor = self._create_series_processor()

        # Total number of input samples seen so far, used to convert
        # per-buffer indices to absolute input indices.
        self._num_samples_processed = 0

        # Samples carried over from the previous `detect` call so that the
        # signal processing pipeline sees a contiguous signal.
        self._recent_samples = np.array([], dtype='float')

        self._initial_samples_repeated = False


    def _create_signal_processor(self):
        # Build the sample-domain pipeline: bandpass filter -> squarer ->
        # integrator -> delay-and-divide power ratio -> crossing marker.

        coefficients = self._design_filter()

        s = self.settings

        integration_length = int(round(s.integration_time * self.sample_rate))

        # We use `math.floor` here rather than `round` since the Simulink
        # .mdl files we have access to suggest that the original Old Bird
        # detectors use MATLAB's `fix` function, which rounds towards zero.
        delay = math.floor(s.ratio_delay * self.sample_rate)

        processors = [
            _FirFilter(coefficients),
            _Squarer(),
            _Integrator(integration_length),
            _DelayAndDivideProcessor(delay),
            _ThresholdCrossingMarker(s.ratio_threshold)
        ]

        return _SignalProcessorChain(processors)


    def _design_filter(self):
        # Least-squares bandpass FIR filter with transition bands of width
        # `filter_bw` hertz on either side of the passband.
        s = self.settings
        f0 = s.filter_f0
        f1 = s.filter_f1
        bw = s.filter_bw
        fs2 = self.sample_rate / 2
        bands = np.array([0, f0 - bw, f0, f1, f1 + bw, fs2]) / fs2
        desired = np.array([0, 0, 1, 1, 0, 0])
        return _firls(s.filter_length, bands, desired)


    def _create_series_processor(self):
        # Build the event-domain pipeline: threshold crossings ->
        # transients -> padded clips -> merged clips -> suppressed clips.

        s = self.settings
        sample_rate = self.sample_rate

        # We use `math.floor` here rather than `round` since the Simulink
        # .mdl files we have access to suggest that the original Old Bird
        # detectors use MATLAB's `fix` function, which rounds toward zero.
        min_length = int(math.floor(s.min_duration * sample_rate))
        max_length = int(math.floor(s.max_duration * sample_rate))

        initial_padding = int(round(s.initial_padding * sample_rate))
        final_padding = int(round(s.final_padding * sample_rate))

        suppressor_period = int(round(s.suppressor_period * sample_rate))

        processors = [
            _TransientFinder(min_length, max_length),
            _TransientPadder(initial_padding, final_padding),
            _ClipMerger(),
            _ClipSuppressor(s.suppressor_count_threshold, suppressor_period)
        ]

        return _SeriesProcessorChain(processors)


    @property
    def settings(self):
        return self._settings


    @property
    def sample_rate(self):
        return self._sample_rate


    @property
    def listener(self):
        # BUG FIX: this formerly returned `self._transient_finder.listener`,
        # but no `_transient_finder` attribute is ever assigned on this
        # class, so reading the property raised AttributeError. The listener
        # is stored in `self._listener` by `__init__`.
        return self._listener


    def detect(self, samples):
        """
        Processes the next consecutive array of input samples, notifying
        the listener of any clips detected.
        """

        augmented_samples = np.concatenate((self._recent_samples, samples))

        if len(augmented_samples) <= self._signal_processor.latency:
            # don't yet have enough samples to fill processing pipeline

            self._recent_samples = augmented_samples

        else:
            # have enough samples to fill processing pipeline

            # Run signal processors on samples.
            crossing_samples = \
                self._signal_processor.process(augmented_samples)

            # Get transient index offset.
            offset = self._num_samples_processed
            if not self._initial_samples_repeated:
                offset += self._signal_processor.latency
                self._initial_samples_repeated = True

            # Add one to offset for agreement with original Old Bird detector.
            offset += 1

            crossings = self._get_crossings(crossing_samples, offset)

            clips = self._series_processor.process(crossings)

            self._notify_listener(clips)

            # Save trailing samples for next call to this method.
            self._recent_samples = \
                augmented_samples[-self._signal_processor.latency:]

        self._num_samples_processed += len(samples)


    def _get_crossings(self, crossing_samples, offset):
        # Returns a sorted list of `(index, rise)` pairs, where `rise` is
        # True for upward crossings of the threshold and False for downward
        # crossings of its inverse (see `_ThresholdCrossingMarker`).

        # Find indices of outward-going threshold crossing events.
        rise_indices = np.where(crossing_samples == 1)[0] + offset
        fall_indices = np.where(crossing_samples == -2)[0] + offset

        return sorted(
            [(i, True) for i in rise_indices] +
            [(i, False) for i in fall_indices])


    def _notify_listener(self, clips):
        for start_index, length in clips:
            self._listener.process_clip(start_index, length)


    def complete_detection(self):

        """
        Completes detection after the `detect` method has been called
        for all input.
        """

        # Send a final falling crossing to the series processor to
        # terminate a transient that may have started more than the
        # minimum clip duration before the end of the input but for
        # which for whatever reason there has not yet been a fall.
        fall = (self._num_samples_processed, False)
        clips = self._series_processor.complete_processing([fall])
        self._notify_listener(clips)

        if hasattr(self._listener, 'complete_processing'):
            self._listener.complete_processing()
class _SignalProcessor:
    # Base class for sample-stream processing stages. `latency` is the
    # number of input samples a stage consumes before producing its first
    # output sample (e.g. filter order for an FIR stage).

    def __init__(self, latency):
        self._latency = latency

    @property
    def latency(self):
        return self._latency

    def process(self, x):
        # Subclasses must transform the input array and return the result.
        raise NotImplementedError()
class _FirFilter(_SignalProcessor):
    # FIR filter stage; latency is the filter order, i.e. the number of
    # taps minus one.

    def __init__(self, coefficients):
        super().__init__(len(coefficients) - 1)
        self._coefficients = coefficients

    def process(self, x):
        # FFT-based convolution; 'valid' mode emits only fully-overlapped
        # output samples, which is what makes the latency `len(coeffs) - 1`.
        return signal.fftconvolve(x, self._coefficients, mode='valid')
class _Squarer(_SignalProcessor):
    # Squares each sample, converting the filtered signal to power.
    # Zero latency.

    def __init__(self):
        super().__init__(0)

    def process(self, x):
        return x * x
class _Integrator(_FirFilter):
    # Moving-average (boxcar) FIR filter that averages the squared signal
    # over `integration_length` samples.

    # An alternative to making this class an `_FirFilter` subclass would
    # be to use the `np.cumsum` function to compute the cumulative sum
    # of the input and then the difference between the result and a
    # delayed version of the result. That approach is more efficient
    # but it has numerical problems for sufficiently long inputs
    # (the cumulative sum of the squared samples grows ever larger, but
    # the samples do not, so you'll eventually start throwing away sample
    # bits), so I have chosen not to use it. An alternative would be to use
    # Cython or Numba or something like that to implement the integration
    # in a way that is both faster and accurate for arbitrarily long inputs.

    def __init__(self, integration_length):
        coefficients = np.ones(integration_length) / integration_length
        super().__init__(coefficients)
class _DelayAndDivideProcessor(_SignalProcessor):
    # Divides the signal by a version of itself delayed by `delay` samples,
    # yielding the power ratio that is tested against the detection
    # threshold.
    #
    # NOTE(review): the declared latency is `delay - 1` rather than `delay`,
    # presumably for agreement with the original Old Bird timing — confirm.

    def __init__(self, delay):
        super().__init__(delay - 1)
        self._delay = delay

    def process(self, x):
        return x[self._delay:] / x[:-self._delay]
class _ThresholdCrossingMarker(_SignalProcessor):
    # Marks crossings of the power ratio over the threshold and its
    # inverse. The latency of one sample comes from the `np.diff` below.

    def __init__(self, threshold):
        super().__init__(1)
        self._threshold = threshold

    def process(self, x):

        # Compare input to threshold and its inverse.
        y = np.zeros(len(x))
        y[x > self._threshold] = 1
        y[x < 1. / self._threshold] = -2

        # Take differences to mark where the input crosses the threshold
        # and its inverse. The four types of crossing will be marked as
        # follows:
        #
        #      1 - upward crossing of threshold
        #     -1 - downward crossing of threshold
        #      2 - upward crossing of threshold inverse
        #     -2 - downward crossing of threshold inverse
        return np.diff(y)
class _SignalProcessorChain(_SignalProcessor):
    # Composes several signal processors into a single stage whose latency
    # is the sum of the component latencies.

    def __init__(self, processors):
        super().__init__(sum(p.latency for p in processors))
        self._processors = processors

    def process(self, x):
        # Feed the input through every stage in order.
        result = x
        for stage in self._processors:
            result = stage.process(result)
        return result
class _SeriesProcessor:
    # Base class for processors of event series (threshold crossings,
    # transients, clips) rather than sample arrays.

    def process(self, items):
        raise NotImplementedError()

    def complete_processing(self, items):
        # Process any final items. Stateful subclasses override this to
        # also flush whatever they are holding.
        return self.process(items)
# States of the `_TransientFinder` threshold-crossing state machine.
_STATE_DOWN = 0       # ratio below threshold, no transient in progress
_STATE_UP = 1         # ratio above threshold, transient in progress
_STATE_HOLDING = 2    # fell before minimal length, holding transient open
class _TransientFinder(_SeriesProcessor):

    """Finds transients in a series of threshold crossings."""


    def __init__(self, min_length, max_length):
        # Transient lengths are in samples: candidates shorter than
        # `min_length` are extended to it, longer than `max_length`
        # truncated to it.
        self._min_length = min_length
        self._max_length = max_length
        self._state = _STATE_DOWN
        self._start_index = 0
        """
        index of start of current transient.

        The value of this attribute only has meaning for the up and holding
        states. It does not mean anything for the down state.
        """


    def process(self, crossings):
        # `crossings` is a sorted sequence of `(index, rise)` pairs, where
        # `rise` is True for a rise and False for a fall. Returns a list
        # of `(start_index, length)` transients.

        transients = []
        emit = transients.append

        for index, rise in crossings:

            if self._state == _STATE_DOWN:

                if rise:
                    # rise while down

                    # Start new transient.
                    self._start_index = index
                    self._state = _STATE_UP

                # Do nothing for fall while down.

            elif self._state == _STATE_UP:

                if rise:
                    # rise while up

                    if index == self._start_index + self._max_length:
                        # rise just past end of maximal transient

                        # Emit maximal transient.
                        emit((self._start_index, self._max_length))

                        # Return to down state. It seems a little odd that
                        # a rise would return us to the down state, but
                        # that is what happens in the original Old Bird
                        # detector (see line 252 of the original detector
                        # source code file splimflipflop.c), and our goal
                        # here is to reimplement that detector. This code
                        # should seldom execute on real inputs, since it
                        # should be rare for two consecutive rises to occur
                        # precisely `self._max_length` samples apart.
                        self._state = _STATE_DOWN

                    elif index > self._start_index + self._max_length:
                        # rise more than one sample past end of maximal
                        # transient

                        # Emit maximal transient
                        emit((self._start_index, self._max_length))

                        # Start new transient.
                        self._start_index = index

                    # Do nothing for rise before end of maximal transient.

                else:
                    # fall while up

                    if index < self._start_index + self._min_length:
                        # fall before end of minimal transient
                        self._state = _STATE_HOLDING

                    else:
                        # fall at or after end of minimal transient

                        length = index - self._start_index

                        # Truncate transient if after end of maximal transient.
                        if length > self._max_length:
                            length = self._max_length

                        # Emit transient.
                        emit((self._start_index, length))

                        self._state = _STATE_DOWN

            else:
                # holding after short transient

                if rise:
                    # rise while holding after short transient

                    if index > self._start_index + self._min_length:
                        # rise follows end of minimal transient by at least
                        # one non-transient sample

                        # Emit minimal transient.
                        emit((self._start_index, self._min_length))

                        # Start new transient.
                        self._start_index = index

                    self._state = _STATE_UP

                else:
                    # fall while holding after short transient

                    if index >= self._start_index + self._min_length:
                        # fall at or after end of minimal transient

                        # Emit minimal transient.
                        emit((self._start_index, self._min_length))

                        self._state = _STATE_DOWN

                    # Do nothing for fall before end of minimal transient.

        return transients
class _TransientPadder(_SeriesProcessor):
    # Extends each transient by fixed padding before its start and after
    # its end, clipping at the beginning of the signal.

    def __init__(self, initial_padding, final_padding):
        self._initial_padding = initial_padding
        self._final_padding = final_padding

    def process(self, transients):
        # Returns padded `(start_index, length)` pairs.
        padded = []
        for start_index, length in transients:
            padded_start = start_index - self._initial_padding
            padded_length = length + self._initial_padding + self._final_padding
            if padded_start < 0:
                # Clip at the start of the signal, shortening the clip.
                padded_length += padded_start
                padded_start = 0
            padded.append((padded_start, padded_length))
        return padded
class _ClipMerger(_SeriesProcessor):
    # Merges clips that overlap or immediately abut into single clips.
    # Stateful: the most recent clip is held between `process` calls so it
    # can be merged with clips of the next batch; `complete_processing`
    # flushes it.

    def __init__(self):
        self._prev_start_index = None
        self._prev_end_index = None

    def process(self, clips):
        merged_clips = []
        for start_index, length in clips:
            if self._prev_start_index is None:
                # first clip
                self._remember_clip(start_index, length)
            elif start_index <= self._prev_end_index:
                # not first clip, and new clip overlaps or immediately
                # follows previous clip

                # Merge new clip into previous clip.
                # NOTE(review): this sets the merged end to the *new* clip's
                # end, which would shrink the clip if the new one ended
                # earlier — presumably clips arrive with nondecreasing end
                # indices; confirm.
                self._prev_end_index = start_index + length
            else:
                # not first clip, new clip does not overlap previous clip
                self._append_previous_clip(merged_clips)
                self._remember_clip(start_index, length)
        return merged_clips

    def _remember_clip(self, start_index, length):
        self._prev_start_index = start_index
        self._prev_end_index = start_index + length

    def _append_previous_clip(self, clips):
        prev_length = self._prev_end_index - self._prev_start_index
        clips.append((self._prev_start_index, prev_length))

    def complete_processing(self, clips):
        # Merge any final batch, then flush the held clip.
        merged_clips = self.process(clips)
        if self._prev_start_index is not None:
            # one more clip to emit
            self._append_previous_clip(merged_clips)
        return merged_clips
class _ClipSuppressor(_SeriesProcessor):
    # Suppresses clips when they arrive too frequently: a clip is dropped
    # when `count_threshold` clips (including it) started within fewer than
    # `period` samples of each other.

    def __init__(self, count_threshold, period):
        self._count_threshold = count_threshold
        self._period = period
        self._recent_start_indices = []

    def process(self, clips):
        kept = []
        history = self._recent_start_indices

        for start_index, length in clips:

            # Remember this clip, keeping at most `count_threshold` of the
            # most recent start indices.
            history.append(start_index)
            if len(history) > self._count_threshold:
                history.pop(0)

            # Suppress when the last `count_threshold` clips all started
            # within fewer than `period` samples.
            suppress = (
                len(history) == self._count_threshold
                and history[-1] - history[0] < self._period
            )

            if not suppress:
                kept.append((start_index, length))

        return kept
class _SeriesProcessorChain(_SeriesProcessor):
    # Composes several series processors into a single pipeline.

    def __init__(self, processors):
        self._processors = processors

    def process(self, items):
        result = items
        for stage in self._processors:
            result = stage.process(result)
        return result

    def complete_processing(self, items):
        # Let every stage flush whatever state it holds.
        result = items
        for stage in self._processors:
            result = stage.complete_processing(result)
        return result
class TseepDetector(_Detector):
    # `_Detector` configured to match the original Old Bird Tseep detector.

    extension_name = 'Old Bird Tseep Detector Redux 1.0'

    def __init__(self, sample_rate, listener):
        super().__init__(_TSEEP_SETTINGS, sample_rate, listener)
class ThrushDetector(_Detector):
    # `_Detector` configured to match the original Old Bird Thrush detector.

    extension_name = 'Old Bird Thrush Detector Redux 1.0'

    def __init__(self, sample_rate, listener):
        super().__init__(_THRUSH_SETTINGS, sample_rate, listener)
def _firls(numtaps, bands, desired):

    """
    Designs an FIR filter that is optimum in a least squares sense.

    This function is like `scipy.signal.firls` except that `numtaps`
    can be even as well as odd and band weighting is not supported.
    """

    # TODO: Add support for band weighting and then submit a pull
    # request to improve `scipy.signal.firls`.

    numtaps = int(numtaps)

    # SciPy handles odd lengths; even lengths are handled locally.
    if numtaps % 2 == 0:
        return _firls_even(numtaps, bands, desired)
    return signal.firls(numtaps, bands, desired)
def _firls_even(numtaps, bands, desired):

    """
    Designs an even-length, linear-phase FIR filter that is optimum in a
    least squares sense.

    `bands` and `desired` are flat, even-length sequences of normalized
    band edges and the desired amplitudes at those edges, as for
    `scipy.signal.firls`. Returns the `numtaps` (symmetric) coefficients.

    This function implements an algorithm similar to the one of the
    SciPy `firls` function, but for even-length filters rather than
    odd-length ones. See paper notes entitled "Least squares FIR
    filter design for even N" for derivation. The derivation is
    similar to that of Ivan Selesnick's "Linear-Phase FIR Filter
    Design By Least Squares" (available online at
    http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7),
    with due alteration of detail for even filter lengths.
    """

    # BUG FIX: the original assigned to `bands.shape` and `desired.shape`,
    # reshaping the *caller's* arrays in place. Reshape local views instead
    # (also accepts plain sequences now, via `asarray`).
    bands = np.asarray(bands).reshape(-1, 2)
    desired = np.asarray(desired).reshape(-1, 2)

    weights = np.ones(len(desired))

    M = int(numtaps / 2)

    # Compute M x M matrix Q (actually twice Q).
    n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
    q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weights)
    Q1 = linalg.toeplitz(q[:M])
    Q2 = linalg.hankel(q[1:M + 1], q[M:])
    Q = Q1 + Q2

    # Compute M-vector b.
    # NOTE(review): only the second band pair (`bands[1]`) contributes
    # here, i.e. a single passband seems to be assumed — confirm if this
    # is ever used with a different band structure.
    k = np.arange(M) + .5
    e = bands[1]
    b = np.diff(e * np.sinc(e * k[:, np.newaxis])).reshape(-1)

    # Compute a (actually half a).
    a = np.dot(linalg.pinv(Q), b)

    # h is `a` preceded by its mirror image: an even-length, symmetric
    # (linear-phase) filter.
    h = np.concatenate((np.flipud(a), a))

    return h
| [
"harold.mills@gmail.com"
] | harold.mills@gmail.com |
5218e4c00c5dad5ac8b32aa30d93dff669c14ca5 | ee1bd2a5c88989a43fee1d9b3c85c08d66392502 | /intro_to_statistics/class15_probability.py | 002c1a3723c3ee75f9b05865185001170828f69a | [] | no_license | shasky2014/PythonLearning | 46288bd915466110ee14b5ee3c390ae9b4f67922 | 04c06d06a2c3f1c4e651627fd6b224f55205c06f | refs/heads/master | 2021-06-27T13:14:10.730525 | 2020-10-09T07:54:31 | 2020-10-09T07:54:31 | 252,903,485 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | # Relative Probabilities 1
# Let's suppose we have a fair coin and we flipped it four times.
# p1= 4*0.5*0.5*0.5*0.5
# p2= 0.5
#
# print p1/p2
# FlipPredictor
# A coin is drawn at random from a bag of coins of varying probabilities
# Each coin has the same chance of being drawn
# Your class FlipPredictor will be initialized with a list of the probability of
# heads for each coin. This list of probabilities can be accessed as self.coins
# in the functions you must write. The function update will be called after every
# flip to enable you to update your estimate of the probability of each coin being
# the selected coin. The function pheads may be called and any time and will
# return your best estimate of the next flip landing on heads.
from __future__ import division
class FlipPredictor(object):

    """
    Bayesian predictor for flips of a coin drawn at random from a bag of
    coins with known heads probabilities. Maintains a posterior over which
    coin was drawn and updates it after each observed flip.
    """

    def __init__(self, coins):
        # `coins` lists each coin's probability of heads; the prior over
        # which coin was drawn is uniform.
        self.coins = coins
        count = len(coins)
        self.probs = [1.0 / count] * count

    def pheads(self):
        """Return the current estimate that the next flip lands heads."""
        return sum(c * p for c, p in zip(self.coins, self.probs))

    def update(self, result):
        """Bayes-update the coin posterior given a flip result, 'H' or 'T'."""
        evidence = self.pheads()
        if result == 'H':
            posterior = [
                c * p / evidence
                for c, p in zip(self.coins, self.probs)]
        else:
            posterior = [
                (1 - c) * p / (1 - evidence)
                for c, p in zip(self.coins, self.probs)]
        self.probs = posterior
# The code below this line tests your implementation.
# You need not change it
# You may add additional test cases or otherwise modify if desired
def test(coins, flips):
    """Run a FlipPredictor over `flips`, recording pheads() after each flip."""
    predictor = FlipPredictor(coins)
    guesses = []
    for flip in flips:
        predictor.update(flip)
        guesses.append(predictor.pheads())
    return guesses
def maxdiff(l1, l2):
    """Largest elementwise absolute difference between two sequences."""
    return max(abs(a - b) for a, b in zip(l1, l2))
testcases = [
(([0.5, 0.4, 0.3], 'HHTH'), [0.4166666666666667, 0.432, 0.42183098591549295, 0.43639398998330553]),
(([0.14, 0.32, 0.42, 0.81, 0.21], 'HHHTTTHHH'),
[0.5255789473684211, 0.6512136991788505, 0.7295055220497553, 0.6187139453483192, 0.4823974597714815,
0.3895729901052968, 0.46081730193074644, 0.5444108434105802, 0.6297110187222278]),
(([0.14, 0.32, 0.42, 0.81, 0.21], 'TTTHHHHHH'),
[0.2907741935483871, 0.25157009005730924, 0.23136284577678012, 0.2766575695593804, 0.3296000585271367,
0.38957299010529806, 0.4608173019307465, 0.5444108434105804, 0.6297110187222278]),
(([0.12, 0.45, 0.23, 0.99, 0.35, 0.36], 'THHTHTTH'),
[0.28514285714285714, 0.3378256513026052, 0.380956725493104, 0.3518717367468537, 0.37500429586037076,
0.36528605387582497, 0.3555106542906013, 0.37479179323540324]),
(([0.03, 0.32, 0.59, 0.53, 0.55, 0.42, 0.65], 'HHTHTTHTHHT'),
[0.528705501618123, 0.5522060353798126, 0.5337142767315369, 0.5521920592821695, 0.5348391689038525,
0.5152373451083692, 0.535385450497415, 0.5168208803156963, 0.5357708613431963, 0.5510509656933194,
0.536055356823069])]
for inputs, output in testcases:
    # A run passes when every guess is within 0.001 of the expected value.
    verdict = 'Correct' if maxdiff(test(*inputs), output) < 0.001 else 'Incorrect'
    print(verdict)
| [
"249398363@qq.com"
] | 249398363@qq.com |
d584073eb7a7fc0392f9fc00e0573e29debd20dd | 66c6df450753acc7c41db5afe66abd35d5018c8c | /cliente Rujel/bin92.py | 76d7a6fac53865a674f5e3fd250b034be7f8ee9a | [] | no_license | hanmiton/CodigoCompletoEncriptacion | a33807d9470b538842751071031c9ce60951260f | efb7898af5d39025e98c82f1f71c8e9633cce186 | refs/heads/master | 2020-03-24T02:03:08.242655 | 2018-07-25T22:41:05 | 2018-07-25T22:41:05 | 142,360,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | import sys
import math
import random
import openpyxl
import xlsxwriter
from time import time
LAMBDA = 16 #security parameter
N = LAMBDA
P = LAMBDA ** 2
Q = LAMBDA ** 5
def principal(m1,m2):
    # Reads integers from 'cifrado.xlsx' (sheet 'Hoja1'), converts each to
    # a binary digit list, and encrypts it bit by bit, returning the list
    # of encrypted bit lists.
    #
    # NOTE(review): `m1` and `m2` are never used — the data comes from the
    # spreadsheet instead. Confirm the intended interface.
    numsA = []
    numsB = []
    aux = []
    numsAEncrypt = []
    numsBEncrypt = []
    keys= []
    doc = openpyxl.load_workbook('cifrado.xlsx')
    doc.get_sheet_names()
    hoja = doc.get_sheet_by_name('Hoja1')
    prueba = []
    for fila in hoja.rows:
        for columna in fila:
            # Binary digit lists for the cell value and for a constant 0.
            boln1 = bin(int(columna.value))
            boln2 = bin(0)
            # Left-pad the shorter list with zeros so both have equal
            # length. NOTE(review): `aux` is never cleared between cells,
            # so padding accumulates across iterations — confirm whether
            # that is intentional.
            if(len(boln1) > len(boln2)):
                print len(boln1) - len(boln2)
                for i in range(0, len(boln1) - len(boln2)):
                    aux.append(0)
                boln2 = aux + boln2
            else:
                print len(boln2) - len(boln1)
                for i in range(0, len(boln2) - len(boln1)):
                    aux.append(0)
                boln1 = aux + boln1
            # One fresh key per bit; encrypt each bit of the cell value.
            key = map(keygen,boln1)
            boln1Encrypt = map(encrypt,key,boln1)
            #boln2Encrypt = map(encrypt,key,boln2)
            numsA.append(boln1)
            numsB.append(boln2)
            keys.append(key)
            numsAEncrypt.append(boln1Encrypt)
    return numsAEncrypt
    # NOTE(review): everything below is unreachable (dead experimental code
    # left behind the return above); several of the names it uses are
    # empty or stale here.
    boln1Encrypt = []
    boln2Encrypt = []
    sumEncrypt = []
    mulEnctypt = []
    res = []
    sumEncrypt = map(add,boln1Encrypt,boln2Encrypt)
    strEncriptSum = ''.join(str(e) for e in sumEncrypt)
    mulEnctypt = map(mult,boln1Encrypt, boln2Encrypt)
    resSuma = map (decrypt, key, sumEncrypt)
    strSuma = ''.join(str(e) for e in resSuma)
    workbook = xlsxwriter.Workbook('enc1.xlsx')
    worksheet = workbook.add_worksheet()
    i=2
    celda = 'A' + repr(i)
    celda2 = 'B' + repr(i)
    worksheet.write(celda, strEncriptSum)
    worksheet.write(celda2, str(len(sumEncrypt) ))
    workbook.close()
    decSuma = int(strSuma, 2)
    #start_time = time()
    resMult = map (decrypt, key, mulEnctypt)
    #elapsed_time = time() - start_time
    #return elapsed_time
    strMult = ''.join(str(e) for e in resMult)
    decMult = int(strMult, 2)
    return decSuma
def quot(z, p):
    """Nearest-integer quotient of z by p (round-half-up integer division)."""
    # http://stackoverflow.com/questions/3950372/round-with-integer-division
    half = p // 2
    return (z + half) // p
def mod(z, p):
    """Balanced remainder of z modulo p, i.e. z minus its nearest multiple of p."""
    # Nearest-integer quotient (the `quot` helper, inlined).
    nearest = (z + p // 2) // p
    return z - nearest * p
def keygen(n):
    """Return a random odd P-bit secret key (the argument is unused)."""
    while True:
        candidate = random.getrandbits(P)
        if candidate % 2 == 1:
            return candidate
def encrypt(key, aBit):
    """Encrypt one bit: c = key*q + even noise + bit, with random Q-bit q."""
    q = random.getrandbits(Q)
    # Even noise term, so it vanishes under the final mod-2 in decryption.
    even_noise = 2 * random.getrandbits(N - 1)
    return key * q + even_noise + aBit
def decrypt(key, cipherText):
    """Recover the plaintext bit: balanced remainder mod key, then mod 2."""
    # Balanced remainder (the `mod`/`quot` helpers, inlined): residue taken
    # relative to the nearest multiple of `key`.
    nearest_multiple = (cipherText + key // 2) // key
    return (cipherText - nearest_multiple * key) % 2
def add(cipherText1, cipherText2):
    # Homomorphic addition: sum of ciphertexts decrypts to XOR of the bits.
    return cipherText1 + cipherText2
def mult(cipherText1, cipherText2):
    # Homomorphic multiplication: product of ciphertexts decrypts to AND
    # of the bits.
    return cipherText1 * cipherText2
def bin(numero):
    """
    Convert a non-negative integer to a list of binary digits, most
    significant bit first. Returns [0] for zero and an error string for
    negative input. (Shadows the builtin `bin` on purpose; callers in this
    module rely on the list result.)
    """
    if numero == 0:
        return [0]
    if numero < 0:
        return " no se pudo convertir el numero. ingrese solo numeros positivos"
    # Collect bits least-significant first, then reverse.
    bits = []
    while numero > 0:
        bits.append(numero % 2)
        numero = numero // 2
    bits.reverse()
    return bits
if __name__ == '__main__':
    # BUG FIX: the original called `principal(m1, m2)` with names that are
    # never defined at module scope, so running the script raised NameError.
    # `principal` ignores its arguments (it reads 'cifrado.xlsx' directly),
    # so placeholders are passed here.
    principal(None, None)
"hanmilton_12@outlook.com"
] | hanmilton_12@outlook.com |
0df51dc4002580c130c48d9c16bdcd453f42d795 | 3665e5e6946fd825bb03b3bcb79be96262ab6d68 | /jc/parsers/route.py | 668ea3545e3157b06975c36ee8c9bd699778ec9a | [
"MIT",
"BSD-3-Clause"
] | permissive | philippeitis/jc | a28b84cff7fb2852a374a7f0f41151b103288f26 | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | refs/heads/master | 2021-02-16T05:03:03.022601 | 2020-03-04T16:30:52 | 2020-03-04T16:30:52 | 244,969,097 | 0 | 0 | MIT | 2020-03-08T21:10:36 | 2020-03-04T18:01:38 | null | UTF-8 | Python | false | false | 4,085 | py | """jc - JSON CLI output utility route Parser
Usage:
specify --route as the first argument if the piped input is coming from route
Compatibility:
'linux'
Examples:
$ route -ee | jc --route -p
[
{
"destination": "default",
"gateway": "gateway",
"genmask": "0.0.0.0",
"flags": "UG",
"metric": 100,
"ref": 0,
"use": 0,
"iface": "ens33",
"mss": 0,
"window": 0,
"irtt": 0
},
{
"destination": "172.17.0.0",
"gateway": "0.0.0.0",
"genmask": "255.255.0.0",
"flags": "U",
"metric": 0,
"ref": 0,
"use": 0,
"iface": "docker",
"mss": 0,
"window": 0,
"irtt": 0
},
{
"destination": "192.168.71.0",
"gateway": "0.0.0.0",
"genmask": "255.255.255.0",
"flags": "U",
"metric": 100,
"ref": 0,
"use": 0,
"iface": "ens33",
"mss": 0,
"window": 0,
"irtt": 0
}
]
$ route -ee | jc --route -p -r
[
{
"destination": "default",
"gateway": "gateway",
"genmask": "0.0.0.0",
"flags": "UG",
"metric": "100",
"ref": "0",
"use": "0",
"iface": "ens33",
"mss": "0",
"window": "0",
"irtt": "0"
},
{
"destination": "172.17.0.0",
"gateway": "0.0.0.0",
"genmask": "255.255.0.0",
"flags": "U",
"metric": "0",
"ref": "0",
"use": "0",
"iface": "docker",
"mss": "0",
"window": "0",
"irtt": "0"
},
{
"destination": "192.168.71.0",
"gateway": "0.0.0.0",
"genmask": "255.255.255.0",
"flags": "U",
"metric": "100",
"ref": "0",
"use": "0",
"iface": "ens33",
"mss": "0",
"window": "0",
"irtt": "0"
}
]
"""
import jc.utils
import jc.parsers.universal
class info():
    # Parser metadata consumed by the jc framework.
    version = '1.0'
    description = 'route command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'

    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux']
    # Commands that trigger this parser under jc's magic command syntax.
    magic_commands = ['route']
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "destination":  string,
            "gateway":      string,
            "genmask":      string,
            "flags":        string,
            "metric":       integer,
            "ref":          integer,
            "use":          integer,
            "mss":          integer,
            "window":       integer,
            "irtt":         integer,
            "iface":        string
          }
        ]
    """
    for record in proc_data:
        # These fields are numeric in route's output; convert them in
        # place, mapping unparseable values to None.
        for field in ('metric', 'ref', 'use', 'mss', 'window', 'irtt'):
            if field not in record:
                continue
            try:
                record[field] = int(record[field])
            except ValueError:
                record[field] = None

    return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    # Drop the 'Kernel IP routing table' banner line and lower-case the
    # header row so its column names become dictionary keys.
    table_lines = data.splitlines()[1:]
    table_lines[0] = table_lines[0].lower()

    raw_output = jc.parsers.universal.simple_table_parse(table_lines)

    return raw_output if raw else process(raw_output)
| [
"kellyjonbrazil@gmail.com"
] | kellyjonbrazil@gmail.com |
7e3ced9ace84a2042505115765dc9b9879f9ec7e | 55b4fe0a6616b30c128b51a9918605050ce49f6d | /migrate_reverb | 2a0df049882f7873eb0353b8db62d960747cadec | [] | no_license | samhaug/ScS_reverb_setup | 783a4fb7c942a598f18dc6c9e3544aa5e2bbcafe | 05e96b9f871d25a1e7b5e9284083167993f56cec | refs/heads/master | 2021-01-12T03:35:45.657459 | 2017-06-24T17:24:07 | 2017-06-24T17:24:07 | 78,234,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,844 | #!/home/samhaug/anaconda2/bin/python
'''
==============================================================================
File Name : migrate_reverb.py
Purpose : Perform a migration to detect reflection coefficients of mid mantle
discontinuities. Must have access to a lookup table, waveform glossary,
data stripped of zeroth-order discontinuities.
See eq (14) of 'A Study of mid-mantle layering beneath the Western Pacific'
1989, Revenaugh & Jordan.
Creation Date : 14-03-2017
Last Modified : Tue 14 Mar 2017 11:54:11 AM EDT
Created By : Samuel M. Haugland
==============================================================================
'''
import numpy as np
import obspy
import seispy
import h5py
from matplotlib import pyplot as plt
from scipy.signal import correlate
from scipy.signal import tukey
def main():
    # Loads the wavelet glossary, travel-time lookup table, and synthetic
    # seismograms (paths hard coded for the prem_568 FJ 2016-01-30 run),
    # preprocesses the traces, correlates one trace against every
    # reverberation wavelet, and plots the resulting reflection-coefficient
    # estimates versus depth.
    wvlt_glossary = h5py.File('/home/samhaug/work1/ScS_reverb_sims/wave_glossary/prem_568_FJ_20160130.h5','r')
    lkup = h5py.File('/home/samhaug/work1/ScS_reverb_sims/lookup_tables/NA_prem_568_20160130.h5','r')
    st = obspy.read('/home/samhaug/work1/ScS_reverb_sims/mineos/prem_568_FJ/st_T.pk')
    # Double integration, resampling to 1 Hz, and 15-75 s bandpass.
    st.integrate().detrend().integrate().detrend()
    st.interpolate(1)
    st.filter('bandpass',freqmax=1/15.,freqmin=1/75.,zerophase=True)
    st = seispy.data.align_on_phase(st,phase=['ScSScS'],a_min=False)
    #st.differentiate()
    st.normalize()
    # Window each trace from 400 s before to 2400 s after ScSScS.
    for idx,tr in enumerate(st):
        st[idx] = seispy.data.phase_window(tr,phase=['ScSScS'],window=(-400,2400))
    # Only station index 3 is analyzed here.
    idx=3
    # Inverted Tukey tapers that could mute known arrivals; currently built
    # but not applied (the `st[idx].data *= ones` line is commented out).
    ones = np.ones(len(st[idx].data))
    ones[387:425] = 1+(-1*tukey(425-387,0.3))
    ones[632:669] = 1+(-1*tukey(669-632,0.3))
    ones[1299:1343] = 1+(-1*tukey(1343-1299,0.3))
    ones[1561:1600] = 1+(-1*tukey(1600-1561,0.3))
    ones[2221:2278] = 1+(-1*tukey(2278-2221,0.3))
    ones[2466:2524] = 1+(-1*tukey(2524-2466,0.3))
    #plt.plot(st[idx].data)
    #plt.plot(ones)
    #plt.show()
    #st[idx].data *= ones
    #depth = np.arange(10,2800,2)
    #depth = np.arange(900,1000,10)
    # Candidate discontinuity depths (km); currently just 670 km.
    depth = np.array([670])
    stat = st[idx].stats.station
    corr_dict,wave_e,wvlt_len = correlate_sig(st[idx],wvlt_glossary)
    R_list = []
    # Stack the energy-normalized correlation values over all wavelets at
    # each depth (cf. eq. 14 of Revenaugh & Jordan, 1989).
    for h in depth:
        h_R = 0
        for keys in corr_dict:
            ScS2 = lkup[stat+'/ScS2'][:]
            lkup_t = lkup[stat+'/'+keys][:]
            shift = int(wvlt_len/2.)-58
            h_R += find_R(corr_dict[keys],h,lkup_t,ScS2,shift=shift,data=st[idx].data)/wave_e[keys]
        R_list.append(h_R)
    # Plot R versus depth with reference discontinuities marked.
    plt.plot(np.array(R_list),depth,lw=2)
    plt.ylim(depth.max(),depth.min())
    plt.axhline(220,color='k')
    plt.axhline(400,color='k')
    plt.axhline(670,color='k')
    plt.xlim(-10,10)
    plt.grid()
    plt.show()
def correlate_sig(tr, wvlt_glos):
    """
    Cross-correlate a trace with every wavelet in the glossary.

    Returns a dict of correlation traces keyed like `wvlt_glos`, a dict of
    wavelet energies (dot product of each wavelet with itself), and the
    length of the last wavelet iterated.
    """
    corr_dict = {}
    wave_e = {}
    for name in wvlt_glos:
        wvlt = wvlt_glos[name]
        corr_dict[name] = correlate(tr.data, wvlt, mode='same')
        wave_e[name] = np.dot(wvlt, wvlt)
    return corr_dict, wave_e, len(wvlt)
def find_R(corr_sig, h, lkup, ScS2, **kwargs):
    """
    Return the correlation value (reflection-coefficient estimate) for a
    candidate discontinuity at depth `h` km.

    corr_sig: correlation trace between the data and one wavelet.
    h: candidate depth (km).
    lkup: (depth, time) lookup table for the reverberation phase.
    ScS2: (depth, time) lookup table for the reference ScS2 phase.
    shift (kwarg): optional sample shift, default 0.
    data (kwarg): raw data array, used only for the diagnostic plot.

    Returns 0 when the predicted time falls outside `corr_sig`.
    """
    shift = kwargs.get('shift', 0)
    data = kwargs.get('data', np.zeros(5))

    # Predicted times at the tabulated depth closest to h. NOTE(review):
    # the row index for ScS2 is found from `lkup`, assuming both tables
    # share the same depth sampling — confirm.
    t = lkup[np.argmin(np.abs(lkup[:, 0] - h)), 1]
    ScS2_time = ScS2[np.argmin(np.abs(lkup[:, 0] - h)), 1]

    # Diagnostic plot; blocks until the window is closed.
    plot_corr(t, corr_sig, data, ScS2_time, shift)

    # The +400 accounts for the 400 s pre-ScS2 window start used in main().
    try:
        return corr_sig[int(t - ScS2_time + 400 + shift)]
    except IndexError:
        return 0
    # BUG FIX: removed unreachable dead code that followed the try/except
    # (`corr *= 1./denominator(wvlt_glos)`), which referenced undefined
    # names and could never execute.
def plot_corr(t,corr_sig,data,ScS2_time,shift):
    # Diagnostic plot of the correlation trace and raw data, with the
    # predicted reverberation time marked by a vertical line. Blocks until
    # the plot window is closed.
    fig,ax = plt.subplots(figsize=(25,6))
    ax.plot(corr_sig,lw=2)
    ax.plot(data,alpha=0.5,color='k')
    ax.axvline(t-ScS2_time+400+shift)
    plt.tight_layout()
    plt.show()
def denominator(wvlt_glos):
    """Total energy (sum of self dot products) over all wavelets in the glossary."""
    energy = 0
    for name in wvlt_glos:
        wvlt = wvlt_glos[name][...]
        energy += np.dot(wvlt, wvlt)
    return energy
main()
| [
"samhaug@umich.edu"
] | samhaug@umich.edu | |
5c5a167f3d78f3d568304c19afe7a914241562ad | 9597cb1a23e082cf8950408e7fce72a8beff6177 | /src/pipeline.py | 4cb6a56808ebe9440a34c8c428a9f6d11039dcfd | [] | no_license | xiaomi388/sydw_wsl | 5653c981b3720c6fd496b5a12966bb9cee604878 | 459bbeadb288b4fb65e5d816b19b749bd447df4c | refs/heads/master | 2021-03-24T12:29:22.643163 | 2017-10-04T15:00:33 | 2017-10-04T15:00:33 | 105,512,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | import os
import csv
class reportPipeline(object):

    """
    Pipeline that appends rows to 'output.csv', writing a header row only
    when the file is first created.

    NOTE: the file handle is kept open for the pipeline's lifetime and is
    never explicitly closed, so rows may not be flushed until interpreter
    exit.
    """

    def __init__(self, parameters):
        # A header is needed only if the file does not exist yet.
        header_needed = not os.path.exists('output.csv')
        self.outfile = open('output.csv', 'a')
        self.outcsv = csv.writer(self.outfile)
        if header_needed:
            self.outcsv.writerow(parameters)

    def save(self, arguments):
        """Append one data row."""
        self.outcsv.writerow(arguments)
| [
"xiaomi388@gmail.com"
] | xiaomi388@gmail.com |
247fe433152dd7d1247fc2bfb4b7d841a962c1cc | e6ead9c9489c1b97fb63dabb60e8083a76fe7e76 | /program/sandboxv2/server/tcp/components/send.py | bfc4e94cf1b48c7e407623d63777ac08d7d9f29a | [] | no_license | montarion/morsecode | a313471c2ccd40c62fa7249897ff58407c1bb03d | eed29720c1bb6ade102d8e8a39b4b1b188737681 | refs/heads/master | 2021-04-30T11:45:30.576645 | 2018-02-12T15:13:12 | 2018-02-12T15:13:12 | 121,256,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from socket import *
from time import sleep
class send:
def __init__(self, ip):
self.ss = socket(AF_INET, SOCK_STREAM)
self.ss.connect(((ip), 13334))
def send(self, code):
print('ready to send')
self.ss.send(code.encode())
print('sent')
self.ss.close()
| [
"jamirograntsaan@gmail.com"
] | jamirograntsaan@gmail.com |
e4de52a01a45251293a26a1950fbbfb56fc8bd34 | f571590e3c1787d183e00b81c408362e65671f76 | /namestring.py | 0e6c17addc2a6ac9c5fbce45bc29ba1924217a11 | [] | no_license | neymarthan/project1 | 0b3d108dd8eb4b6fa5093525d469d978faf88b88 | 5e07f9dff181bb310f3ce2c7818a8c6787d4b116 | refs/heads/master | 2022-12-26T08:44:53.464398 | 2020-10-06T09:14:29 | 2020-10-06T09:14:29 | 279,528,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | filename=input('Enter the filename: ')
if filename.endswith('.txt'):
print('That is the name of text file.')
elif filename.endswith('.py'):
print('That is the name of a Python source file.')
elif filename.endswith('.doc'):
print('That is the name of word processing document.')
else:
print('Unknown file type.') | [
"INE-02@Admins-iMac-5.local"
] | INE-02@Admins-iMac-5.local |
267efccef26c76fef97df4a0bb5bda3924f48090 | af47797c9518e12a00a8de5a379d5fa27f579c40 | /newbeercellar/login.py | e483db50fe8883ead45c750312921ea650f795fe | [
"MIT"
] | permissive | atlefren/newbeercellar | 29526154e74b4c613061c01eb59815e08cf03f1b | fcf5a174f45a3d21ce9613b88977a88d9bae4aa5 | refs/heads/master | 2021-01-20T08:47:03.976431 | 2015-04-26T16:14:42 | 2015-04-26T16:14:42 | 29,885,741 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | from flask import current_app, redirect, url_for, session
from flask.ext.login import login_user, logout_user
from flask_googlelogin import USERINFO_EMAIL_SCOPE
from newbeercellar import login_manager, app, googlelogin
from models import User
from util import get_or_create_default_cellar
@app.route("/login")
def login():
return redirect(
googlelogin.login_url(scopes=[USERINFO_EMAIL_SCOPE])
)
login_manager.unauthorized_handler(login)
@app.route('/logout')
def logout():
logout_user()
session.clear()
return redirect(url_for('index'))
@login_manager.user_loader
def load_user(userid):
return current_app.db_session.query(User).get(userid)
@app.route('/oauth2callback')
@googlelogin.oauth2callback
def create_or_update_user(token, userinfo, **params):
if params.get('error', False):
return redirect(url_for('index'))
db = current_app.db_session
user = db.query(User).filter(User.google_id == userinfo['id']).first()
if user:
user.name = userinfo['name']
else:
user = User(
google_id=userinfo['id'],
name=userinfo['name'],
email=userinfo['email'],
username=userinfo['email'].split('@')[0].replace('.', '')
)
db.add(user)
db.commit()
db.flush()
login_user(user)
cellar = get_or_create_default_cellar(user)
return redirect(url_for(
'view_cellar',
username=user.username,
cellar_id=cellar.id
))
| [
"atle@frenviksveen.net"
] | atle@frenviksveen.net |
cfc8baabe5fbc4634ddd6a8cb4267db77e24b358 | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stubs/passlib/passlib/ifc.pyi | 80467bfea35e5977019150a5275c3534e3f1145c | [
"Apache-2.0",
"MIT"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 1,061 | pyi | import abc
from abc import abstractmethod
from typing import Any
class PasswordHash(metaclass=abc.ABCMeta):
is_disabled: bool
truncate_size: Any
truncate_error: bool
truncate_verify_reject: bool
@classmethod
@abstractmethod
def hash(cls, secret, **setting_and_context_kwds): ...
@classmethod
def encrypt(cls, *args, **kwds): ...
@classmethod
@abstractmethod
def verify(cls, secret, hash, **context_kwds): ...
@classmethod
@abstractmethod
def using(cls, relaxed: bool = ..., **kwds): ...
@classmethod
def needs_update(cls, hash, secret: Any | None = ...): ...
@classmethod
@abstractmethod
def identify(cls, hash): ...
@classmethod
def genconfig(cls, **setting_kwds): ...
@classmethod
def genhash(cls, secret, config, **context) -> None: ...
deprecated: bool
class DisabledHash(PasswordHash, metaclass=abc.ABCMeta):
is_disabled: bool
@classmethod
def disable(cls, hash: Any | None = ...): ...
@classmethod
def enable(cls, hash) -> None: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
7a6660a4ebd2a02738361cb692acd4b4451abde7 | 4fc86f5c444f52619f9f748c9bad5bf3e0e2c0b2 | /megatron/data/test/test_indexed_dataset.py | 78622d275de5b38bbe02b1c1827d34036aff0eb1 | [
"MIT",
"Apache-2.0"
] | permissive | Xianchao-Wu/megatron2 | 95ea620b74c66e51f9e31075b1df6bb1b761678b | f793c37223b32051cb61d3b1d5661dddd57634bf | refs/heads/main | 2023-08-17T03:42:31.602515 | 2021-09-24T05:12:00 | 2021-09-24T05:12:00 | 330,527,561 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,739 | py | # This file isn't really a formal automated test, it's just a place to
# put some code used during development and manual testing of
# indexed_dataset.
import os
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(script_dir, "../../../"))
from megatron.data import indexed_dataset
from megatron.tokenizer import build_tokenizer
import argparse
#import os
#import sys
import torch
#script_dir = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(os.path.join(script_dir, "../../../"))
def test_indexed_dataset(args):
ds = indexed_dataset.make_dataset(args.data, args.dataset_impl)
tokenizer = build_tokenizer(args)
print('len(ds.doc_idx)={}'.format(len(ds.doc_idx)))
print('len(ds)={}'.format(len(ds)))
print('ds.doc_idx[-1]={}'.format(ds.doc_idx[-1]))
if ds.supports_prefetch: # False
# just prefetch the whole thing in test (so assume it is small)
ds.prefetch(range(len(ds)))
if args.count > len(ds.doc_idx) - 1:
args.count = len(ds.doc_idx) - 1
for i in range(args.count):
start = ds.doc_idx[i]
end = ds.doc_idx[i + 1]
ids = ds[start:end]
print(f"Document {i}:")
print("--------------")
for s in ids:
assert len(s) > 0
l = s.data.tolist()
text = tokenizer.detokenize(l)
print(text)
print("---")
def test_indexed_dataset_get(args):
ds = indexed_dataset.make_dataset(args.data, args.dataset_impl) # ds=dataset
tokenizer = build_tokenizer(args)
size = ds.sizes[0] # [30 54 16 30 27 40 30 3]'s 30
print(f"size: {size}")
full = ds.get(0)
print(full)
print(tokenizer.detokenize(full.data.tolist())) # 「オタ」とも呼ばれているこのペラナカン(華人)の特製料理は、とてもおいしいスナック料理です。
print("---")
end = ds.get(0, offset=size - 10)
print(end)
print(tokenizer.detokenize(end.data.tolist())) # 、とてもおいしいスナック料理です。
start = ds.get(0, length=10)
print(start)
print(tokenizer.detokenize(start.data.tolist())) # 「オタ」とも呼ばれているこの
part = ds.get(0, offset=2, length=8)
print(part)
print(tokenizer.detokenize(part.data.tolist())) # オタ」とも呼ばれているこの
# def test_albert_dataset(args):
# # tokenizer = FullBertTokenizer(args.vocab, do_lower_case=True)
# # idataset = indexed_dataset.make_dataset(args.data, args.dataset_impl)
# # ds = AlbertDataset(idataset, tokenizer)
# ds = AlbertDataset.from_paths(args.vocab, args.data, args.dataset_impl,
# args.epochs, args.max_num_samples,
# args.masked_lm_prob, args.seq_length,
# args.short_seq_prob, args.seed)
# truncated = 0
# total = 0
# for i, s in enumerate(ds):
# ids = s['text']
# tokens = ds.tokenizer.convert_ids_to_tokens(ids)
# print(tokens)
# if i >= args.count-1:
# exit()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, help='prefix to data files')
parser.add_argument('--dataset-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'])
parser.add_argument('--count', type=int, default=10,
help='Number of samples/documents to print')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True,
choices=['BertWordPieceLowerCase', 'BertWordPieceCase', 'BertWordPieceCaseJp',
'GPT2BPETokenizer', 'GPT2BPETokenizerJp', 'GPT2BPETokenizerJpMecab'],
help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
group.add_argument('--emoji-file', type=str, default=None,
help='Path to the emoji file for Japanese tokenization')
group.add_argument('--mecab-dict-path', type=str, default=None,
help='path to the mecab dict file for japanese tokenization')
parser.add_argument('--epochs', type=int, default=5,
help='Number of epochs to plan for')
parser.add_argument('--max-num-samples', type=int, default=None,
help='Maximum number of samples to plan for')
parser.add_argument('--masked-lm-prob', type=float, default=0.15,
help='probability of masking tokens')
parser.add_argument('--seq-length', type=int, default=512,
help='maximum sequence length')
parser.add_argument('--short-seq-prob', type=float, default=0.1,
help='probability of creating a short sequence')
parser.add_argument('--seed', type=int, default=1234,
help='random seed')
args = parser.parse_args()
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
if args.dataset_impl == "infer":
args.dataset_impl = indexed_dataset.infer_dataset_impl(args.data)
# test_albert_dataset(args)
print('-'*10 + 'test_indexed_dataset_get(args)' + '-'*10)
test_indexed_dataset_get(args)
print('-'*30)
print('-'*10 + 'test_indexed_dataset(args)' + '-'*10)
test_indexed_dataset(args)
if __name__ == "__main__":
main()
| [
"wuxianchao@gmail.com"
] | wuxianchao@gmail.com |
d6cdb884372725cfa6a83177edf21c31408b92e1 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/Python_Indentation_1.txt.py | d645b6e26f5356cbcfb271b23e86a4036273dfa2 | [
"MIT",
"Python-2.0"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 70 | py | if 5 > 2:
print("Five is greater than two!")
//Creater By Bryan G
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
ee0b17ad5ca05895993677298cc9cc9d610b1be4 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/586613e9c4dd65b64bfcffb16c008562f967ba42-<test_cdist_calling_conventions>-bug.py | 98025efd20cb767648c554c73b6b99fa2470e902 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | def test_cdist_calling_conventions(self):
for eo_name in self.rnd_eo_names:
X1 = eo[eo_name][:, ::(- 1)]
X2 = eo[eo_name][:(- 3):2]
for metric in _metrics:
if (verbose > 2):
print('testing: ', metric, ' with: ', eo_name)
if ((metric == 'yule') and ('bool' not in eo_name)):
continue
self._check_calling_conventions(X1, X2, metric)
if (metric == 'wminkowski'):
w = (1.0 / X1.std(axis=0))
self._check_calling_conventions(X1, X2, metric, w=w)
elif (metric == 'seuclidean'):
X12 = np.vstack([X1, X2]).astype(np.double)
V = np.var(X12, axis=0, ddof=1)
self._check_calling_conventions(X1, X2, metric, V=V)
elif (metric == 'mahalanobis'):
X12 = np.vstack([X1, X2]).astype(np.double)
V = np.atleast_2d(np.cov(X12.T))
VI = np.array(np.linalg.inv(V).T)
self._check_calling_conventions(X1, X2, metric, VI=VI) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
4222606e1a7a16d3a76ac4e443a33e885993f6a7 | 915496215d4c4ae4c952dc1839e73d112dab0460 | /manage/fabriccloud/vagrant.py | 3fa1f48de523f43f2eed7851619dd4db94fc96c7 | [] | no_license | nicholsn/simple-application-framework | 4afa6f71af68968e45364fd0691c54be9acf0f71 | a62f332bdebc9fff31ad58bdcb68cff990bdc663 | refs/heads/master | 2021-01-18T01:20:40.474399 | 2014-03-18T20:41:53 | 2014-03-18T20:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | __author__ = 'stonerri'
from base import *
def setDefaults():
env.user = 'vagrant'
env.hosts = ['127.0.0.1']
env.port = 2200
# use vagrant ssh key
result = local('vagrant ssh-config | grep IdentityFile', capture=True)
env.key_filename = result.split()[1]
def systemInformation():
pass
def sync():
print 'vagrant gets sync for free' | [
"stonerri@gmail.com"
] | stonerri@gmail.com |
e2fbb383b2e508e6e2293ae87a3433534eff975d | f78cb1e6a6b6c644cd35b5139b01017fcf8dcdb1 | /PostProcessing/test/Palisade/test_processor_base.py | 574194b068ce248e4320fa255794666e91297e61 | [] | no_license | dsavoiu/Karma | 9506fc449d94db060fabd0bfa61ff7673107b40d | 1e9f1d237cb6b0f7c92e7c2ea1a14c6b4a5745cd | refs/heads/master | 2023-06-26T08:53:09.979674 | 2023-04-06T15:11:14 | 2023-04-06T15:19:33 | 143,059,118 | 1 | 6 | null | 2022-11-22T20:39:17 | 2018-07-31T19:37:46 | Python | UTF-8 | Python | false | false | 11,973 | py | import numpy as np
import operator as op
import os
import shutil
import unittest2 as unittest
from copy import deepcopy
from rootpy import asrootpy
from rootpy.io import root_open, DoesNotExist
from rootpy.plotting import Hist1D, Hist2D, Profile1D, Efficiency, Graph
from rootpy.plotting.hist import _Hist, _Hist2D
from rootpy.plotting.profile import _ProfileBase
from Karma.PostProcessing.Palisade import ContextValue, String, LiteralString
from Karma.PostProcessing.Palisade.Processors._base import _ProcessorBase, ConfigurationError
from Karma.PostProcessing.Palisade._lazy import String
_RESULTS = []
class DummyProcessor(_ProcessorBase):
CONFIG_KEY_FOR_TEMPLATES = "templates"
SUBKEYS_FOR_CONTEXT_REPLACING = ["replace_under_here"]
CONFIG_KEY_FOR_CONTEXTS = "expansions"
def __init__(self, config):
super(DummyProcessor, self).__init__(config, output_folder="dummy")
self.results = []
def _process(self, config):
print(config)
return self.results.append(config)
# -- register action slots
_ACTIONS = [_process]
class TestProcessorBase(unittest.TestCase):
#MANDATORY_CONFIG_KEYS = ['input_files', 'expansions', 'templates']
MANDATORY_CONFIG_KEYS = []
BASE_CFG = {
'templates': [
dict(),
],
'expansions': {
'namespace': [dict(key=42, meta_key='key')],
},
}
@staticmethod
def _run_palisade(config):
_p = DummyProcessor(config)
_p.run(show_progress=False)
return _p.results
def test_missing_config_keys_raise(self):
for _key in self.MANDATORY_CONFIG_KEYS:
_cfg = deepcopy(self.BASE_CFG)
del _cfg[_key]
with self.subTest(key=_key):
with self.assertRaises(KeyError) as _err:
self._run_palisade(config=_cfg)
self.assertEqual(_err.exception.args[0], _key)
def test_run_through(self):
'''base config should run through without exceptions being raised'''
_cfg = deepcopy(self.BASE_CFG)
_results = self._run_palisade(config=_cfg)
def test_number_of_contexts(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'] = [dict()] * 2 # two empty tasks...
_cfg['expansions'] = {
# ...times fifteen contexts
'namespace_A': [dict(a='b')] * 3,
'namespace_B': [dict(c='d')] * 5,
}
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 2*3*5)
def test_replace_context_string(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_string': '{namespace[key]}',
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_string'], str(self.BASE_CFG['expansions']['namespace'][0]['key']))
def test_replace_context_value_namespace_with_space(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['expansions'] = {
# ...times fifteen contexts
'namespace with space': [dict(key=42)],
}
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue('"namespace with space"[key]'),
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_value'], 42)
def test_replace_context_value(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue('namespace[key]'),
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_value'], self.BASE_CFG['expansions']['namespace'][0]['key'])
def test_replace_context_literal_string(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'literal_string': String('{namespace[key]}'),
'literal_string_deprecated': LiteralString('{namespace[key]}'),
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['literal_string'], '{namespace[key]}')
self.assertEqual(_results[0]['replace_under_here']['literal_string_deprecated'], '{namespace[key]}')
def test_replace_context_nested_dict(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_nested_dict': {
'lvl_1': { 'lvl_2': { 'inner_key': ContextValue('namespace[key]')}}
},
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_nested_dict']['lvl_1']['lvl_2']['inner_key'], self.BASE_CFG['expansions']['namespace'][0]['key'])
def test_replace_context_nested_list(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_nested_list': [[
{ 'inner_key': ContextValue('namespace[key]')}
]],
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_nested_list'][0][0]['inner_key'], self.BASE_CFG['expansions']['namespace'][0]['key'])
def test_replace_context_nested_dict_list(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_nested_dict_list': {
'inner_key': [
ContextValue('namespace[key]'),
ContextValue('namespace[key]'),
]
},
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
for _i in range(2):
self.assertEqual(_results[0]['replace_under_here']['context_nested_dict_list']['inner_key'][_i], self.BASE_CFG['expansions']['namespace'][0]['key'])
def test_replace_context_nested_list_dict(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_nested_list_dict': [
{ 'inner_key': [ContextValue('namespace[key]'), ContextValue('namespace[key]')]},
{ 'inner_key': [ContextValue('namespace[key]'), ContextValue('namespace[key]')]}
],
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
for _i in range(2):
for _j in range(2):
self.assertEqual(_results[0]['replace_under_here']['context_nested_list_dict'][_i]['inner_key'][_j], self.BASE_CFG['expansions']['namespace'][0]['key'])
def test_noreplace_context_not_under_key(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'dont_replace_under_here': {
'literal_string': '{namespace[key]}',
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['dont_replace_under_here']['literal_string'], '{namespace[key]}')
def test_replace_context_string_top_level_key(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_this_as_well' : '{namespace[key]}',
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_this_as_well'], str(self.BASE_CFG['expansions']['namespace'][0]['key']))
def test_context_string_inexistent_key_raise(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': '{namespace[inexistent_key]}',
}
})
with self.assertRaises(ConfigurationError) as _err:
_results = self._run_palisade(config=_cfg)
self.assertIn("'inexistent_key'", _err.exception.args[0])
def test_replace_context_string_top_level_key(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_this_as_well' : ContextValue('namespace[key]'),
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_this_as_well'], self.BASE_CFG['expansions']['namespace'][0]['key'])
def test_context_value_unsupported_syntax_raise(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue('namespace+key'),
}
})
with self.assertRaises(ConfigurationError):
_results = self._run_palisade(config=_cfg)
def test_context_value_inexistent_key_raise(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue('namespace[inexistent_key]'),
}
})
with self.assertRaises(ConfigurationError) as _err:
_results = self._run_palisade(config=_cfg)
self.assertIn("'inexistent_key'", _err.exception.args[0])
def test_replace_context_value_lazy_arithmetic(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue('namespace[key]') * 2,
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_value'], self.BASE_CFG['expansions']['namespace'][0]['key'] * 2)
def test_replace_context_value_lazy_arithmetic_2(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue('namespace[key]') * ContextValue('namespace[key]'),
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_value'], self.BASE_CFG['expansions']['namespace'][0]['key'] ** 2)
def test_replace_context_value_lazy_format_string(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': String("{0}{0}").format(ContextValue('namespace[key]')),
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_value'], str(self.BASE_CFG['expansions']['namespace'][0]['key']) * 2)
def test_replace_context_value_lazy_meta_key(self):
_cfg = deepcopy(self.BASE_CFG)
_cfg['templates'][0].update({
'replace_under_here': {
'context_value': ContextValue(String("namespace[{}]").format(ContextValue('namespace[meta_key]'))),
}
})
_results = self._run_palisade(config=_cfg)
self.assertEqual(len(_results), 1)
self.assertEqual(_results[0]['replace_under_here']['context_value'], self.BASE_CFG['expansions']['namespace'][0]['key'])
| [
"daniel.savoiu@cern.ch"
] | daniel.savoiu@cern.ch |
7251e6e8d7ab60a638ac28701018bae4595f14da | 1bed2f766620acf085ed2d7fd3e354a3482b8960 | /tests/components/zwave_js/conftest.py | 6585deddbdb78e6b5a276817a7abcaf1c5b4593c | [
"Apache-2.0"
] | permissive | elupus/home-assistant | 5cbb79a2f25a2938a69f3988534486c269b77643 | 564150169bfc69efdfeda25a99d803441f3a4b10 | refs/heads/dev | 2023-08-28T16:36:04.304864 | 2022-09-16T06:35:12 | 2022-09-16T06:35:12 | 114,460,522 | 2 | 2 | Apache-2.0 | 2023-02-22T06:14:54 | 2017-12-16T12:50:55 | Python | UTF-8 | Python | false | false | 39,828 | py | """Provide common Z-Wave JS fixtures."""
import asyncio
import copy
import io
import json
from unittest.mock import AsyncMock, patch
import pytest
from zwave_js_server.event import Event
from zwave_js_server.model.driver import Driver
from zwave_js_server.model.node import Node
from zwave_js_server.version import VersionInfo
from tests.common import MockConfigEntry, load_fixture
# Add-on fixtures
@pytest.fixture(name="addon_info_side_effect")
def addon_info_side_effect_fixture():
    """Provide the side effect for the mocked add-on info call (none by default)."""
    # Tests override this fixture to simulate Supervisor errors.
    return None
@pytest.fixture(name="addon_info")
def mock_addon_info(addon_info_side_effect):
"""Mock Supervisor add-on info."""
with patch(
"homeassistant.components.zwave_js.addon.async_get_addon_info",
side_effect=addon_info_side_effect,
) as addon_info:
addon_info.return_value = {
"options": {},
"state": None,
"update_available": False,
"version": None,
}
yield addon_info
@pytest.fixture(name="addon_store_info_side_effect")
def addon_store_info_side_effect_fixture():
"""Return the add-on store info side effect."""
return None
@pytest.fixture(name="addon_store_info")
def mock_addon_store_info(addon_store_info_side_effect):
"""Mock Supervisor add-on info."""
with patch(
"homeassistant.components.zwave_js.addon.async_get_addon_store_info",
side_effect=addon_store_info_side_effect,
) as addon_store_info:
addon_store_info.return_value = {
"installed": None,
"state": None,
"version": "1.0.0",
}
yield addon_store_info
@pytest.fixture(name="addon_running")
def mock_addon_running(addon_store_info, addon_info):
"""Mock add-on already running."""
addon_store_info.return_value = {
"installed": "1.0.0",
"state": "started",
"version": "1.0.0",
}
addon_info.return_value["state"] = "started"
addon_info.return_value["version"] = "1.0.0"
return addon_info
@pytest.fixture(name="addon_installed")
def mock_addon_installed(addon_store_info, addon_info):
"""Mock add-on already installed but not running."""
addon_store_info.return_value = {
"installed": "1.0.0",
"state": "stopped",
"version": "1.0.0",
}
addon_info.return_value["state"] = "stopped"
addon_info.return_value["version"] = "1.0.0"
return addon_info
@pytest.fixture(name="addon_not_installed")
def mock_addon_not_installed(addon_store_info, addon_info):
"""Mock add-on not installed."""
return addon_info
@pytest.fixture(name="addon_options")
def mock_addon_options(addon_info):
"""Mock add-on options."""
return addon_info.return_value["options"]
@pytest.fixture(name="set_addon_options_side_effect")
def set_addon_options_side_effect_fixture(addon_options):
"""Return the set add-on options side effect."""
async def set_addon_options(hass, slug, options):
"""Mock set add-on options."""
addon_options.update(options["options"])
return set_addon_options
@pytest.fixture(name="set_addon_options")
def mock_set_addon_options(set_addon_options_side_effect):
"""Mock set add-on options."""
with patch(
"homeassistant.components.zwave_js.addon.async_set_addon_options",
side_effect=set_addon_options_side_effect,
) as set_options:
yield set_options
@pytest.fixture(name="install_addon_side_effect")
def install_addon_side_effect_fixture(addon_store_info, addon_info):
"""Return the install add-on side effect."""
async def install_addon(hass, slug):
"""Mock install add-on."""
addon_store_info.return_value = {
"installed": "1.0.0",
"state": "stopped",
"version": "1.0.0",
}
addon_info.return_value["state"] = "stopped"
addon_info.return_value["version"] = "1.0.0"
return install_addon
@pytest.fixture(name="install_addon")
def mock_install_addon(install_addon_side_effect):
"""Mock install add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_install_addon",
side_effect=install_addon_side_effect,
) as install_addon:
yield install_addon
@pytest.fixture(name="update_addon")
def mock_update_addon():
"""Mock update add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_update_addon"
) as update_addon:
yield update_addon
@pytest.fixture(name="start_addon_side_effect")
def start_addon_side_effect_fixture(addon_store_info, addon_info):
"""Return the start add-on options side effect."""
async def start_addon(hass, slug):
"""Mock start add-on."""
addon_store_info.return_value = {
"installed": "1.0.0",
"state": "started",
"version": "1.0.0",
}
addon_info.return_value["state"] = "started"
return start_addon
@pytest.fixture(name="start_addon")
def mock_start_addon(start_addon_side_effect):
"""Mock start add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_start_addon",
side_effect=start_addon_side_effect,
) as start_addon:
yield start_addon
@pytest.fixture(name="stop_addon")
def stop_addon_fixture():
"""Mock stop add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_stop_addon"
) as stop_addon:
yield stop_addon
@pytest.fixture(name="restart_addon_side_effect")
def restart_addon_side_effect_fixture():
"""Return the restart add-on options side effect."""
return None
@pytest.fixture(name="restart_addon")
def mock_restart_addon(restart_addon_side_effect):
"""Mock restart add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_restart_addon",
side_effect=restart_addon_side_effect,
) as restart_addon:
yield restart_addon
@pytest.fixture(name="uninstall_addon")
def uninstall_addon_fixture():
"""Mock uninstall add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_uninstall_addon"
) as uninstall_addon:
yield uninstall_addon
@pytest.fixture(name="create_backup")
def create_backup_fixture():
"""Mock create backup."""
with patch(
"homeassistant.components.zwave_js.addon.async_create_backup"
) as create_backup:
yield create_backup
@pytest.fixture(name="controller_state", scope="session")
def controller_state_fixture():
"""Load the controller state fixture data."""
return json.loads(load_fixture("zwave_js/controller_state.json"))
@pytest.fixture(name="controller_node_state", scope="session")
def controller_node_state_fixture():
"""Load the controller node state fixture data."""
return json.loads(load_fixture("zwave_js/controller_node_state.json"))
@pytest.fixture(name="version_state", scope="session")
def version_state_fixture():
"""Load the version state fixture data."""
return {
"type": "version",
"driverVersion": "6.0.0-beta.0",
"serverVersion": "1.0.0",
"homeId": 1234567890,
}
@pytest.fixture(name="log_config_state")
def log_config_state_fixture():
"""Return log config state fixture data."""
return {
"enabled": True,
"level": "info",
"logToFile": False,
"filename": "",
"forceConsole": False,
}
@pytest.fixture(name="config_entry_diagnostics", scope="session")
def config_entry_diagnostics_fixture():
"""Load the config entry diagnostics fixture data."""
return json.loads(load_fixture("zwave_js/config_entry_diagnostics.json"))
@pytest.fixture(name="multisensor_6_state", scope="session")
def multisensor_6_state_fixture():
"""Load the multisensor 6 node state fixture data."""
return json.loads(load_fixture("zwave_js/multisensor_6_state.json"))
@pytest.fixture(name="ecolink_door_sensor_state", scope="session")
def ecolink_door_sensor_state_fixture():
"""Load the Ecolink Door/Window Sensor node state fixture data."""
return json.loads(load_fixture("zwave_js/ecolink_door_sensor_state.json"))
@pytest.fixture(name="hank_binary_switch_state", scope="session")
def binary_switch_state_fixture():
"""Load the hank binary switch node state fixture data."""
return json.loads(load_fixture("zwave_js/hank_binary_switch_state.json"))
@pytest.fixture(name="bulb_6_multi_color_state", scope="session")
def bulb_6_multi_color_state_fixture():
"""Load the bulb 6 multi-color node state fixture data."""
return json.loads(load_fixture("zwave_js/bulb_6_multi_color_state.json"))
@pytest.fixture(name="light_color_null_values_state", scope="session")
def light_color_null_values_state_fixture():
"""Load the light color null values node state fixture data."""
return json.loads(load_fixture("zwave_js/light_color_null_values_state.json"))
@pytest.fixture(name="eaton_rf9640_dimmer_state", scope="session")
def eaton_rf9640_dimmer_state_fixture():
"""Load the eaton rf9640 dimmer node state fixture data."""
return json.loads(load_fixture("zwave_js/eaton_rf9640_dimmer_state.json"))
@pytest.fixture(name="lock_schlage_be469_state", scope="session")
def lock_schlage_be469_state_fixture():
"""Load the schlage lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_schlage_be469_state.json"))
@pytest.fixture(name="lock_august_asl03_state", scope="session")
def lock_august_asl03_state_fixture():
"""Load the August Pro lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_august_asl03_state.json"))
@pytest.fixture(name="climate_radio_thermostat_ct100_plus_state", scope="session")
def climate_radio_thermostat_ct100_plus_state_fixture():
"""Load the climate radio thermostat ct100 plus node state fixture data."""
return json.loads(
load_fixture("zwave_js/climate_radio_thermostat_ct100_plus_state.json")
)
@pytest.fixture(
name="climate_radio_thermostat_ct100_plus_different_endpoints_state",
scope="session",
)
def climate_radio_thermostat_ct100_plus_different_endpoints_state_fixture():
"""Load the thermostat fixture state with values on different endpoints.
This device is a radio thermostat ct100.
"""
return json.loads(
load_fixture(
"zwave_js/climate_radio_thermostat_ct100_plus_different_endpoints_state.json"
)
)
@pytest.fixture(name="climate_adc_t3000_state", scope="session")
def climate_adc_t3000_state_fixture():
"""Load the climate ADC-T3000 node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_adc_t3000_state.json"))
@pytest.fixture(name="climate_danfoss_lc_13_state", scope="session")
def climate_danfoss_lc_13_state_fixture():
"""Load the climate Danfoss (LC-13) electronic radiator thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_danfoss_lc_13_state.json"))
@pytest.fixture(name="climate_eurotronic_spirit_z_state", scope="session")
def climate_eurotronic_spirit_z_state_fixture():
"""Load the climate Eurotronic Spirit Z thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_eurotronic_spirit_z_state.json"))
@pytest.fixture(name="climate_heatit_z_trm3_state", scope="session")
def climate_heatit_z_trm3_state_fixture():
"""Load the climate HEATIT Z-TRM3 thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_heatit_z_trm3_state.json"))
@pytest.fixture(name="climate_heatit_z_trm2fx_state", scope="session")
def climate_heatit_z_trm2fx_state_fixture():
"""Load the climate HEATIT Z-TRM2fx thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_heatit_z_trm2fx_state.json"))
@pytest.fixture(name="climate_heatit_z_trm3_no_value_state", scope="session")
def climate_heatit_z_trm3_no_value_state_fixture():
"""Load the climate HEATIT Z-TRM3 thermostat node w/no value state fixture data."""
return json.loads(
load_fixture("zwave_js/climate_heatit_z_trm3_no_value_state.json")
)
@pytest.fixture(name="nortek_thermostat_state", scope="session")
def nortek_thermostat_state_fixture():
"""Load the nortek thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/nortek_thermostat_state.json"))
@pytest.fixture(name="srt321_hrt4_zw_state", scope="session")
def srt321_hrt4_zw_state_fixture():
"""Load the climate HRT4-ZW / SRT321 / SRT322 thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/srt321_hrt4_zw_state.json"))
@pytest.fixture(name="chain_actuator_zws12_state", scope="session")
def window_cover_state_fixture():
"""Load the window cover node state fixture data."""
return json.loads(load_fixture("zwave_js/chain_actuator_zws12_state.json"))
@pytest.fixture(name="fan_generic_state", scope="session")
def fan_generic_state_fixture():
"""Load the fan node state fixture data."""
return json.loads(load_fixture("zwave_js/fan_generic_state.json"))
@pytest.fixture(name="hs_fc200_state", scope="session")
def hs_fc200_state_fixture():
"""Load the HS FC200+ node state fixture data."""
return json.loads(load_fixture("zwave_js/fan_hs_fc200_state.json"))
@pytest.fixture(name="leviton_zw4sf_state", scope="session")
def leviton_zw4sf_state_fixture():
"""Load the Leviton ZW4SF node state fixture data."""
return json.loads(load_fixture("zwave_js/leviton_zw4sf_state.json"))
@pytest.fixture(name="gdc_zw062_state", scope="session")
def motorized_barrier_cover_state_fixture():
"""Load the motorized barrier cover node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_zw062_state.json"))
@pytest.fixture(name="iblinds_v2_state", scope="session")
def iblinds_v2_state_fixture():
"""Load the iBlinds v2 node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_iblinds_v2_state.json"))
@pytest.fixture(name="qubino_shutter_state", scope="session")
def qubino_shutter_state_fixture():
"""Load the Qubino Shutter node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_qubino_shutter_state.json"))
@pytest.fixture(name="aeotec_nano_shutter_state", scope="session")
def aeotec_nano_shutter_state_fixture():
"""Load the Aeotec Nano Shutter node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_aeotec_nano_shutter_state.json"))
@pytest.fixture(name="fibaro_fgr222_shutter_state", scope="session")
def fibaro_fgr222_shutter_state_fixture():
"""Load the Fibaro FGR222 node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_fibaro_fgr222_state.json"))
@pytest.fixture(name="aeon_smart_switch_6_state", scope="session")
def aeon_smart_switch_6_state_fixture():
"""Load the AEON Labs (ZW096) Smart Switch 6 node state fixture data."""
return json.loads(load_fixture("zwave_js/aeon_smart_switch_6_state.json"))
@pytest.fixture(name="ge_12730_state", scope="session")
def ge_12730_state_fixture():
"""Load the GE 12730 node state fixture data."""
return json.loads(load_fixture("zwave_js/fan_ge_12730_state.json"))
@pytest.fixture(name="aeotec_radiator_thermostat_state", scope="session")
def aeotec_radiator_thermostat_state_fixture():
"""Load the Aeotec Radiator Thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/aeotec_radiator_thermostat_state.json"))
@pytest.fixture(name="inovelli_lzw36_state", scope="session")
def inovelli_lzw36_state_fixture():
"""Load the Inovelli LZW36 node state fixture data."""
return json.loads(load_fixture("zwave_js/inovelli_lzw36_state.json"))
@pytest.fixture(name="null_name_check_state", scope="session")
def null_name_check_state_fixture():
"""Load the null name check node state fixture data."""
return json.loads(load_fixture("zwave_js/null_name_check_state.json"))
@pytest.fixture(name="lock_id_lock_as_id150_state", scope="session")
def lock_id_lock_as_id150_state_fixture():
"""Load the id lock id-150 lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_id_lock_as_id150_state.json"))
@pytest.fixture(
name="climate_radio_thermostat_ct101_multiple_temp_units_state", scope="session"
)
def climate_radio_thermostat_ct101_multiple_temp_units_state_fixture():
"""Load the climate multiple temp units node state fixture data."""
return json.loads(
load_fixture(
"zwave_js/climate_radio_thermostat_ct101_multiple_temp_units_state.json"
)
)
@pytest.fixture(
name="climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state",
scope="session",
)
def climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state_fixture():
"""Load the climate device with mode and setpoint on different endpoints node state fixture data."""
return json.loads(
load_fixture(
"zwave_js/climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state.json"
)
)
@pytest.fixture(name="vision_security_zl7432_state", scope="session")
def vision_security_zl7432_state_fixture():
"""Load the vision security zl7432 switch node state fixture data."""
return json.loads(load_fixture("zwave_js/vision_security_zl7432_state.json"))
@pytest.fixture(name="zen_31_state", scope="session")
def zem_31_state_fixture():
"""Load the zen_31 node state fixture data."""
return json.loads(load_fixture("zwave_js/zen_31_state.json"))
@pytest.fixture(name="wallmote_central_scene_state", scope="session")
def wallmote_central_scene_state_fixture():
"""Load the wallmote central scene node state fixture data."""
return json.loads(load_fixture("zwave_js/wallmote_central_scene_state.json"))
@pytest.fixture(name="ge_in_wall_dimmer_switch_state", scope="session")
def ge_in_wall_dimmer_switch_state_fixture():
"""Load the ge in-wall dimmer switch node state fixture data."""
return json.loads(load_fixture("zwave_js/ge_in_wall_dimmer_switch_state.json"))
@pytest.fixture(name="aeotec_zw164_siren_state", scope="session")
def aeotec_zw164_siren_state_fixture():
"""Load the aeotec zw164 siren node state fixture data."""
return json.loads(load_fixture("zwave_js/aeotec_zw164_siren_state.json"))
@pytest.fixture(name="lock_popp_electric_strike_lock_control_state", scope="session")
def lock_popp_electric_strike_lock_control_state_fixture():
"""Load the popp electric strike lock control node state fixture data."""
return json.loads(
load_fixture("zwave_js/lock_popp_electric_strike_lock_control_state.json")
)
@pytest.fixture(name="fortrezz_ssa1_siren_state", scope="session")
def fortrezz_ssa1_siren_state_fixture():
"""Load the fortrezz ssa1 siren node state fixture data."""
return json.loads(load_fixture("zwave_js/fortrezz_ssa1_siren_state.json"))
@pytest.fixture(name="fortrezz_ssa3_siren_state", scope="session")
def fortrezz_ssa3_siren_state_fixture():
"""Load the fortrezz ssa3 siren node state fixture data."""
return json.loads(load_fixture("zwave_js/fortrezz_ssa3_siren_state.json"))
@pytest.fixture(name="zp3111_not_ready_state", scope="session")
def zp3111_not_ready_state_fixture():
"""Load the zp3111 4-in-1 sensor not-ready node state fixture data."""
return json.loads(load_fixture("zwave_js/zp3111-5_not_ready_state.json"))
@pytest.fixture(name="zp3111_state", scope="session")
def zp3111_state_fixture():
"""Load the zp3111 4-in-1 sensor node state fixture data."""
return json.loads(load_fixture("zwave_js/zp3111-5_state.json"))
@pytest.fixture(name="express_controls_ezmultipli_state", scope="session")
def light_express_controls_ezmultipli_state_fixture():
"""Load the Express Controls EZMultiPli node state fixture data."""
return json.loads(load_fixture("zwave_js/express_controls_ezmultipli_state.json"))
@pytest.fixture(name="lock_home_connect_620_state", scope="session")
def lock_home_connect_620_state_fixture():
"""Load the Home Connect 620 lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_home_connect_620_state.json"))
@pytest.fixture(name="client")
def mock_client_fixture(controller_state, version_state, log_config_state):
"""Mock a client."""
with patch(
"homeassistant.components.zwave_js.ZwaveClient", autospec=True
) as client_class:
client = client_class.return_value
async def connect():
await asyncio.sleep(0)
client.connected = True
async def listen(driver_ready: asyncio.Event) -> None:
driver_ready.set()
listen_block = asyncio.Event()
await listen_block.wait()
assert False, "Listen wasn't canceled!"
async def disconnect():
client.connected = False
client.connect = AsyncMock(side_effect=connect)
client.listen = AsyncMock(side_effect=listen)
client.disconnect = AsyncMock(side_effect=disconnect)
client.driver = Driver(client, controller_state, log_config_state)
client.version = VersionInfo.from_message(version_state)
client.ws_server_url = "ws://test:3000/zjs"
yield client
@pytest.fixture(name="controller_node")
def controller_node_fixture(client, controller_node_state):
"""Mock a controller node."""
node = Node(client, copy.deepcopy(controller_node_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="multisensor_6")
def multisensor_6_fixture(client, multisensor_6_state):
"""Mock a multisensor 6 node."""
node = Node(client, copy.deepcopy(multisensor_6_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="ecolink_door_sensor")
def legacy_binary_sensor_fixture(client, ecolink_door_sensor_state):
"""Mock a legacy_binary_sensor node."""
node = Node(client, copy.deepcopy(ecolink_door_sensor_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="hank_binary_switch")
def hank_binary_switch_fixture(client, hank_binary_switch_state):
"""Mock a binary switch node."""
node = Node(client, copy.deepcopy(hank_binary_switch_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="bulb_6_multi_color")
def bulb_6_multi_color_fixture(client, bulb_6_multi_color_state):
"""Mock a bulb 6 multi-color node."""
node = Node(client, copy.deepcopy(bulb_6_multi_color_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="light_color_null_values")
def light_color_null_values_fixture(client, light_color_null_values_state):
"""Mock a node with current color value item being null."""
node = Node(client, copy.deepcopy(light_color_null_values_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="eaton_rf9640_dimmer")
def eaton_rf9640_dimmer_fixture(client, eaton_rf9640_dimmer_state):
"""Mock a Eaton RF9640 (V4 compatible) dimmer node."""
node = Node(client, copy.deepcopy(eaton_rf9640_dimmer_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_schlage_be469")
def lock_schlage_be469_fixture(client, lock_schlage_be469_state):
"""Mock a schlage lock node."""
node = Node(client, copy.deepcopy(lock_schlage_be469_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_august_pro")
def lock_august_asl03_fixture(client, lock_august_asl03_state):
"""Mock a August Pro lock node."""
node = Node(client, copy.deepcopy(lock_august_asl03_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_radio_thermostat_ct100_plus")
def climate_radio_thermostat_ct100_plus_fixture(
client, climate_radio_thermostat_ct100_plus_state
):
"""Mock a climate radio thermostat ct100 plus node."""
node = Node(client, copy.deepcopy(climate_radio_thermostat_ct100_plus_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_radio_thermostat_ct100_plus_different_endpoints")
def climate_radio_thermostat_ct100_plus_different_endpoints_fixture(
client, climate_radio_thermostat_ct100_plus_different_endpoints_state
):
"""Mock a climate radio thermostat ct100 plus node with values on different endpoints."""
node = Node(
client,
copy.deepcopy(climate_radio_thermostat_ct100_plus_different_endpoints_state),
)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_adc_t3000")
def climate_adc_t3000_fixture(client, climate_adc_t3000_state):
"""Mock a climate ADC-T3000 node."""
node = Node(client, copy.deepcopy(climate_adc_t3000_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_adc_t3000_missing_setpoint")
def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state):
"""Mock a climate ADC-T3000 node with missing de-humidify setpoint."""
data = copy.deepcopy(climate_adc_t3000_state)
data["name"] = f"{data['name']} missing setpoint"
for value in data["values"][:]:
if (
value["commandClassName"] == "Humidity Control Setpoint"
and value["propertyKeyName"] == "De-humidifier"
):
data["values"].remove(value)
node = Node(client, data)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_adc_t3000_missing_mode")
def climate_adc_t3000_missing_mode_fixture(client, climate_adc_t3000_state):
"""Mock a climate ADC-T3000 node with missing mode setpoint."""
data = copy.deepcopy(climate_adc_t3000_state)
data["name"] = f"{data['name']} missing mode"
for value in data["values"]:
if value["commandClassName"] == "Humidity Control Mode":
states = value["metadata"]["states"]
for key in list(states.keys()):
if states[key] == "De-humidify":
del states[key]
node = Node(client, data)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_adc_t3000_missing_fan_mode_states")
def climate_adc_t3000_missing_fan_mode_states_fixture(client, climate_adc_t3000_state):
"""Mock a climate ADC-T3000 node with missing 'states' metadata on Thermostat Fan Mode."""
data = copy.deepcopy(climate_adc_t3000_state)
data["name"] = f"{data['name']} missing fan mode states"
for value in data["values"]:
if (
value["commandClassName"] == "Thermostat Fan Mode"
and value["property"] == "mode"
):
del value["metadata"]["states"]
node = Node(client, data)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_danfoss_lc_13")
def climate_danfoss_lc_13_fixture(client, climate_danfoss_lc_13_state):
"""Mock a climate radio danfoss LC-13 node."""
node = Node(client, copy.deepcopy(climate_danfoss_lc_13_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_eurotronic_spirit_z")
def climate_eurotronic_spirit_z_fixture(client, climate_eurotronic_spirit_z_state):
"""Mock a climate radio danfoss LC-13 node."""
node = Node(client, climate_eurotronic_spirit_z_state)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_heatit_z_trm3_no_value")
def climate_heatit_z_trm3_no_value_fixture(
client, climate_heatit_z_trm3_no_value_state
):
"""Mock a climate radio HEATIT Z-TRM3 node."""
node = Node(client, copy.deepcopy(climate_heatit_z_trm3_no_value_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_heatit_z_trm3")
def climate_heatit_z_trm3_fixture(client, climate_heatit_z_trm3_state):
"""Mock a climate radio HEATIT Z-TRM3 node."""
node = Node(client, copy.deepcopy(climate_heatit_z_trm3_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_heatit_z_trm2fx")
def climate_heatit_z_trm2fx_fixture(client, climate_heatit_z_trm2fx_state):
"""Mock a climate radio HEATIT Z-TRM2fx node."""
node = Node(client, copy.deepcopy(climate_heatit_z_trm2fx_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="nortek_thermostat")
def nortek_thermostat_fixture(client, nortek_thermostat_state):
"""Mock a nortek thermostat node."""
node = Node(client, copy.deepcopy(nortek_thermostat_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="srt321_hrt4_zw")
def srt321_hrt4_zw_fixture(client, srt321_hrt4_zw_state):
"""Mock a HRT4-ZW / SRT321 / SRT322 thermostat node."""
node = Node(client, copy.deepcopy(srt321_hrt4_zw_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="aeotec_radiator_thermostat")
def aeotec_radiator_thermostat_fixture(client, aeotec_radiator_thermostat_state):
"""Mock a Aeotec thermostat node."""
node = Node(client, aeotec_radiator_thermostat_state)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="nortek_thermostat_added_event")
def nortek_thermostat_added_event_fixture(client):
"""Mock a Nortek thermostat node added event."""
event_data = json.loads(load_fixture("zwave_js/nortek_thermostat_added_event.json"))
event = Event("node added", event_data)
return event
@pytest.fixture(name="nortek_thermostat_removed_event")
def nortek_thermostat_removed_event_fixture(client):
"""Mock a Nortek thermostat node removed event."""
event_data = json.loads(
load_fixture("zwave_js/nortek_thermostat_removed_event.json")
)
event = Event("node removed", event_data)
return event
@pytest.fixture(name="integration")
async def integration_fixture(hass, client):
"""Set up the zwave_js integration."""
entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
client.async_send_command.reset_mock()
return entry
@pytest.fixture(name="chain_actuator_zws12")
def window_cover_fixture(client, chain_actuator_zws12_state):
"""Mock a window cover node."""
node = Node(client, copy.deepcopy(chain_actuator_zws12_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="fan_generic")
def fan_generic_fixture(client, fan_generic_state):
"""Mock a fan node."""
node = Node(client, copy.deepcopy(fan_generic_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="hs_fc200")
def hs_fc200_fixture(client, hs_fc200_state):
"""Mock a fan node."""
node = Node(client, copy.deepcopy(hs_fc200_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="leviton_zw4sf")
def leviton_zw4sf_fixture(client, leviton_zw4sf_state):
"""Mock a fan node."""
node = Node(client, copy.deepcopy(leviton_zw4sf_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="null_name_check")
def null_name_check_fixture(client, null_name_check_state):
"""Mock a node with no name."""
node = Node(client, copy.deepcopy(null_name_check_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="gdc_zw062")
def motorized_barrier_cover_fixture(client, gdc_zw062_state):
"""Mock a motorized barrier node."""
node = Node(client, copy.deepcopy(gdc_zw062_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="iblinds_v2")
def iblinds_cover_fixture(client, iblinds_v2_state):
"""Mock an iBlinds v2.0 window cover node."""
node = Node(client, copy.deepcopy(iblinds_v2_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="qubino_shutter")
def qubino_shutter_cover_fixture(client, qubino_shutter_state):
"""Mock a Qubino flush shutter node."""
node = Node(client, copy.deepcopy(qubino_shutter_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="aeotec_nano_shutter")
def aeotec_nano_shutter_cover_fixture(client, aeotec_nano_shutter_state):
"""Mock a Aeotec Nano Shutter node."""
node = Node(client, copy.deepcopy(aeotec_nano_shutter_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="fibaro_fgr222_shutter")
def fibaro_fgr222_shutter_cover_fixture(client, fibaro_fgr222_shutter_state):
"""Mock a Fibaro FGR222 Shutter node."""
node = Node(client, copy.deepcopy(fibaro_fgr222_shutter_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="aeon_smart_switch_6")
def aeon_smart_switch_6_fixture(client, aeon_smart_switch_6_state):
"""Mock an AEON Labs (ZW096) Smart Switch 6 node."""
node = Node(client, aeon_smart_switch_6_state)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="ge_12730")
def ge_12730_fixture(client, ge_12730_state):
"""Mock a GE 12730 fan controller node."""
node = Node(client, copy.deepcopy(ge_12730_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="inovelli_lzw36")
def inovelli_lzw36_fixture(client, inovelli_lzw36_state):
"""Mock a Inovelli LZW36 fan controller node."""
node = Node(client, copy.deepcopy(inovelli_lzw36_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_id_lock_as_id150")
def lock_id_lock_as_id150(client, lock_id_lock_as_id150_state):
"""Mock an id lock id-150 lock node."""
node = Node(client, copy.deepcopy(lock_id_lock_as_id150_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_id_lock_as_id150_not_ready")
def node_not_ready(client, lock_id_lock_as_id150_state):
"""Mock an id lock id-150 lock node that's not ready."""
state = copy.deepcopy(lock_id_lock_as_id150_state)
state["ready"] = False
node = Node(client, state)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_radio_thermostat_ct101_multiple_temp_units")
def climate_radio_thermostat_ct101_multiple_temp_units_fixture(
client, climate_radio_thermostat_ct101_multiple_temp_units_state
):
"""Mock a climate device with multiple temp units node."""
node = Node(
client, copy.deepcopy(climate_radio_thermostat_ct101_multiple_temp_units_state)
)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(
name="climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints"
)
def climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_fixture(
client,
climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state,
):
"""Mock a climate device with mode and setpoint on differenet endpoints node."""
node = Node(
client,
copy.deepcopy(
climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state
),
)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="vision_security_zl7432")
def vision_security_zl7432_fixture(client, vision_security_zl7432_state):
"""Mock a vision security zl7432 node."""
node = Node(client, copy.deepcopy(vision_security_zl7432_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="zen_31")
def zen_31_fixture(client, zen_31_state):
"""Mock a bulb 6 multi-color node."""
node = Node(client, copy.deepcopy(zen_31_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="wallmote_central_scene")
def wallmote_central_scene_fixture(client, wallmote_central_scene_state):
"""Mock a wallmote central scene node."""
node = Node(client, copy.deepcopy(wallmote_central_scene_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="ge_in_wall_dimmer_switch")
def ge_in_wall_dimmer_switch_fixture(client, ge_in_wall_dimmer_switch_state):
"""Mock a ge in-wall dimmer switch scene node."""
node = Node(client, copy.deepcopy(ge_in_wall_dimmer_switch_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="aeotec_zw164_siren")
def aeotec_zw164_siren_fixture(client, aeotec_zw164_siren_state):
"""Mock a aeotec zw164 siren node."""
node = Node(client, copy.deepcopy(aeotec_zw164_siren_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_popp_electric_strike_lock_control")
def lock_popp_electric_strike_lock_control_fixture(
client, lock_popp_electric_strike_lock_control_state
):
"""Mock a popp electric strike lock control node."""
node = Node(client, copy.deepcopy(lock_popp_electric_strike_lock_control_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="fortrezz_ssa1_siren")
def fortrezz_ssa1_siren_fixture(client, fortrezz_ssa1_siren_state):
"""Mock a fortrezz ssa1 siren node."""
node = Node(client, copy.deepcopy(fortrezz_ssa1_siren_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="fortrezz_ssa3_siren")
def fortrezz_ssa3_siren_fixture(client, fortrezz_ssa3_siren_state):
"""Mock a fortrezz ssa3 siren node."""
node = Node(client, copy.deepcopy(fortrezz_ssa3_siren_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="firmware_file")
def firmware_file_fixture():
"""Return mock firmware file stream."""
return io.BytesIO(bytes(10))
@pytest.fixture(name="zp3111_not_ready")
def zp3111_not_ready_fixture(client, zp3111_not_ready_state):
"""Mock a zp3111 4-in-1 sensor node in a not-ready state."""
node = Node(client, copy.deepcopy(zp3111_not_ready_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="zp3111")
def zp3111_fixture(client, zp3111_state):
"""Mock a zp3111 4-in-1 sensor node."""
node = Node(client, copy.deepcopy(zp3111_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="express_controls_ezmultipli")
def express_controls_ezmultipli_fixture(client, express_controls_ezmultipli_state):
"""Mock a Express Controls EZMultiPli node."""
node = Node(client, copy.deepcopy(express_controls_ezmultipli_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_home_connect_620")
def lock_home_connect_620_fixture(client, lock_home_connect_620_state):
"""Mock a Home Connect 620 lock node."""
node = Node(client, copy.deepcopy(lock_home_connect_620_state))
client.driver.controller.nodes[node.node_id] = node
return node
| [
"noreply@github.com"
] | elupus.noreply@github.com |
d2c84c2cfac6a26d53336cc831694263d2a11349 | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/munition/component/shared_enhanced_charge_composition.py | eebfa2798168c09b5f44a3fe87ea1a262e586d88 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 475 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/munition/component/shared_enhanced_charge_composition.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | [
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
ef2f8af259d7490fa0b6e5f825bd56b216c07e92 | 597b82737635e845fd5360e191f323669af1b2ae | /08_full_django/login_registration_2/login_registration_2/wsgi.py | 663215d1fbccb9d37f487ac45f6c023f09112572 | [] | no_license | twknab/learning-python | 1bd10497fbbe181a26f2070c147cb2fed6955178 | 75b76b2a607439aa2d8db675738adf8d3b8644df | refs/heads/master | 2021-08-08T08:50:04.337490 | 2017-11-10T00:28:45 | 2017-11-10T00:28:45 | 89,213,845 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | """
WSGI config for login_registration project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "login_registration_2.settings")
application = get_wsgi_application()
| [
"natureminded@users.noreply.github.com"
] | natureminded@users.noreply.github.com |
1abac6f5607e39fabc23ddc8411528b74bb9765f | 5c94e032b2d43ac347f6383d0a8f0c03ec3a0485 | /Push/sysex.py | 6322a4228abd634fc27012bc7522b1d1ea89d65f | [] | no_license | Elton47/Ableton-MRS-10.1.13 | 997f99a51157bd2a2bd1d2dc303e76b45b1eb93d | 54bb64ba5e6be52dd6b9f87678ee3462cc224c8a | refs/heads/master | 2022-07-04T01:35:27.447979 | 2020-05-14T19:02:09 | 2020-05-14T19:02:09 | 263,990,585 | 0 | 0 | null | 2020-05-14T18:12:04 | 2020-05-14T18:12:03 | null | UTF-8 | Python | false | false | 3,655 | py | # uncompyle6 version 3.6.7
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.17 (default, Dec 23 2019, 21:25:33)
# [GCC 4.2.1 Compatible Apple LLVM 11.0.0 (clang-1100.0.33.16)]
# Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push/sysex.py
# Compiled at: 2020-01-09 15:21:34
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import group, in_range
from pushbase.touch_strip_element import TouchStripModes, TouchStripStates
START = (240, 71, 127, 21)
CLEAR_LINE1 = START + (28, 0, 0, 247)
CLEAR_LINE2 = START + (29, 0, 0, 247)
CLEAR_LINE3 = START + (30, 0, 0, 247)
CLEAR_LINE4 = START + (31, 0, 0, 247)
WRITE_LINE1 = START + (24, 0, 69, 0)
WRITE_LINE2 = START + (25, 0, 69, 0)
WRITE_LINE3 = START + (26, 0, 69, 0)
WRITE_LINE4 = START + (27, 0, 69, 0)
WELCOME_MESSAGE = START + (1, 1, 247)
GOOD_BYE_MESSAGE = START + (1, 0, 247)
ALL_PADS_SENSITIVITY_PREFIX = START + (93, 0, 32)
PAD_SENSITIVITY_PREFIX = START + (90, 0, 33)
PAD_PARAMETER_PREFIX = START + (71, 0, 9)
DEFAULT_PEAK_SAMPLING_TIME = 50
DEFAULT_AFTERTOUCH_THRESHOLD = 0
DEFAULT_AFTERTOUCH_GATE_TIME = 500
SET_AFTERTOUCH_MODE = START + (92, 0, 1)
POLY_AFTERTOUCH = (0, )
MONO_AFTERTOUCH = (1, )
MODE_CHANGE = START + (98, 0, 1)
def make_pad_parameter_message(aftertouch_threshold=DEFAULT_AFTERTOUCH_THRESHOLD, peak_sampling_time=DEFAULT_PEAK_SAMPLING_TIME, aftertouch_gate_time=DEFAULT_AFTERTOUCH_GATE_TIME):
    """
    Build the payload for a pad parameter sysex message: the sampling time
    and gate time encoded as four 4-bit nibbles each (via to_bytes), followed
    by the raw aftertouch threshold byte.
    """
    assert 0 <= aftertouch_threshold < 128
    sampling_nibbles = to_bytes(peak_sampling_time, 4)
    gate_nibbles = to_bytes(aftertouch_gate_time, 4)
    return sampling_nibbles + gate_nibbles + (aftertouch_threshold,)
def to_sysex_int(number, unused_parameter_name):
    """
    Encode a 16-bit value as four 4-bit nibbles, most significant first.
    The second argument is documentation only (a human-readable parameter
    name) and is never used.
    """
    nibbles = []
    for shift in (12, 8, 4, 0):
        nibbles.append(number >> shift & 15)
    return tuple(nibbles)
# One-shot pad calibration message; each numeric setting is encoded as four
# 4-bit nibbles by to_sysex_int (whose second argument is documentation only).
CALIBRATION_SET = START + (87, 0, 20) + to_sysex_int(215, 'Preload Scale Factor') + to_sysex_int(1000, 'Recalibration Interval') + to_sysex_int(200, 'Stuck Pad Detection Threshold') + to_sysex_int(0, 'Stuck Pad NoteOff Threshold Adder') + to_sysex_int(200, 'Pad Ignore Time') + (247, )
# Standard MIDI universal identity request and the expected reply prefix,
# used to recognize the hardware on connection.
IDENTITY_ENQUIRY = (240, 126, 0, 6, 1, 247)
IDENTITY_PREFIX = (240, 126, 0, 6, 2, 71, 21, 0, 25)
# Challenge/response ("dongle") message prefixes.
DONGLE_ENQUIRY_PREFIX = START + (80, )
DONGLE_PREFIX = START + (81, )
def make_presentation_message(application):
    """
    Build the sysex message that announces Live's version (major, minor,
    bugfix) to the hardware.
    """
    version_bytes = (
        application.get_major_version(),
        application.get_minor_version(),
        application.get_bugfix_version(),
    )
    return START + (96, 0, 4, 65) + version_bytes + (247,)
# Order matters: the position of a mode in this list is the byte value sent
# to the hardware (see make_touch_strip_mode_message, which uses .index()).
TOUCHSTRIP_MODE_TO_VALUE = [
    TouchStripModes.CUSTOM_PITCHBEND,
    TouchStripModes.CUSTOM_VOLUME,
    TouchStripModes.CUSTOM_PAN,
    TouchStripModes.CUSTOM_DISCRETE,
    TouchStripModes.CUSTOM_FREE,
    TouchStripModes.PITCHBEND,
    TouchStripModes.VOLUME,
    TouchStripModes.PAN,
    TouchStripModes.DISCRETE,
    TouchStripModes.MODWHEEL]
def make_touch_strip_mode_message(mode):
    """
    Build the sysex message selecting a touch strip mode; the mode's byte
    value is its position in TOUCHSTRIP_MODE_TO_VALUE.
    """
    mode_value = TOUCHSTRIP_MODE_TO_VALUE.index(mode)
    return START + (99, 0, 1, mode_value, 247)
# 2-bit hardware encoding for each logical touch strip LED state
# (value 2 is unused).
TOUCHSTRIP_STATE_TO_VALUE = {TouchStripStates.STATE_OFF: 0,
TouchStripStates.STATE_HALF: 1,
TouchStripStates.STATE_FULL: 3}
def make_touch_strip_light_message(state):
    """
    Build the sysex message that sets all touch strip LEDs.

    Each LED state is mapped to a 2-bit value; groups of three values are
    packed into one byte (first state in the lowest two bits), giving the
    8-byte payload announced by the (100, 0, 8) header.
    """
    values = [ TOUCHSTRIP_STATE_TO_VALUE[s] for s in state ]
    packed = []
    for value_group in group(values, 3):
        byte = 0
        for position, value in enumerate(value_group):
            byte |= value << 2 * position
        packed.append(byte)
    return START + (100, 0, 8) + tuple(packed) + (247, )
def to_bytes(number, size):
    u"""
    Split *number* into *size* 4-bit nibbles, ordered from most significant
    to least significant. Asserts that the value fits in size*4 bits.
    """
    assert in_range(number, 0, 1 << size * 4)
    nibbles = []
    for offset in xrange((size - 1) * 4, -1, -4):
        nibbles.append(number >> offset & 15)
    return tuple(nibbles)
"ahmed.emerah@icloud.com"
] | ahmed.emerah@icloud.com |
aa7b172ff58d05379109ded6c2595ea34f13b028 | 673317f52e04401fd8c5f89282b56120ad6315df | /src/pretix/plugins/ticketoutputpdf/signals.py | 4b1ba025fab0a71b28fe112addcedb642a853ef0 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | barseghyanartur/pretix | 0218a20bc0f7a1bac59aa03bc83448c33eccdbff | 05bafd0db5a9048f585cc8431b92851e15ba87eb | refs/heads/master | 2020-03-18T22:17:20.328286 | 2018-05-29T08:39:41 | 2018-05-29T08:39:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | from functools import partial
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from pretix.base.models import QuestionAnswer
from pretix.base.signals import ( # NOQA: legacy import
event_copy_data, layout_text_variables, register_data_exporters,
register_ticket_outputs,
)
from pretix.presale.style import ( # NOQA: legacy import
get_fonts, register_fonts,
)
@receiver(register_ticket_outputs, dispatch_uid="output_pdf")
def register_ticket_outputs(sender, **kwargs):
    """Provide the PDF ticket output class to pretix's plugin registry."""
    # NOTE: this function shadows the imported `register_ticket_outputs`
    # signal after definition; the @receiver decorator has already bound the
    # handler by then, so the shadowing is harmless (but confusing).
    # Import is local to avoid loading the output machinery at startup.
    from .ticketoutput import PdfTicketOutput
    return PdfTicketOutput
@receiver(register_data_exporters, dispatch_uid="dataexport_pdf")
def register_data(sender, **kwargs):
    """Provide the all-tickets PDF exporter to pretix's exporter registry."""
    # Import is local to avoid loading the exporter machinery at startup.
    from .exporters import AllTicketsPDF
    return AllTicketsPDF
def get_answer(op, order, event, question_id):
    """
    Return the order position's answer to the given question, with newlines
    converted to HTML line breaks, or "" when the question is unanswered.
    (`order` and `event` are unused but required by the evaluate-callback
    signature.)
    """
    try:
        answer = op.answers.get(question_id=question_id)
    except QuestionAnswer.DoesNotExist:
        return ""
    return str(answer).replace("\n", "<br/>\n")
@receiver(layout_text_variables, dispatch_uid="pretix_ticketoutputpdf_layout_text_variables_questions")
def variables_from_questions(sender, *args, **kwargs):
    """
    Expose one PDF layout text variable per event question, keyed
    'question_<pk>', whose value is resolved via get_answer at render time.
    """
    return {
        'question_{}'.format(q.pk): {
            'label': _('Question: {question}').format(question=q.question),
            'editor_sample': _('<Answer: {question}>').format(question=q.question),
            'evaluate': partial(get_answer, question_id=q.pk)
        }
        for q in sender.questions.all()
    }
@receiver(signal=event_copy_data, dispatch_uid="pretix_ticketoutputpdf_copy_data")
def event_copy_data_receiver(sender, other, question_map, **kwargs):
    """When an event is copied, rewrite question references in the copied
    event's PDF ticket layout to point at the copied questions."""
    layout = sender.settings.get('ticketoutput_pdf_layout', as_type=list)
    if not layout:
        return
    for o in layout:
        if o['type'] == 'textarea':
            if o['content'].startswith('question_'):
                # Strip the 'question_' prefix (9 chars) to get the old pk and
                # map it to the copied question's pk.
                # NOTE(review): question_map.get(..., 0) falls back to the int 0,
                # on which `.pk` would raise AttributeError — presumably every
                # referenced question is always present in question_map; verify.
                o['content'] = 'question_{}'.format(question_map.get(int(o['content'][9:]), 0).pk)
    sender.settings.set('ticketoutput_pdf_layout', list(layout))
| [
"mail@raphaelmichel.de"
] | mail@raphaelmichel.de |
37a055a130168f8bb3f4a7ce8dc15be15511155c | ac81f7f0160571a7e601d9808d424d2c407573b6 | /0392-Is-Subsequence.py | 3c57686253a10c3e7ae98adb8e18d2e9278d9c57 | [] | no_license | nirmalnishant645/LeetCode | 61d74c152deb0e7fb991065ee91f6f7102d7bbc6 | 8bdb4583187ee181ca626063d7684dcc64c80be3 | refs/heads/master | 2022-08-14T04:10:23.110116 | 2022-07-13T06:27:18 | 2022-07-13T06:27:18 | 227,960,574 | 53 | 19 | null | 2021-01-16T17:26:30 | 2019-12-14T03:31:54 | Python | UTF-8 | Python | false | false | 1,103 | py | '''
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
Follow up:
If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?
'''
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True if s is a subsequence of t.

        A single shared iterator over t is consumed by successive membership
        tests: each `ch in it` advances the iterator past the first match, so
        characters of s must be found in order. O(len(t)) time, O(1) space.
        """
        remaining = iter(t)
        return all(ch in remaining for ch in s)
| [
"noreply@github.com"
] | nirmalnishant645.noreply@github.com |
2ddee8e373f05456f1e32c5856b3f3f9bffed03c | 90b5a86b07745561267fde367259b9f48da3ca74 | /apps/categorys/migrations/0002_auto_20180507_1612.py | 6079d5926e69d480553e84fad584aec7caa588d6 | [] | no_license | enjoy-binbin/pyblog | fb8bfc6017595412850faf20ba4ce0c8e5ee761b | 47e93d67dbfd8acf58bfb7e2e15c0c6cce32ef6e | refs/heads/master | 2021-08-11T06:38:52.959002 | 2019-02-06T13:46:50 | 2019-02-06T13:46:50 | 166,924,461 | 2 | 0 | null | 2021-08-09T20:47:43 | 2019-01-22T03:59:51 | Python | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-05-07 16:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Category.desc to a plain TextField
    # labelled '分类描述' ("category description").

    dependencies = [
        ('categorys', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='desc',
            field=models.TextField(verbose_name='分类描述'),
        ),
    ]
| [
"binloveplay1314@qq.com"
] | binloveplay1314@qq.com |
0d33b3146a5d87f4bd3f2197443f3de225c55f6e | 1e9ef3df6b65d53127f1858dbf625cd31874fb11 | /REST-api/rest/migrations/0002_auto_20161022_2237.py | 35388c20b14429bdd7fd8eda173603248d6e7f11 | [] | no_license | Matvey-Kuk/openbmp-as-path-planet | 9007c9b86009b4576638b53f45e35d429c1ed55f | ddd2c83861aa7540bd9eb83e75b5072f1e20d322 | refs/heads/master | 2021-01-12T14:58:48.600762 | 2016-10-23T02:16:00 | 2016-10-23T02:16:00 | 71,657,356 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-22 22:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Prefix model, renames
    # PathUpdate.name -> path, and links each PathUpdate to a Prefix.

    dependencies = [
        ('rest', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Prefix',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prefix', models.CharField(max_length=1000)),
            ],
        ),
        migrations.RenameField(
            model_name='pathupdate',
            old_name='name',
            new_name='path',
        ),
        migrations.AddField(
            model_name='pathupdate',
            name='prefix',
            # default=0 is only a throwaway value used to backfill existing
            # rows during the schema change (hence preserve_default=False).
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='paths', to='rest.Prefix'),
            preserve_default=False,
        ),
    ]
| [
"motakuk@gmail.com"
] | motakuk@gmail.com |
42341246ba9ecfedf89645e2ee529b7cfc00baef | e79c4d4a633e8578ef8fbadd5937140ad1761e5b | /src/wellsfargo/models/transfers.py | a85e657729c878b01103128f49763595bd26e565 | [
"ISC"
] | permissive | thelabnyc/django-oscar-wfrs | ca9a4737e8b6575bde21705179c67212ad47df16 | 61548f074ffd7ce41e31b3bc9a571d569a8f8248 | refs/heads/master | 2023-05-26T14:41:43.366400 | 2023-05-17T17:35:20 | 2023-05-17T17:35:20 | 59,534,130 | 1 | 2 | ISC | 2022-12-08T05:06:17 | 2016-05-24T02:32:09 | Python | UTF-8 | Python | false | false | 3,002 | py | from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property
from oscar.core.loading import get_model
from oscar.models.fields import NullCharField
from ..core.constants import (
TRANS_TYPE_AUTH,
TRANS_TYPES,
TRANS_STATUSES,
)
from .mixins import AccountNumberMixin
class TransferMetadata(AccountNumberMixin, models.Model):
    """
    Store WFRS specific metadata about a transfer
    """

    # Oscar user who requested the transfer. Optional (null/blank), so a
    # transfer can exist without a user; deleting the user cascades here.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_("Requesting User"),
        related_name="wfrs_transfers",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    merchant_name = NullCharField(_("Merchant Name"), max_length=200)
    merchant_num = NullCharField(_("Merchant Number"), max_length=200)
    # Matches oscar payment Transaction.reference — the join key used by
    # get_by_oscar_transaction() / get_oscar_transaction() below.
    merchant_reference = models.CharField(max_length=128, null=True)
    amount = models.DecimalField(decimal_places=2, max_digits=12)
    # Wells Fargo transaction type code; see TRANS_TYPES for the choices.
    type_code = models.CharField(
        _("Transaction Type"), choices=TRANS_TYPES, max_length=2
    )
    ticket_number = models.CharField(
        _("Ticket Number"), null=True, blank=True, max_length=12
    )
    # SET_NULL keeps the transfer record even if the financing plan is deleted.
    financing_plan = models.ForeignKey(
        "wellsfargo.FinancingPlan",
        verbose_name=_("Plan Number"),
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
    )
    auth_number = models.CharField(
        _("Authorization Number"), null=True, blank=True, max_length=6, default="000000"
    )
    status = models.CharField(_("Status"), choices=TRANS_STATUSES, max_length=2)
    message = models.TextField(_("Message"))
    disclosure = models.TextField(_("Disclosure"))
    created_datetime = models.DateTimeField(_("Created"), auto_now_add=True)
    modified_datetime = models.DateTimeField(_("Modified"), auto_now=True)

    @classmethod
    def get_by_oscar_transaction(cls, transaction, type_code=TRANS_TYPE_AUTH):
        """Return the newest transfer matching the Oscar transaction's
        reference and the given type code, or None when none exists."""
        return (
            cls.objects.filter(merchant_reference=transaction.reference)
            .filter(type_code=type_code)
            .order_by("-created_datetime")
            .first()
        )

    @property
    def type_name(self):
        # Human-readable label for type_code; None for unknown codes.
        return dict(TRANS_TYPES).get(self.type_code)

    @property
    def status_name(self):
        # Human-readable label for status; None for unknown codes.
        return dict(TRANS_STATUSES).get(self.status)

    @property
    def financing_plan_number(self):
        return self.financing_plan.plan_number if self.financing_plan else None

    @cached_property
    def order(self):
        # Cached: get_order() performs up to two database queries.
        return self.get_order()

    def get_oscar_transaction(self):
        """Return the Oscar payment Transaction with this transfer's
        merchant_reference, or None when no match exists."""
        Transaction = get_model("payment", "Transaction")
        try:
            return Transaction.objects.get(reference=self.merchant_reference)
        except Transaction.DoesNotExist:
            return None

    def get_order(self):
        """Resolve the Oscar order through the payment transaction's source;
        returns None when no transaction matches merchant_reference."""
        transaction = self.get_oscar_transaction()
        if not transaction:
            return None
        return transaction.source.order
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
bd1c1e2f2d69867f68e05e068ba68387f6d45788 | a7ded5d3d19a98e61a44189cffe3703f7938e0db | /xero_python/payrolluk/models/earnings_rate.py | 8ce25c8cc1f945fa7a7d80094f28ab937d8fb96d | [
"MIT"
] | permissive | liseekeralbert/xero-python | dfd1076344f763d74f81f701e32600cf88bcc7b2 | d27ab1894ecd84d2a9af0ca91583593756b21ab3 | refs/heads/master | 2022-12-16T07:41:14.331308 | 2020-09-18T17:12:35 | 2020-09-18T17:12:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,527 | py | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.3.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class EarningsRate(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> Python type; used by the serialization machinery.
    openapi_types = {
        "earnings_rate_id": "str",
        "name": "str",
        "earnings_type": "str",
        "rate_type": "str",
        "type_of_units": "str",
        "current_record": "bool",
        "expense_account_id": "str",
        "rate_per_unit": "float",
        "multiple_of_ordinary_earnings_rate": "float",
        "fixed_amount": "float",
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        "earnings_rate_id": "earningsRateID",
        "name": "name",
        "earnings_type": "earningsType",
        "rate_type": "rateType",
        "type_of_units": "typeOfUnits",
        "current_record": "currentRecord",
        "expense_account_id": "expenseAccountID",
        "rate_per_unit": "ratePerUnit",
        "multiple_of_ordinary_earnings_rate": "multipleOfOrdinaryEarningsRate",
        "fixed_amount": "fixedAmount",
    }

    def __init__(
        self,
        earnings_rate_id=None,
        name=None,
        earnings_type=None,
        rate_type=None,
        type_of_units=None,
        current_record=None,
        expense_account_id=None,
        rate_per_unit=None,
        multiple_of_ordinary_earnings_rate=None,
        fixed_amount=None,
    ):  # noqa: E501
        """EarningsRate - a model defined in OpenAPI"""  # noqa: E501
        self._earnings_rate_id = None
        self._name = None
        self._earnings_type = None
        self._rate_type = None
        self._type_of_units = None
        self._current_record = None
        self._expense_account_id = None
        self._rate_per_unit = None
        self._multiple_of_ordinary_earnings_rate = None
        self._fixed_amount = None
        self.discriminator = None

        # Required fields (name, earnings_type, rate_type, type_of_units,
        # expense_account_id) are assigned unconditionally so their setters
        # raise ValueError when the value is None; optional fields are only
        # assigned when provided.
        if earnings_rate_id is not None:
            self.earnings_rate_id = earnings_rate_id
        self.name = name
        self.earnings_type = earnings_type
        self.rate_type = rate_type
        self.type_of_units = type_of_units
        if current_record is not None:
            self.current_record = current_record
        self.expense_account_id = expense_account_id
        if rate_per_unit is not None:
            self.rate_per_unit = rate_per_unit
        if multiple_of_ordinary_earnings_rate is not None:
            self.multiple_of_ordinary_earnings_rate = multiple_of_ordinary_earnings_rate
        if fixed_amount is not None:
            self.fixed_amount = fixed_amount

    @property
    def earnings_rate_id(self):
        """Gets the earnings_rate_id of this EarningsRate.  # noqa: E501

        Xero unique identifier for an earning rate  # noqa: E501

        :return: The earnings_rate_id of this EarningsRate.  # noqa: E501
        :rtype: str
        """
        return self._earnings_rate_id

    @earnings_rate_id.setter
    def earnings_rate_id(self, earnings_rate_id):
        """Sets the earnings_rate_id of this EarningsRate.

        Xero unique identifier for an earning rate  # noqa: E501

        :param earnings_rate_id: The earnings_rate_id of this EarningsRate.  # noqa: E501
        :type: str
        """

        self._earnings_rate_id = earnings_rate_id

    @property
    def name(self):
        """Gets the name of this EarningsRate.  # noqa: E501

        Name of the earning rate  # noqa: E501

        :return: The name of this EarningsRate.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this EarningsRate.

        Name of the earning rate  # noqa: E501

        :param name: The name of this EarningsRate.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if name is None:
            raise ValueError(
                "Invalid value for `name`, must not be `None`"
            )  # noqa: E501

        self._name = name

    @property
    def earnings_type(self):
        """Gets the earnings_type of this EarningsRate.  # noqa: E501

        Indicates how an employee will be paid when taking this type of earning  # noqa: E501

        :return: The earnings_type of this EarningsRate.  # noqa: E501
        :rtype: str
        """
        return self._earnings_type

    @earnings_type.setter
    def earnings_type(self, earnings_type):
        """Sets the earnings_type of this EarningsRate.

        Indicates how an employee will be paid when taking this type of earning  # noqa: E501

        :param earnings_type: The earnings_type of this EarningsRate.  # noqa: E501
        :type: str
        """
        # Required field constrained to the enum below.
        if earnings_type is None:
            raise ValueError(
                "Invalid value for `earnings_type`, must not be `None`"
            )  # noqa: E501
        allowed_values = [
            "Allowance",
            "Backpay",
            "Bonus",
            "Commission",
            "LumpSum",
            "OtherEarnings",
            "OvertimeEarnings",
            "RegularEarnings",
            "StatutoryAdoptionPay",
            "StatutoryMaternityPay",
            "StatutoryPaternityPay",
            "StatutorySharedParentalPay",
            "StatutorySickPay",
            "Tips(Direct)",
            "Tips(Non-Direct)",
            "None",
        ]  # noqa: E501
        if earnings_type not in allowed_values:
            raise ValueError(
                "Invalid value for `earnings_type` ({0}), must be one of {1}".format(  # noqa: E501
                    earnings_type, allowed_values
                )
            )

        self._earnings_type = earnings_type

    @property
    def rate_type(self):
        """Gets the rate_type of this EarningsRate.  # noqa: E501

        Indicates the type of the earning rate  # noqa: E501

        :return: The rate_type of this EarningsRate.  # noqa: E501
        :rtype: str
        """
        return self._rate_type

    @rate_type.setter
    def rate_type(self, rate_type):
        """Sets the rate_type of this EarningsRate.

        Indicates the type of the earning rate  # noqa: E501

        :param rate_type: The rate_type of this EarningsRate.  # noqa: E501
        :type: str
        """
        # Required field constrained to the enum below; the chosen value
        # determines which of rate_per_unit / multiple_of_ordinary_earnings_rate
        # / fixed_amount is applicable.
        if rate_type is None:
            raise ValueError(
                "Invalid value for `rate_type`, must not be `None`"
            )  # noqa: E501
        allowed_values = [
            "RatePerUnit",
            "MultipleOfOrdinaryEarningsRate",
            "FixedAmount",
            "None",
        ]  # noqa: E501
        if rate_type not in allowed_values:
            raise ValueError(
                "Invalid value for `rate_type` ({0}), must be one of {1}".format(  # noqa: E501
                    rate_type, allowed_values
                )
            )

        self._rate_type = rate_type

    @property
    def type_of_units(self):
        """Gets the type_of_units of this EarningsRate.  # noqa: E501

        The type of units used to record earnings  # noqa: E501

        :return: The type_of_units of this EarningsRate.  # noqa: E501
        :rtype: str
        """
        return self._type_of_units

    @type_of_units.setter
    def type_of_units(self, type_of_units):
        """Sets the type_of_units of this EarningsRate.

        The type of units used to record earnings  # noqa: E501

        :param type_of_units: The type_of_units of this EarningsRate.  # noqa: E501
        :type: str
        """
        # Required field: reject None (free-form, no enum restriction).
        if type_of_units is None:
            raise ValueError(
                "Invalid value for `type_of_units`, must not be `None`"
            )  # noqa: E501

        self._type_of_units = type_of_units

    @property
    def current_record(self):
        """Gets the current_record of this EarningsRate.  # noqa: E501

        Indicates whether an earning type is active  # noqa: E501

        :return: The current_record of this EarningsRate.  # noqa: E501
        :rtype: bool
        """
        return self._current_record

    @current_record.setter
    def current_record(self, current_record):
        """Sets the current_record of this EarningsRate.

        Indicates whether an earning type is active  # noqa: E501

        :param current_record: The current_record of this EarningsRate.  # noqa: E501
        :type: bool
        """

        self._current_record = current_record

    @property
    def expense_account_id(self):
        """Gets the expense_account_id of this EarningsRate.  # noqa: E501

        The account that will be used for the earnings rate  # noqa: E501

        :return: The expense_account_id of this EarningsRate.  # noqa: E501
        :rtype: str
        """
        return self._expense_account_id

    @expense_account_id.setter
    def expense_account_id(self, expense_account_id):
        """Sets the expense_account_id of this EarningsRate.

        The account that will be used for the earnings rate  # noqa: E501

        :param expense_account_id: The expense_account_id of this EarningsRate.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if expense_account_id is None:
            raise ValueError(
                "Invalid value for `expense_account_id`, must not be `None`"
            )  # noqa: E501

        self._expense_account_id = expense_account_id

    @property
    def rate_per_unit(self):
        """Gets the rate_per_unit of this EarningsRate.  # noqa: E501

        Default rate per unit (optional). Only applicable if RateType is RatePerUnit  # noqa: E501

        :return: The rate_per_unit of this EarningsRate.  # noqa: E501
        :rtype: float
        """
        return self._rate_per_unit

    @rate_per_unit.setter
    def rate_per_unit(self, rate_per_unit):
        """Sets the rate_per_unit of this EarningsRate.

        Default rate per unit (optional). Only applicable if RateType is RatePerUnit  # noqa: E501

        :param rate_per_unit: The rate_per_unit of this EarningsRate.  # noqa: E501
        :type: float
        """

        self._rate_per_unit = rate_per_unit

    @property
    def multiple_of_ordinary_earnings_rate(self):
        """Gets the multiple_of_ordinary_earnings_rate of this EarningsRate.  # noqa: E501

        This is the multiplier used to calculate the rate per unit, based on the employee’s ordinary earnings rate. For example, for time and a half enter 1.5. Only applicable if RateType is MultipleOfOrdinaryEarningsRate  # noqa: E501

        :return: The multiple_of_ordinary_earnings_rate of this EarningsRate.  # noqa: E501
        :rtype: float
        """
        return self._multiple_of_ordinary_earnings_rate

    @multiple_of_ordinary_earnings_rate.setter
    def multiple_of_ordinary_earnings_rate(self, multiple_of_ordinary_earnings_rate):
        """Sets the multiple_of_ordinary_earnings_rate of this EarningsRate.

        This is the multiplier used to calculate the rate per unit, based on the employee’s ordinary earnings rate. For example, for time and a half enter 1.5. Only applicable if RateType is MultipleOfOrdinaryEarningsRate  # noqa: E501

        :param multiple_of_ordinary_earnings_rate: The multiple_of_ordinary_earnings_rate of this EarningsRate.  # noqa: E501
        :type: float
        """

        self._multiple_of_ordinary_earnings_rate = multiple_of_ordinary_earnings_rate

    @property
    def fixed_amount(self):
        """Gets the fixed_amount of this EarningsRate.  # noqa: E501

        Optional Fixed Rate Amount. Applicable for FixedAmount Rate  # noqa: E501

        :return: The fixed_amount of this EarningsRate.  # noqa: E501
        :rtype: float
        """
        return self._fixed_amount

    @fixed_amount.setter
    def fixed_amount(self, fixed_amount):
        """Sets the fixed_amount of this EarningsRate.

        Optional Fixed Rate Amount. Applicable for FixedAmount Rate  # noqa: E501

        :param fixed_amount: The fixed_amount of this EarningsRate.  # noqa: E501
        :type: float
        """

        self._fixed_amount = fixed_amount
| [
"sid.maestre@gmail.com"
] | sid.maestre@gmail.com |
6a702164df517737cd177ab547688f038cb51aa1 | 57ddfddd1e11db649536a8ed6e19bf5312d82d71 | /AtCoder/ABC0/ABC038/ABC038-C.py | 5004183698b5aec8e51581e7b45a0868ffd3856f | [] | no_license | pgDora56/ProgrammingContest | f9e7f4bb77714dc5088c2287e641c0aa760d0f04 | fdf1ac5d1ad655c73208d98712110a3896b1683d | refs/heads/master | 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | n = int(input())
a = list(map(int, input().split()))
a.append(0)
prev = 10**6
count = 0
total = n
for v in a:
if prev >= v:
total += int(count*(count+1)/2)
count = 0
else:
count += 1
prev = v
print(total) | [
"doradora.prog@gmail.com"
] | doradora.prog@gmail.com |
a77641cc9909f854dbf944e75cc53a83aaf49355 | c6db8eccba0f863e464fa23e7c8c5f27d6da277b | /CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/documentation/_24_tables/_24_8_scrolling/__init__.py | 89ee147b6740fdb7a946347181501d484ef04a64 | [] | no_license | corridda/Studies | ceabb94f48bd03a31e4414e9af841d6a9b007cf9 | 1aacf52f2762e05a416c9e73ebe20794cb5d21cf | refs/heads/master | 2023-02-05T18:51:04.217528 | 2023-01-28T09:21:03 | 2023-01-28T09:21:03 | 216,492,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | """Scrolling
https://dearpygui.readthedocs.io/en/latest/documentation/tables.html#scrolling
""" | [
"corridda@yandex.ru"
] | corridda@yandex.ru |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.