blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
29a7f8fd4f7d2a2ab86528e4452628e868896892
|
46b9f8dd70d2da13ceadfc3ca5ac6fb0ec82acf1
|
/examples/solar_cell/log.py
|
ced1c1cb910fc96c1a639a06053c97d3b4c77d03
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
misaelnieto/devsim
|
4a2d16705c0f669af4ab4b8cc68ff8957607388c
|
1d614ba92c4abea7734df5ee7ecd81e8bca3b12c
|
refs/heads/master
| 2020-03-15T08:44:11.854865
| 2018-08-13T19:58:25
| 2018-08-13T19:58:25
| 123,771,828
| 0
| 0
|
Apache-2.0
| 2018-03-04T08:51:41
| 2018-03-04T08:51:41
| null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
# ANSI escape sequences for colored terminal output
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = "\033[1m"


def _emit(color, msg):
    """Print *msg* wrapped in *color* and reset the terminal afterwards."""
    print(color + msg + ENDC)


def infog(msg):
    """Print a success/positive message in green."""
    _emit(OKGREEN, msg)


def info(msg):
    """Print an informational message in blue."""
    _emit(OKBLUE, msg)


def warn(msg):
    """Print a warning message in yellow."""
    _emit(WARNING, msg)


def err(msg):
    """Print an error message in red."""
    _emit(FAIL, msg)
|
[
"nnieto@noenieto.com"
] |
nnieto@noenieto.com
|
5cbbb3255cb564286de601d5a17ec543b88b7f58
|
b2b03fe08e5b97f2a53852538c738aa60677a2af
|
/python/tests/unit/test_maasdriver_vlan.py
|
6094efea069d8060b8ca4c388d31aa163f90046c
|
[
"Apache-2.0"
] |
permissive
|
spyd3rweb/drydock
|
8685b82f340f590f75a3893244486754f77c048f
|
9d1c65dc87807b694d00564bb9fa4fdd25297dc6
|
refs/heads/master
| 2020-09-02T09:51:42.220866
| 2020-04-05T18:53:10
| 2020-04-05T18:53:10
| 219,194,440
| 0
| 0
|
Apache-2.0
| 2019-11-17T05:41:12
| 2019-11-02T18:12:00
| null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for the maasdriver node_results routine.'''
import pytest
from drydock_provisioner.drivers.node.maasdriver.models.vlan import Vlan
from drydock_provisioner.drivers.node.maasdriver.errors import RackControllerConflict
class TestMaasVlan():
    def test_add_rack_controller(self, mocker):
        '''Test vlan model method for setting a managing rack controller.'''

        # A object to return that looks like a requests response
        # object wrapping a MAAS API response
        class MockedResponse():
            status_code = 200

        vlan_fields = {'name': 'test', 'dhcp_on': True, 'mtu': 1500}
        primary_rack = "asdf79"
        secondary_rack = "asdf80"
        tertiary_rack = "asdf81"

        # api_client.get is mocked so the Vlan model never talks to a real MAAS API
        api_client = mocker.MagicMock()
        api_client.get.return_value = MockedResponse()

        vlan_obj = Vlan(api_client, **vlan_fields)

        # First two controllers land in the primary then secondary slots
        vlan_obj.add_rack_controller(primary_rack)
        assert vlan_obj.primary_rack == primary_rack
        vlan_obj.add_rack_controller(secondary_rack)
        assert vlan_obj.secondary_rack == secondary_rack

        # A third controller exceeds the two available slots and must raise
        with pytest.raises(RackControllerConflict):
            vlan_obj.add_rack_controller(tertiary_rack)
|
[
"sh8121@att.com"
] |
sh8121@att.com
|
8122cd63318c83fba3251e64209f9f3899bd2f3b
|
35942792e6dbec7862dd7bbc1aaec2b76ec0bc85
|
/ABC/C/c110.py
|
433dbf7458b7562b7e2b7332698102a7372d8afb
|
[] |
no_license
|
hokekiyoo/AtCoder
|
97f870421b513a5366681d1e05ba1e5038dfa077
|
2be1558c71a3ad8e1852645df050bca494b3afca
|
refs/heads/master
| 2020-04-27T17:51:11.337193
| 2019-10-28T11:42:47
| 2019-10-28T11:42:47
| 174,541,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
from collections import Counter

# Two strings can be mapped onto each other by a character bijection iff the
# multisets of their character-occurrence counts are equal.  Comparing the
# sorted count lists also covers the length-mismatch case, so the explicit
# len() check and the numpy round-trip of the original are unnecessary.
S = input()
T = input()
if sorted(Counter(S).values()) == sorted(Counter(T).values()):
    print("Yes")
else:
    print("No")
|
[
"imslotter1@gmail.com"
] |
imslotter1@gmail.com
|
5706b504bbfd62978d8f2c01925c25b21fbda95d
|
8a72dc41c3628e44c777981d4a4735643084ed95
|
/W5/sales_file_io.py
|
4f4304f81479d640fdaf5e725dfe7a68748130d7
|
[] |
no_license
|
kakalimahapatra/PythonExplore
|
9ed0c9a2d290931fd3882e3d586a9fc026c09a92
|
61ed3d9796e2f1bd0f958893f5d2f6e9762f7a66
|
refs/heads/master
| 2020-04-22T09:11:50.318057
| 2015-05-09T01:47:08
| 2015-05-09T01:47:08
| 34,756,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
# Name - Kakali Mahapatra
# File I/O
def main():
    """Read 16 salespeople's records from SALES.txt and write each person's
    total plus a grand total to TOTAL.txt.

    Each SALES.txt line is expected to be: <name> <sale1> <sale2> <sale3> <sale4>.
    """
    grandtotal = 0
    print("writing these total values into the TOTAL file")
    print("Please check the TOTAL.txt file for the result")
    # 'with' guarantees both files are closed even on a malformed line
    # (the original leaked both handles)
    with open('SALES.txt', 'r') as readfile, open('TOTAL.txt', 'w') as outfile:
        print("name", " ", "total", file=outfile)
        for _ in range(16):  # input file is expected to hold exactly 16 records
            fields = readfile.readline().split()
            name = fields[0]
            print(name)
            # int()/float() instead of eval(): eval would execute arbitrary
            # code embedded in the data file
            total = sum(_parse_number(tok) for tok in fields[1:5])
            print(total)
            grandtotal = grandtotal + total
            print(name, " ", total, file=outfile)
        print("Total", " ", grandtotal, file=outfile)


def _parse_number(token):
    """Convert one sales figure to int when possible, else float (no eval)."""
    try:
        return int(token)
    except ValueError:
        return float(token)
main()
|
[
"getkakali@gmail.com"
] |
getkakali@gmail.com
|
515505b793747253c2090d1532f184922247092c
|
5494fe26fdaa2e69d2b3f5e298383fbea1044f88
|
/moveit_ros/planning_interface/test/python_move_group_ns.py
|
88efc06858d17d9c63c874f793b3b32ff50f71d6
|
[
"BSD-3-Clause"
] |
permissive
|
mamoll/moveit
|
6b70e41ac5ff28acfee80c48c4d4b070729d3b69
|
353e5dec62ce1060e920c527cf17309cf516831b
|
refs/heads/kinetic-devel
| 2023-03-10T18:17:15.693036
| 2019-09-12T08:44:09
| 2019-09-12T08:44:09
| 212,438,759
| 2
| 0
|
BSD-3-Clause
| 2019-10-02T20:50:25
| 2019-10-02T20:50:24
| null |
UTF-8
|
Python
| false
| false
| 3,977
|
py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: William Baker
#
# This test is used to ensure planning with a MoveGroupInterface is
# possbile if the robot's move_group node is in a different namespace
import unittest
import numpy as np
import rospy
import rostest
import os
from moveit_ros_planning_interface._moveit_move_group_interface import MoveGroupInterface
class PythonMoveGroupNsTest(unittest.TestCase):
    # Planning group name and the extra namespace the move_group node lives in
    PLANNING_GROUP = "manipulator"
    PLANNING_NS = "test_ns/"

    @classmethod
    def setUpClass(self):
        # NOTE(review): unittest passes the class object here, so `self` is
        # really `cls`; the MoveGroupInterface handle is shared by every test
        # in this class.
        self.group = MoveGroupInterface(self.PLANNING_GROUP, "%srobot_description"%self.PLANNING_NS, self.PLANNING_NS)

    @classmethod
    def tearDown(self):
        # NOTE(review): tearDown decorated with @classmethod is unusual —
        # presumably intended as tearDownClass; it is a no-op either way.
        pass

    def check_target_setting(self, expect, *args):
        # With no extra args, `expect` itself is the value handed to the setter
        if len(args) == 0:
            args = [expect]
        self.group.set_joint_value_target(*args)
        res = self.group.get_joint_value_target()
        self.assertTrue(np.all(np.asarray(res) == np.asarray(expect)),
            "Setting failed for %s, values: %s" % (type(args[0]), res))

    def test_target_setting(self):
        # Exercise every accepted input form: list, tuple, ndarray,
        # {joint: value} dict, and a single joint name with a value
        n = self.group.get_variable_count()
        self.check_target_setting([0.1] * n)
        self.check_target_setting((0.2,) * n)
        self.check_target_setting(np.zeros(n))
        self.check_target_setting([0.3] * n, {name: 0.3 for name in self.group.get_active_joints()})
        self.check_target_setting([0.5] + [0.3]*(n-1), "joint_1", 0.5)

    def plan(self, target):
        # Set the joint target and return the computed (not yet executed) plan
        self.group.set_joint_value_target(target)
        return self.group.compute_plan()

    def test_validation(self):
        current = np.asarray(self.group.get_current_joint_values())
        plan1 = self.plan(current + 0.2)
        plan2 = self.plan(current + 0.2)
        # first plan should execute
        self.assertTrue(self.group.execute(plan1))
        # second plan should be invalid now (due to modified start point) and rejected
        self.assertFalse(self.group.execute(plan2))
        # newly planned trajectory should execute again
        plan3 = self.plan(current)
        self.assertTrue(self.group.execute(plan3))
if __name__ == '__main__':
    PKGNAME = 'moveit_ros_planning_interface'
    NODENAME = 'moveit_test_python_move_group'
    # Register this process as a ROS node, then hand the test case to rostest
    rospy.init_node(NODENAME)
    rostest.rosrun(PKGNAME, NODENAME, PythonMoveGroupNsTest)
    # suppress cleanup segfault
    os._exit(0)
|
[
"dave@picknik.ai"
] |
dave@picknik.ai
|
a38cb63d4933a71216ca298c2595eaceb6005c82
|
d32fd3dce3d7a3f6b3c0c47d21e9d21e78e140e1
|
/day1/ex6_math.py
|
1cf29b6d6ff36c00318ac3a5633a929a55d1a004
|
[
"Apache-2.0"
] |
permissive
|
ktbyers/pynet_ons
|
0fe77d14d5e1f119396c1f72d98eaeb56849c2ab
|
7e84060f547ee8346a6ecb2db68a89d0ddf17aa6
|
refs/heads/master
| 2021-01-17T17:30:58.832361
| 2016-10-05T23:23:02
| 2016-10-05T23:23:02
| 63,434,341
| 2
| 13
| null | 2016-08-01T19:02:47
| 2016-07-15T16:01:39
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
#!/usr/bin/env python
# Prompt for two integers and print their sum, difference, product, and quotient.
# NOTE(review): Python 2 script (raw_input / print statements).
num1 = int(raw_input("Enter first number: "))
num2 = int(raw_input("Enter second number: "))

print "\n\nSum: {}".format(num1 + num2)
print "Difference: {}".format(num1 - num2)
print "Product: {}".format(num1 * num2)
# float() promotion avoids Python 2 integer (floor) division
print "Division: {:.2f}".format(num1/float(num2))
print
|
[
"ktbyers@twb-tech.com"
] |
ktbyers@twb-tech.com
|
3bdf1c017d00a66aed4a8b527f024a2fd8af0a95
|
552beb5d4f118e4dbe9484b3a7615d9911c03fc3
|
/check.py
|
c3e81bd4341511c54a89c738ec81e71cf29c772c
|
[] |
no_license
|
WangHaoming/HLS
|
684d489a1796cd4e5c565c211b86bec0dd48ea20
|
297eef667b551a52ef1ab081e236ce23a76f242c
|
refs/heads/master
| 2021-01-18T11:08:38.997236
| 2015-06-04T09:23:11
| 2015-06-04T09:23:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
'''
Created on 2013-4-24
@author: xweiyan
'''
import fileinput
import glob
import hls
import os
import re
import string
import sys
def readfiles(path):
    # Stream every *.txt directly under `path`, echoing each line lower-cased
    # with its line number to stdout; each file switch is announced on stderr.
    # NOTE(review): Python 2 idioms (string.lower, print-style I/O elsewhere).
    for line in fileinput.input(glob.glob(path + "/*.txt")):
        if fileinput.isfirstline():
            sys.stderr.write("--reading %s--\n" % fileinput.filename())
        sys.stdout.write(str(fileinput.lineno()) + " " + string.lower(line))
def checkfile(file):
    # Validate that the segment entries of an HLS recording playlist are
    # consecutively numbered; prints a diagnostic for each out-of-order entry.
    content = open(file).read().strip()
    data = hls.M3U8(content)
    print "playlist length is " + str(len(data.segments))
    for no, segment in enumerate(data.segments):
        # Expected form: "#EXTINF:<secs>,\n<15 word chars>-<2 digits>-<seq>.ts"
        # NOTE(review): m is None when a segment does not match, which would
        # raise AttributeError on m.group(4) — confirm inputs always match.
        m = re.match("^#EXTINF\:\d{1,2}(\.\d+)?\,\\n(\w{15})-(\d{2})-(\d+).ts$", str(segment))
        if str(no) != m.group(4):
            print "the record file: "+ file +"is not regular"
            print "the error line is " + m.group(4)
    print "the record file:" +file +" is regular"
def checkTxt(path):
    # Run checkfile() over every .txt file directly under `path`.
    for file in os.listdir(path):
        if os.path.splitext(file)[1] == ".txt":
            checkfile(os.path.join(path,file))
if __name__ == "__main__":
# readfiles(r"c:\stream\4")
checkfile(r"c:\Users\xweiyan\Desktop\01.txt")
# checkfile(r"c:\stream\4\04.txt")
|
[
"weiyang@wistronits.com"
] |
weiyang@wistronits.com
|
d1fda44ec807b70aafa3fff5927fd9ecf8ae40a6
|
a4fa45a00051bdb4c0f182bbe29d154170c3af9b
|
/report_printing.py
|
420927ae82e8ab5d514f1dea5f3e5203db14ff8f
|
[] |
no_license
|
yanghaihuo/diabetes_prediction_pyqt5
|
82f55442d4dafd8b9e6aa4ad48f0db98b64ad345
|
bcbd14c3941f662e79cbdef5cdf155c567bec96f
|
refs/heads/master
| 2022-11-18T14:32:50.495408
| 2020-07-21T07:53:31
| 2020-07-21T07:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
from jinja2 import Environment, FileSystemLoader
from weasyprint import HTML
#['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']
def print_report(fname, arr, res):
    """Render the diabetes report template to reports/<fname>_report.pdf.

    arr holds the model inputs in the order: Pregnancies, Glucose,
    BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age.
    res is the nested model output; res[0][0][0] is the probability in [0, 1].
    """
    probability = round(res[0][0][0] * 100, 2)

    # Map the first seven inputs onto their template variable names
    feature_keys = ("preg", "glucose", "bp", "sk_thick", "insu", "bmi", "dpi")
    context = dict(zip(feature_keys, arr[:7]))
    context["name"] = fname
    context["age"] = arr[7]
    context["result"] = probability

    env = Environment(loader=FileSystemLoader('.'))
    template = env.get_template("template.html")
    rendered = template.render(context)
    HTML(string=rendered).write_pdf("reports/" + fname + "_report.pdf")
|
[
"nitishkumarchauhan1996@gmail.com"
] |
nitishkumarchauhan1996@gmail.com
|
546a1923ba578b58a263e4e4a8c6151cc1b740ea
|
84b04d0787cf4cca686f54dcb4ca8eb0a480bdd5
|
/src/plonetheme/kasteeldehaar/tests/test_robot.py
|
637d955684aa80abb6b0c6a48ffd0cdc2dd7c458
|
[] |
no_license
|
plone-ve/plonetheme.kasteeldehaar
|
357c1399d2d14d1b07cbd507521af4bfd4182897
|
136e0304b935f3ffb085824ed85d7a71a71924d4
|
refs/heads/master
| 2023-08-25T15:10:11.249942
| 2016-11-04T16:58:00
| 2016-11-04T16:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
# -*- coding: utf-8 -*-
# BUG FIX: the original line
#   from plonetheme.kasteeldehaar.testing import plonetheme.kasteeldehaar_ACCEPTANCE_TESTING
# is a SyntaxError — a dotted name cannot follow `import` in a from-import.
# Import the layer constant under its conventional generated name instead.
# NOTE(review): confirm the exact constant name declared in testing.py.
from plonetheme.kasteeldehaar.testing import PLONETHEME_KASTEELDEHAAR_ACCEPTANCE_TESTING  # noqa
from plone.app.testing import ROBOT_TEST_LEVEL
from plone.testing import layered
import os
import robotsuite
import unittest


def test_suite():
    """Collect every robot/test_*.robot file into a layered robot test suite."""
    suite = unittest.TestSuite()
    current_dir = os.path.abspath(os.path.dirname(__file__))
    robot_dir = os.path.join(current_dir, 'robot')
    robot_tests = [
        os.path.join('robot', doc) for doc in os.listdir(robot_dir)
        if doc.endswith('.robot') and doc.startswith('test_')
    ]
    for robot_test in robot_tests:
        robottestsuite = robotsuite.RobotTestSuite(robot_test)
        # Robot tests only run when the test runner's level includes them
        robottestsuite.level = ROBOT_TEST_LEVEL
        suite.addTests([
            layered(
                robottestsuite,
                layer=PLONETHEME_KASTEELDEHAAR_ACCEPTANCE_TESTING
            ),
        ])
    return suite
|
[
"andreslb1@gmail.com"
] |
andreslb1@gmail.com
|
d65289fb5d30eaac8f86b4cc9a51781e50c8359d
|
8945ff1d0a2ebde75ccb0906c29c1c92afd3419f
|
/readparqutespark.py
|
14e6f20f5efb3cd4ecad47049514d4532e6159e0
|
[] |
no_license
|
nikhilpatil19/assignment
|
d60787fe554994ca54e0011da7b0706b19ab8954
|
d031fb3804874fd1f7967e126043455b37e85ed9
|
refs/heads/master
| 2020-05-13T22:25:26.765548
| 2019-04-16T11:04:18
| 2019-04-16T11:04:18
| 181,667,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext

# Build a local, Hive-enabled SparkSession
sc = SparkSession.builder \
    .master("local") \
    .enableHiveSupport() \
    .appName("parquets") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
sqlContext = HiveContext(sc)

# NOTE(review): Python 2-only hack — reload/sys.setdefaultencoding do not
# exist on Python 3 and are generally discouraged even on 2.
import sys
reload(sys)
sys.setdefaultencoding('utf8')

# to read parquet file
df1 = sqlContext.read.parquet('file:////home/mapr/Nikhil/userdata1.parquet')
df2 = sqlContext.read.parquet('file:////home/mapr/Nikhil/userdata2.parquet')
df3 = sqlContext.read.parquet('file:////home/mapr/Nikhil/userdata3.parquet')
df4 = sqlContext.read.parquet('file:////home/mapr/Nikhil/userdata4.parquet')
df5 = sqlContext.read.parquet('file:////home/mapr/Nikhil/userdata5.parquet')

# Concatenate the five datasets row-wise
join = df1.union(df2)
join = join.union(df3)
join = join.union(df4)
join = join.union(df5)

# Register as a temp view, snapshot it into a Hive table, then dump to CSV
table1 = join.createOrReplaceTempView("sample_table")
table1 = sc.sql("create table hive_t_1 as select * from sample_table")
# NOTE(review): table1 here is the result of the CREATE TABLE statement, not
# the joined data — confirm the CSV written below contains what is intended.
table1.write.option("header", "true").csv('file:////home/mapr/Nikhil/out.csv')
print('All things done right')
|
[
"nikhil.patil@emtecinc.com"
] |
nikhil.patil@emtecinc.com
|
912198bc71c0aca72cd66b68a560dd698503898a
|
72200c201fcc6d2ebed915e902ecca35469b904f
|
/server/ca/models.py
|
4584a48b6628705110851e08974e633e5e46d84a
|
[
"MIT"
] |
permissive
|
knaou/mysign
|
a86f68c78b2b84f163af5afb1f40ca1cdb95187a
|
83a2748f2e3a69bc8741bc6a4ee2bb508a8aadba
|
refs/heads/master
| 2021-11-03T05:31:43.581688
| 2020-05-28T16:05:47
| 2020-05-28T16:05:47
| 176,100,477
| 0
| 0
|
MIT
| 2021-09-01T01:57:14
| 2019-03-17T12:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
from django.db import models
class CertificateAuthority(models.Model):
    # Human-readable identity of the CA
    name = models.CharField(max_length=255, null=False)
    description = models.TextField(null=False, blank=True, default='')
    # Serial number to assign to the next certificate this CA issues
    next_serial = models.IntegerField(null=False)
    # PEM-encoded private key, signing request, and certificate of the CA
    key_pem = models.TextField(null=False)
    csr_pem = models.TextField(null=False)
    cert_pem = models.TextField(null=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Certificate(models.Model):
    # Issuing CA; deleting the CA cascades to its certificates
    certificate_authority = models.ForeignKey(CertificateAuthority, related_name='certificates', on_delete=models.CASCADE)
    name = models.CharField(max_length=255, null=False)
    description = models.TextField(null=False, blank=True, default='')
    # Serial assigned by the issuing CA; unique per CA (see Meta below)
    serial = models.IntegerField(null=False)
    # PEM-encoded key material for this certificate
    key_pem = models.TextField(null=False)
    csr_pem = models.TextField(null=False)
    cert_pem = models.TextField(null=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        # A serial number may appear at most once per issuing CA
        unique_together = ('certificate_authority', 'serial',)
|
[
"monaou@gmail.com"
] |
monaou@gmail.com
|
9203c91286bc5ec6669b399df3679ab852e02685
|
35f102cb79a609770ee4d2bccd4c9b79d4379295
|
/setup.py
|
90c54bd55e7322d5ef4c3507cd8326528359c88d
|
[
"MIT"
] |
permissive
|
ahitrin/SiebenApp
|
b404aab2fb248db4a8bec727b25c73aa574e23e8
|
07782976bddecb1e3ba1ca2ab03ccbad024beda1
|
refs/heads/master
| 2023-08-16T16:25:32.409525
| 2023-08-14T06:53:07
| 2023-08-14T07:26:14
| 74,811,457
| 18
| 1
|
MIT
| 2023-09-11T06:08:01
| 2016-11-26T07:28:17
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
#!/usr/bin/env python3
# Packaging script for SiebenApp.
# NOTE(review): distutils is deprecated and removed in Python 3.12 —
# consider migrating to setuptools; left unchanged here.
from distutils.core import setup

setup(
    name='SiebenApp',
    version='0.12',
    description='An experimental dependency-aware goal manager',
    author='Andrey Hitrin',
    author_email='andrey.hitrin@gmail.com',
    url='https://github.com/ahitrin/SiebenApp',
    packages=['siebenapp'],
    scripts=['clieben', 'sieben', 'sieben-manage'],
)
|
[
"andrey.hitrin@gmail.com"
] |
andrey.hitrin@gmail.com
|
7cfb4549c04fd2a922479184e4f8fa07cafb908d
|
56417f91c7a66ba5ba77380aa8f1433299f0fadc
|
/ranker/asf.py
|
2d305c1732594750aa95b60fbf19bdcd105bfaa8
|
[] |
no_license
|
zbanks/rankazoo
|
ef6a1b7836c7c9acab9eb806d85ba4a068d75102
|
a6972c7bf48c559e846d3adaa4cde8bac088fa88
|
refs/heads/master
| 2021-01-10T20:14:12.301878
| 2012-12-08T07:53:13
| 2012-12-08T07:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
from django.db.models import CharField
from django.utils.encoding import force_unicode
from django.template.defaultfilters import slugify
def _get_field(instance, name):
    # Resolve attribute `name` on the model instance, converting a missing
    # attribute into ValueError so misconfigured slug options fail loudly
    # at save() time.
    try:
        return getattr(instance, name)
    except AttributeError:
        raise ValueError("Model %s has no field '%s'" % \
            (instance.__class__.__name__, name))
class AutoSlugField(CharField):
    """CharField that populates itself with a slug derived from another field
    at save() time.

    Keyword arguments beyond CharField's:
        populate_from (required): name of the field the slug is built from;
            ValueError is raised at save() time if the field does not exist.
        slugify_func: callable applied to that field's value; defaults to
            django.template.defaultfilters.slugify.
        prepend_field / append_field: optional field names whose (non-empty)
            values are glued before / after the slug; ValueError at save()
            time if the named field does not exist.
        field_separator: separator between slug and glued fields (u'-').

    Unless explicitly overridden the field is created with editable=False
    and db_index=True.
    """

    def __init__(self, *args, **kwargs):
        # Hidden from forms and indexed by default
        kwargs.setdefault('editable', False)
        kwargs.setdefault('db_index', True)

        self._slugify_func = kwargs.pop('slugify_func', slugify)
        self._append_field = kwargs.pop('append_field', None)
        self._prepend_field = kwargs.pop('prepend_field', None)
        self._field_separator = kwargs.pop('field_separator', u'-')

        populate_from = kwargs.pop('populate_from', None)
        if populate_from is None:
            raise ValueError("missing 'populate_from' argument")
        self._populate_from = populate_from

        super(AutoSlugField, self).__init__(*args, **kwargs)

    def pre_save(self, model_instance, add):
        # Assemble [prepend?, slug, append?] and join with the separator.
        pieces = []

        if self._prepend_field is not None:
            prefix = _get_field(model_instance, self._prepend_field)
            # Prepend the field's value only if it is not empty
            if prefix:
                pieces.append(force_unicode(prefix))

        source_value = _get_field(model_instance, self._populate_from)
        pieces.append(self._slugify_func(source_value))

        if self._append_field is not None:
            suffix = _get_field(model_instance, self._append_field)
            # Append the field's value only if it is not empty
            if suffix:
                pieces.append(force_unicode(suffix))

        value = self._field_separator.join(pieces)
        setattr(model_instance, self.attname, value)
        return value

    def get_internal_type(self):
        # Present as a SlugField to Django's introspection machinery
        return 'SlugField'
|
[
"zbanks@mit.edu"
] |
zbanks@mit.edu
|
9f900007269828e38296a9cb405a59c496d3de71
|
2479f74eec86d34c65f7c3b132346ca303e3cc23
|
/TrueHome/TrueHome/wsgi.py
|
41a3f3ce490255137f3a52f83adcb3749fc0b377
|
[
"MIT"
] |
permissive
|
AletzB/true_home_test
|
2cdfa11161bc5258221e2d7ea2c0449ba73622ef
|
fc03fdf1ef09fe6283a4cd706c06e0a5a413a36d
|
refs/heads/main
| 2023-03-05T15:26:54.785018
| 2021-02-16T22:33:01
| 2021-02-16T22:33:01
| 339,537,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for TrueHome project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TrueHome.settings')
application = get_wsgi_application()
|
[
"alexanderbermudez@usantotomas.edu.co"
] |
alexanderbermudez@usantotomas.edu.co
|
d64822947bd318ef999a252c8a3923e1a6f107a8
|
f654f5f07dd8109c0ee31ba89dd4804e6b288343
|
/src/programy/utils/oob/email.py
|
7ce0678ee5141d87a71f7ce51bfe85ecb32f96c5
|
[
"MIT"
] |
permissive
|
sprinteroz/program-y
|
3d1f5f28e4f3be770705d4bef15410b8b78f19da
|
454c6bde225dce7c3fb01c549d46249248caf7b5
|
refs/heads/master
| 2021-01-19T16:05:25.636700
| 2017-08-22T03:56:33
| 2017-08-22T03:56:33
| 100,986,551
| 1
| 0
| null | 2017-08-21T19:43:43
| 2017-08-21T19:43:43
| null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
import logging
import xml.etree.ElementTree as ET
from programy.utils.oob.oob import OutOfBandProcessor
"""
<oob>
<email>
<to>recipient</to>
<subject>subject text</subject>
<body>body text</body>
</email>
</oob>
"""
class EmailOutOfBandProcessor(OutOfBandProcessor):
    """Out-of-band processor for <email> commands.

    Collects the to/subject/body children of the <email> element and reports
    the "EMAIL" action; parsing succeeds only when all three are present.
    """

    # XML child tag -> instance attribute that stores its text
    _TAG_TO_ATTR = {'to': '_to', 'subject': '_subject', 'body': '_body'}

    def __init__(self):
        OutOfBandProcessor.__init__(self)
        self._to = None
        self._subject = None
        self._body = None

    def parse_oob_xml(self, oob: ET.Element):
        """Extract to/subject/body; return True only when all three were found."""
        for child in oob:
            attr = self._TAG_TO_ATTR.get(child.tag)
            if attr is not None:
                setattr(self, attr, child.text)
            else:
                logging.error("Unknown child element [%s] in email oob" % (child.tag))

        if self._to is not None and \
           self._subject is not None and \
           self._body is not None:
            return True

        logging.error("Invalid email oob command")
        return False

    def execute_oob_command(self, bot, clientid):
        # NOTE(review): no mail is actually sent here; only the action is logged.
        logging.info("EmailOutOfBandProcessor: Emailing=%s", self._to)
        return "EMAIL"
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
910d89742125c89f6d83d759cdbd1a70be21888b
|
b29589f95734682663ae6cd40ab00eb0a94b6d87
|
/longwave/lblnew_20160916/study__g1_threshold/h2o/conc_None/band02_wn_340_540/nv_1000/dv_0.001/ng_11/g_ascending_k_descending/refPTs_P_1_T_250__P_10_T_250__P_500_T_250/ng_refs_2__3__6/ng_adju_-2__-2__0/getabsth_auto__auto__auto/absth_dlogN_uniform__dlogN_uniform__dlogN_uniform/klin_none/atmpro_saw/wgt_k_1/wgt_0.6_0.6__0.6_0.6_0.6__0.6_0.6_0.6_0.6_0.6_0.6/wgt_flux_1/w_diffuse_1.66_1.66__1.8_1.8_1.8__1.8_1.66_1.45_1.45_1.45_1.45/option_compute_ktable_0/option_compute_btable_0/crd_d5931a1/param.py
|
3f670efc4d7a78f1fba5f8514ea4cd44e2aa502f
|
[] |
no_license
|
qAp/offline_radiation_notebooks
|
02c2b2414ef1410f235776001a668f7df0b9f1cf
|
44fb62391c27e4e314ad68ae3e91f6111b3172c5
|
refs/heads/master
| 2020-04-15T14:31:34.675322
| 2019-07-08T04:45:54
| 2019-07-08T04:45:54
| 43,118,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# Absolute path of the Fortran CRD run whose configuration this file mirrors
DIR_FORTRAN = '/chia_cluster/home/jackyu/radiation/crd/LW/examples/separate_g_groups/study__lblnew_g1_threshold/h2o/conc_None/band02_wn_340_540/nv_1000/dv_0.001/ng_11/g_ascending_k_descending/refPTs_P_1_T_250__P_10_T_250__P_500_T_250/ng_refs_2__3__6/ng_adju_-2__-2__0/getabsth_auto__auto__auto/absth_dlogN_uniform__dlogN_uniform__dlogN_uniform/klin_none/atmpro_saw/wgt_k_1/wgt_0.6_0.6__0.6_0.6_0.6__0.6_0.6_0.6_0.6_0.6_0.6/wgt_flux_1/w_diffuse_1.66_1.66__1.8_1.8_1.8__1.8_1.66_1.45_1.45_1.45_1.45/option_compute_ktable_0/option_compute_btable_0/crd_d5931a1'
# Input parameters for the lblnew g1-threshold h2o band02 (340-540 cm^-1) run;
# keys mirror the path components of DIR_FORTRAN above
PARAM = {'molecule': 'h2o', 'band': '2', 'vmin': 340, 'vmax': 540, 'ref_pts': [(1, 250), (10, 250), (500, 250)], 'ng_refs': [2, 3, 6], 'ng_adju': [-2, -2, 0], 'klin': 0, 'option_wgt_k': 1, 'wgt': [(0.6, 0.6), (0.6, 0.6, 0.6), (0.6, 0.6, 0.6, 0.6, 0.6, 0.6)], 'w_diffuse': [(1.66, 1.66), (1.8, 1.8, 1.8), (1.8, 1.66, 1.45, 1.45, 1.45, 1.45)], 'commitnumber': 'd5931a1', 'conc': None, 'dv': 0.001, 'nv': 1000, 'option_wgt_flux': 1, 'option_compute_ktable': 0, 'option_compute_btable': 0, 'atmpro': 'saw', 'tsfc': 257}
|
[
"llacque@gmail.com"
] |
llacque@gmail.com
|
5e494f738e2347aa42a5db0272e5ee6fb9c74abf
|
310fbab72b4b4f1c308a7ddefd34cb04e54b1fa0
|
/care/migrations/0003_patient_status.py
|
12be556c703104420aa886fc4548c291b78f1309
|
[
"MIT"
] |
permissive
|
GituMbugua/patient-care
|
767748c764b2fd9269f2328a17be18edbfa09007
|
79935ff27ebef5c160ac77e0c958c31c6dccb3f6
|
refs/heads/master
| 2021-08-30T00:04:17.584295
| 2017-12-15T10:59:11
| 2017-12-15T10:59:11
| 113,864,824
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 17:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a triage color-code `status` field to the Patient model.
    # Generated migration — do not restructure by hand.

    dependencies = [
        ('care', '0002_auto_20171213_1204'),
    ]

    operations = [
        migrations.AddField(
            model_name='patient',
            name='status',
            field=models.CharField(blank=True, choices=[('green', 'SAFE'), ('yellow', 'MINIMAL INJURY'), ('red', 'SEVERE INJURY'), ('black', 'DECEASED')], max_length=30),
        ),
    ]
|
[
"gmbugua38@gmail.com"
] |
gmbugua38@gmail.com
|
257df738efd0e547ba5ba634c311402e70e970cf
|
683066a6da961f3539037d2763dca05bbb317280
|
/zip.py
|
3dd7b5163052f50e93a095da97cdf270cc46cdc1
|
[] |
no_license
|
zmwieand/PDF-Scrapper
|
df41c223c8c4514774ae1c7660fd231ff7765cd4
|
e430b2862943365f7299f4154ce807f7fca8b9fd
|
refs/heads/master
| 2016-09-05T19:33:01.360604
| 2015-07-21T04:06:10
| 2015-07-21T04:06:10
| 39,423,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import os
import zipfile
from shutil import copyfile
def download_clicked(selected_list):
    # Stage the selected resume files, zip them into Resumes.zip, then clear
    # the staging area.
    # make a list of all file names to be downloaded
    cp_files(selected_list)
    zip_folder()
    # clean out Download/ directory
    rm_files()
def cp_files(selected_files):
    """Copy each selected resume from samples/ into the Download/ staging area."""
    for filename in selected_files:
        copyfile("samples/" + filename, "Download/" + filename)
def rm_files():
    """Delete every entry directly inside the Download/ staging directory."""
    staging = 'Download/'
    for entry in os.listdir(staging):
        os.remove(staging + entry)
def zip_folder():
    """Bundle everything under Download/ into Resumes.zip (overwrites any
    existing archive).

    Uses a context manager so the archive is closed (and flushed) even if a
    write fails — the original leaked the handle on error.
    """
    with zipfile.ZipFile('Resumes.zip', 'w') as archive:
        for root, dirs, files in os.walk('Download/'):
            for name in files:
                archive.write(os.path.join(root, name))
# Script entry: stage and zip a single hard-coded resume on execution
my_list = ['ZachWieandResume.pdf']
download_clicked(my_list)
|
[
"zmwieand@gmail.com"
] |
zmwieand@gmail.com
|
636eabbfe9b1909c7aaa44fde421170464adefb2
|
7cf04200a04a92a682f50bbd3866ec1e0052278b
|
/ddosdb/ddosdb/apps.py
|
d476e18379ef2a359e7ff3b1dafdf87d0542971c
|
[] |
no_license
|
ddosgrid/ddosdb
|
ca2a75816eeb81e45f25a9ee2d1d2b81cd496271
|
8491535c245d56fcf770fc3d7689f2ae4bc14559
|
refs/heads/master
| 2023-02-21T07:42:48.986929
| 2021-01-21T14:09:22
| 2021-01-21T14:09:22
| 294,374,673
| 0
| 0
| null | 2020-09-10T10:14:37
| 2020-09-10T10:14:36
| null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from django.apps import AppConfig
class DdosdbConfig(AppConfig):
    # Django app-registry configuration for the ddosdb application
    name = 'ddosdb'
|
[
"remco.poortinga@surfnet.nl"
] |
remco.poortinga@surfnet.nl
|
f5db9f6c7200aa09c359fa4156c99124cbaf9b9a
|
a7e09640c081cf858f30c3cc3fe2d6ffc986eb7c
|
/gui/system/migrations/0008_auto_20170906_2335.py
|
ead8a168e06533db3ffd002005ee3b25fcc68f3b
|
[] |
no_license
|
cbwest3/freenas
|
3fbeffe66c78a375843f138afd1ee306954a9c87
|
9947174014dd740145d540f03c1849a851f3b6e7
|
refs/heads/master
| 2021-04-30T13:59:53.975592
| 2018-02-12T05:25:55
| 2018-02-12T05:25:55
| 121,202,118
| 1
| 0
| null | 2018-02-12T05:01:39
| 2018-02-12T05:01:38
| null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-06 23:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the cert_san (Subject Alternate Names)
    text field to both the Certificate and CertificateAuthority models."""
    dependencies = [
        ('system', '0007_auto_201708211858'),
    ]
    operations = [
        migrations.AddField(
            model_name='certificate',
            name='cert_san',
            field=models.TextField(blank=True, help_text='Multi-domain support. Enter additional space separated domains', null=True, verbose_name='Subject Alternate Names'),
        ),
        migrations.AddField(
            model_name='certificateauthority',
            name='cert_san',
            field=models.TextField(blank=True, help_text='Multi-domain support. Enter additional space separated domains', null=True, verbose_name='Subject Alternate Names'),
        ),
    ]
|
[
"wg@FreeBSD.org"
] |
wg@FreeBSD.org
|
9d2fa34a66d6dbc7159a496377e64a378cf8bf8a
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/P.O.R.-master/pirates/npc/BossAI.py
|
20a0702863d316eba876ab90f147f6b3362cec96
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
from direct.distributed import DistributedObjectAI
class BossAI(DistributedObjectAI.DistributedObjectAI):
    """AI-side distributed object for a boss; every lifecycle hook simply
    delegates to the DistributedObjectAI base class."""

    def __init__(self, air):
        # BUGFIX: the method was misspelled ``___init___`` (triple
        # underscores), so it never overrode __init__ and the base-class
        # constructor was never invoked through it.
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)

    def announceGenerate(self):
        DistributedObjectAI.DistributedObjectAI.announceGenerate(self)

    def generate(self):
        DistributedObjectAI.DistributedObjectAI.generate(self)

    def delete(self):
        DistributedObjectAI.DistributedObjectAI.delete(self)

    def disable(self):
        DistributedObjectAI.DistributedObjectAI.disable(self)
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
f47be1e90574e504511a04096a3f2df63f0e990b
|
6f3899860af3546672092072e77401aa07eb78b7
|
/Week5 HW/Week5HW-synthetic3.py
|
dd50652fa984fd26929659f4ef588676d9811ec6
|
[] |
no_license
|
winnie800821/EE559_Pattern_Recognition
|
2f0ad4d270ddf0cb01f401a857e6580868d44f6e
|
fc080c7a810a2bf6594be756957ec4843b09524a
|
refs/heads/master
| 2021-03-28T05:32:54.268336
| 2020-03-17T06:43:49
| 2020-03-17T06:43:49
| 247,841,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,910
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.spatial.distance import cdist
np.set_printoptions(threshold=np.inf)
import plotDecBoundaries_revised as pDBR
# Load the 2-D training features and their class labels from CSV files.
feature_train = np.loadtxt(open("feature_train.csv"),delimiter=",")
label_train = np.loadtxt(open("label_train.csv"),delimiter=",")
# Pack features and labels into one array: columns 0-1 = features, column 2 = label.
training_data=np.zeros([label_train.shape[0],feature_train.shape[1]+1])
training_data[:,0:2]=feature_train[:,:]
training_data[:,2]=label_train
data_column=len(training_data[0])
data_row=len(training_data)
# Build the augmented, sign-normalized sample matrix for the perceptron:
# each row becomes [bias, x1, x2]; class-2 samples are negated so that a
# correctly classified sample always satisfies w . z > 0.
training=np.zeros([data_row,data_column])
for i in range(len(training)):
    if training_data[i,2]==2:
        training[i,0] = (-1)
        training[i,1]=(-1)*training_data[i,0]
        training[i,2] = (-1) * training_data[i, 1]
    else:
        training[i, 0] = 1
        training[i, 1] = training_data[i, 0]
        training[i, 2] = training_data[i, 1]
# Randomize presentation order before sequential perceptron updates.
np.random.shuffle(training)
# data_k is the current weight vector; data_w accumulates its history.
data_w=[0.1,0.1,0.1]
data_k=[0.1,0.1,0.1]
W_modifytimes=0
# Sequential (single-sample) perceptron training: for each misclassified
# sample (w . z < 0) add the sample to the weights; stop when an epoch is
# error-free or the epoch budget is reached.
for epoch in range(1,10000):
    errorpoint = 0
    for i in range(data_row):
        a=np.inner(data_k,training[i])
        if a<0:
            errorpoint = errorpoint + 1
            data_k[0]=data_k[0]+training[i][0]
            data_k[1] = data_k[1] + training[i][1]
            data_k[2] = data_k[2] + training[i][2]
            W_modifytimes = W_modifytimes + 1
            # Append the updated weight vector to the (flat) history array.
            data_w = np.insert(data_w, len(data_w), data_k, axis=0)
    if errorpoint==0:
        # print('epoch is',(epoch))
        break
    if epoch==1000:
        # print('epoch is 1000', (epoch))
        break
# Reshape the flat weight history into one weight vector per row and take
# the last one as the final classifier.
size=len(data_w)/3
data_w=np.reshape(data_w,(int(size),3))
#print(data_w)
#print(W_modifytimes)
final_w=data_w[len(data_w)-1]
print(final_w)
# Training error: fraction of sign-normalized samples with w . z < 0.
err=0
for i in range(data_row):
    innervalue=np.inner(final_w,training[i])
    if innervalue<0:
        err=err+1
errorrate=err/len(training)
print("The error rate of of training data %.5f" % errorrate)
label_train=training_data[:,2]
pDBR.plotDecBoundaries_revised(training_data, label_train, final_w)
##############testing
# Evaluate the trained weights on the held-out test set, using the same
# augmentation / sign-normalization as for the training data.
feature_test = np.loadtxt(open("feature_test.csv"),delimiter=",")
label_test = np.loadtxt(open("label_test.csv"),delimiter=",")
testing_data=np.zeros([len(label_test),1+len(feature_test[0])])
testing_data[:,0:2]=feature_test[:,:]
testing_data[:,2]=label_test
testing=np.zeros([len(testing_data),len(testing_data[0])])
for i in range(len(testing_data)):
    if testing_data[i,2]==2:
        testing[i,0] = -1
        testing[i,1]=(-1)*testing_data[i,0]
        testing[i,2] = (-1) * testing_data[i,1]
    else:
        testing[i, 0] = 1
        testing[i, 1] = testing_data[i, 0]
        testing[i, 2] = testing_data[i, 1]
# Test error: a sign-normalized sample is misclassified when w . z < 0.
error_test=0
for i in range(len(testing_data)):
    test_inner=np.inner(final_w,testing[i])
    if test_inner<0:
        error_test=error_test+1
errrate_test=error_test/len(testing)
print("The error rate of testing data is %.5f" % errrate_test)
|
[
"60027808+winnie800821@users.noreply.github.com"
] |
60027808+winnie800821@users.noreply.github.com
|
f07504b3b4c9717a85031b9b16b66da1b901b37c
|
309d452ec97f7003e2ef681bde102986c373ebaf
|
/djangoAdmin/asgi.py
|
59e54533ae58cfc71ac7cd20b79abb86954c498f
|
[] |
no_license
|
CleytonCLois/APS-2020
|
b2c99488a8e2ddef35e32163fdeddb64b6c00450
|
479330a416798e8ca6158f055f59ecbdd46a96a2
|
refs/heads/master
| 2023-07-11T10:07:38.138180
| 2020-11-18T00:00:36
| 2020-11-18T00:00:36
| 265,688,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for djangoAdmin project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings module before building the ASGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoAdmin.settings')
application = get_asgi_application()
|
[
"cleytonclois@gmail.com"
] |
cleytonclois@gmail.com
|
c41d4a8545a558b242743537e1ac944437a90c6d
|
448a18bb54062a5e477f17adc3e2dbf7f5d19586
|
/preparation/get_density_map_gaussian.py
|
a122a91d4b95a63824a9e382f05cccc8cf12269a
|
[] |
no_license
|
guoswang/Crowd-Counting
|
ae65e3f567e96ea7d31f9099e5ab757abee5b153
|
1c1e1b3f712039a7411dd405ff01c984cc9d5673
|
refs/heads/master
| 2020-04-16T13:29:45.938637
| 2018-09-25T14:52:33
| 2018-09-25T14:52:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
import numpy as np
import math
def matlab_style_gauss2D(shape=(3, 3), sigma=0.5):
    """Build a normalized 2-D Gaussian kernel equivalent to MATLAB's
    fspecial('gaussian', shape, sigma)."""
    half_rows, half_cols = ((dim - 1.) / 2. for dim in shape)
    yy, xx = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(xx * xx + yy * yy) / (2. * sigma * sigma))
    # Zero out entries negligibly small relative to the peak, exactly as
    # MATLAB's fspecial does.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total
    return kernel
def get_density_map_gaussian(im, points):
    """Build a crowd-density map for image ``im`` by stamping a normalized
    15x15 Gaussian (sigma = 4) at each annotated head location.

    ``points`` is an N x 2 array of (x, y) coordinates — assumed image
    pixel coordinates; TODO confirm against caller. Returns an array with
    the same shape as ``im``.
    """
    im_density = np.zeros(im.shape)
    [h, w] = im_density.shape
    for j in range(0, len(points)):
        f_sz = 15
        sigma = 4.0
        # H = matlab.fspecial('Gaussian', [f_sz, f_sz], sigma)
        H = matlab_style_gauss2D([f_sz, f_sz], sigma)
        # Clamp the annotation into image bounds (1-based, MATLAB style).
        x = min(w, max(1, abs(int(math.floor(points[j, 0])))))
        y = min(h, max(1, abs(int(math.floor(points[j, 1])))))
        if x > w or y > h:
            continue
        # Kernel footprint [x1, x2] x [y1, y2] centered on the point.
        x1 = x - int(np.floor(f_sz / 2))
        y1 = y - int(np.floor(f_sz / 2))
        x2 = x + int(np.floor(f_sz / 2))
        y2 = y + int(np.floor(f_sz / 2))
        # df* record how far the footprint overhangs each image edge, so the
        # kernel can be regenerated at the clipped size.
        dfx1 = 0
        dfy1 = 0
        dfx2 = 0
        dfy2 = 0
        change_H = False
        if x1 < 1:
            dfx1 = abs(x1) + 1
            x1 = 1
            change_H = True
        if y1 < 1:
            dfy1 = abs(y1) + 1
            y1 = 1
            change_H = True
        if x2 > w:
            dfx2 = x2 - w
            x2 = w
            change_H = True
        if y2 > h:
            dfy2 = y2 - h
            y2 = h
            change_H = True
        x1h = 1 + dfx1
        y1h = 1 + dfy1
        x2h = f_sz - dfx2
        y2h = f_sz - dfy2
        if change_H:
            # Rebuild the Gaussian at the clipped window size so the stamp
            # fits inside the image.
            # H = matlab.fspecial('Gaussian', [double(y2h - y1h + 1), double(x2h - x1h + 1)], sigma)
            H = matlab_style_gauss2D([float(y2h - y1h + 1), float(x2h - x1h + 1)], sigma)
        # Accumulate the (possibly clipped) Gaussian into the density map;
        # the -1 converts the 1-based window back to 0-based indexing.
        im_density[y1-1: y2, x1-1: x2] = im_density[y1-1: y2, x1-1: x2] + H
    return im_density
|
[
"609012488@qq.com"
] |
609012488@qq.com
|
cb860b9132b59035ea6c32bbac91d56cf9d58630
|
827e04177a3bec11aaa2a73eb124538e6712d7d8
|
/blog/migrations/0005_blogpost_views.py
|
3e51c827359ab40ec46b500b7538db4de86bd0f3
|
[] |
no_license
|
iamajaykharat/TechnoWorld-Dynamic-Blog
|
de938f11925346c2047e3436ecd57522096758ae
|
d1dad8f7c451e7516b76cea3623b74dac87a70ac
|
refs/heads/master
| 2023-08-15T03:13:12.745049
| 2020-07-18T11:15:53
| 2020-07-18T11:15:53
| 280,628,874
| 0
| 0
| null | 2021-09-22T19:27:27
| 2020-07-18T10:00:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
# Generated by Django 3.0.8 on 2020-07-18 09:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an integer 'views' counter
    (default 0) to the BlogPost model."""
    dependencies = [
        ('blog', '0004_blogcomment'),
    ]
    operations = [
        migrations.AddField(
            model_name='blogpost',
            name='views',
            field=models.IntegerField(default=0, verbose_name='Post Views'),
        ),
    ]
|
[
"iamajaykharat@gmail.com"
] |
iamajaykharat@gmail.com
|
d5d7b8d6c78ac89d67efcee7fda2c0c241f8e41c
|
9b0b746bffa1bc4b54ee46397d4ca837c0a62733
|
/SocialMedia Project/backends/apps/userprofile/migrations/0002_userprofile_bio.py
|
8605f48c0553749091fcf2da8045831155f47930
|
[] |
no_license
|
indalyadav56/Django-React-Projects
|
548b1132f713b8466e6254a70c4c69ac8f6ad328
|
a01b7a770f77ddf804caf0ee78b50a6a38ace0ee
|
refs/heads/main
| 2023-06-23T06:26:28.399644
| 2021-07-09T23:23:15
| 2021-07-09T23:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Generated by Django 3.1 on 2021-05-23 11:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional free-text 'bio'
    field to the UserProfile model."""
    dependencies = [
        ('userprofile', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='bio',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
[
"yadavindal269@gmail.com"
] |
yadavindal269@gmail.com
|
f4b6d2cb02e5d3c46fa681499e99a27767d36b5e
|
7e24d101e793afa60cb4252f3b9bf8726c104a9c
|
/petaldata/datasets/stripe/reports/abstract_stripe_report.py
|
8bbc43f639030b11e5cbc35f9ccbe8208d974f47
|
[
"MIT"
] |
permissive
|
petaldata/petaldata-python
|
42fbe496ee259a2ffd4f083f5d420f28be273e44
|
6d3b7bdc8f2d6f6ef86f7e61606ffebed168c81d
|
refs/heads/master
| 2020-05-16T14:34:28.724775
| 2019-06-06T22:47:49
| 2019-06-06T22:47:49
| 183,106,671
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,209
|
py
|
import pandas as pd
import datetime
from datetime import datetime
from datetime import date
import pygsheets
import petaldata
from petaldata.datasets.stripe.reports import query_filters
from petaldata.datasets.stripe.reports.adjusted_invoices import AdjustedInvoices
class AbstractStripeReport(object):
    """Base class for Stripe reports.

    Normalizes invoice input to an adjusted-invoice frame, tracks the
    report timezone and end time, and provides helpers for exporting a
    report frame to Google Sheets. Subclasses must implement
    ``to_frame()`` (used by ``to_gsheet``).
    """

    def __init__(self, invoices, tz='UTC', end_time=None):
        """
        Parameters
        ----------
        invoices : stripe.Invoice or stripe.reports.AdjustedInvoices
        tz : str
            The timezone all report data should be presented with.
        end_time : tz-aware datetime, optional
            Report cutoff; defaults to the current time at call time.
            BUGFIX: the default was previously ``datetime.now().astimezone()``
            in the signature, which Python evaluates once at import time,
            freezing the default end time for the life of the process.

        Returns
        -------
        AbstractStripeReport
        """
        if end_time is None:
            end_time = datetime.now().astimezone()
        self.df = invoices.df
        self.tz = tz
        # Raw Invoices are converted so the report always operates on the
        # adjusted frame.
        if invoices.__class__.__name__ == 'Invoices':
            self.df = AdjustedInvoices(invoices, tz='UTC', end_time=datetime.now().astimezone()).df
        self._gsheet_client = None
        self.end_timestamp = self.setup_time(end_time, tz=tz)

    @staticmethod
    def strip_frame_tz(df):
        """Return a copy of ``df`` with tz info removed from every datetime
        column (the spreadsheet backend cannot serialize tz-aware values)."""
        print("Stripping timezone from datetime columns.")
        df = df.copy()
        for col in df.columns:
            if 'datetime64' in str(df[col].dtype):
                df[col] = df[col].apply(lambda x: datetime.replace(x, tzinfo=None))
        return df

    @staticmethod
    def setup_time(dt, tz=None):
        """Convert a tz-aware datetime-like value to a pandas Timestamp in ``tz``."""
        t = pd.Timestamp(dt)
        t = t.tz_convert(tz)
        return t

    @staticmethod
    def cents_to_dollars(df, cols=None):
        """Divide the given columns of ``df`` by 100 (cents -> dollars), in place."""
        print("Converting cents to dollars. Cols=", cols)
        df[cols] = df[cols] / 100
        return df

    def gsheet_client(self, creds):
        """Lazily build (and cache) an authorized pygsheets client."""
        if self._gsheet_client is None:
            self._gsheet_client = pygsheets.authorize(custom_credentials=creds.with_scopes(['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive.metadata.readonly']))
        return self._gsheet_client

    def find_or_create_wks(self, sh, worksheet_title):
        """Return the worksheet with the given title, creating it if absent."""
        try:
            wks = sh.worksheet_by_title(worksheet_title)
            print("\t...opening existing worksheet. title=", worksheet_title)
        except pygsheets.exceptions.WorksheetNotFound:
            print("\t...creating new worksheet. title=", worksheet_title)
            wks = sh.add_worksheet(worksheet_title)
        return wks

    def to_gsheet(self, creds, spreadsheet_title=None, worksheet_title=None):
        """
        Parameters
        ----------
        creds : google.oauth2.service_account.Credentials
            Google Authentication Credentials.
        spreadsheet_title : str
            The title of the Google spreadsheet to update. The spreadsheet must exist and the user associated with the creds must have read/write access to the sheet.
        worksheet_title: str
            The title of the worksheet to update (a worksheet is within a spreadsheet). The worksheet will be created if it doesn't exist.

        Returns
        -------
        None
        """
        frame = self.to_frame()
        print("Opening Google Sheet...title=", spreadsheet_title)
        # Must share sheet with "client_email" from JSON creds
        sh = self.gsheet_client(creds).open(spreadsheet_title)
        wks = self.find_or_create_wks(sh, worksheet_title)
        print("\t...updating worksheet")
        wks.clear()
        wks.set_dataframe(self.strip_frame_tz(frame), (1, 1), copy_index=True, nan="")
        wks.cell('A1').value = frame.index.name
        print("\t...Done.")
|
[
"derek.haynes@gmail.com"
] |
derek.haynes@gmail.com
|
dae4dad98f402fcd4f26493669a7c17624c0be8f
|
e7e4b4ee58aeb8d55ee4d301ff6debaaf6c1e3ff
|
/app/constansts.py
|
60726dba9c0dd5a19ed7d997680ee0426737782f
|
[] |
no_license
|
eduna75/news-reader
|
ef724a25536902d56259806f42a9b2d7948550b6
|
d18775f52a6f6db8be1527a04c29f85ab2a90e67
|
refs/heads/master
| 2023-05-27T03:01:07.780629
| 2020-02-19T14:33:18
| 2020-02-19T14:33:18
| 30,923,274
| 0
| 0
| null | 2019-10-05T12:21:36
| 2015-02-17T15:33:43
|
HTML
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
# User role
# Integer role codes stored on the user record, mapped to display names.
ADMIN = 0
STAFF = 1
USER = 2
ROLE = {
    ADMIN: 'admin',
    STAFF: 'staff',
    USER: 'user',
}
# User status
# Account lifecycle states, mapped to display names.
INACTIVE = 0
NEW = 1
ACTIVE = 2
STATUS = {
    INACTIVE: 'inactive',
    NEW: 'new',
    ACTIVE: 'active',
}
|
[
"justus@justus.justus"
] |
justus@justus.justus
|
2a63560da1481242bfff318fc858f7393a06df42
|
44080d747fd7454c11d575baaa4dcd17acc2b362
|
/gamelogic.py
|
9f18ae845471796a06a89c45078b8bb4589a2b0b
|
[] |
no_license
|
oltyan/mathmagical
|
545fd38e552becc50067558dcaf0bb652c253fdb
|
73cc3869323d4914044098bfa918f03613eb0b0d
|
refs/heads/master
| 2021-09-02T06:21:43.263209
| 2017-12-31T01:01:44
| 2017-12-31T01:01:44
| 109,418,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
import random
def get_continue(hp, enemy_count):
    """Return True while the game should keep running.

    The game continues only while the player has hit points left AND
    enemies remain to defeat.
    """
    # Idiom fix: return the boolean expression directly instead of
    # branching to return literal True/False.
    return hp > 0 and enemy_count > 0
def choose_difficulty():
    """Prompt until the user enters a valid difficulty (1, 2 or 3).

    Returns the selection as the raw input string; callers convert it
    with int() (same contract as before).
    """
    # Fixes to the user-facing menu text: "mosters" -> "monsters" and the
    # unbalanced parentheses are closed.
    print("Please Select a difficulty: \n"
          "1. Easy (5 monsters, 0-10 numbers)\n"
          "2. Challenge (8 monsters, 0-20)\n"
          "3. Brutal (10 monsters, 0-100)\n")
    while True:
        try:
            difficulty = input("Selection: ")
            # int() raises ValueError on non-numeric input; out-of-range
            # numbers raise explicitly so both paths hit the same handler.
            if int(difficulty) not in [1, 2, 3]:
                raise ValueError
            return difficulty
        except ValueError:
            print("Invalid Input")
def additions(difficulty):
    """Run the addition quiz until the player wins or runs out of HP."""
    # Announce the chosen difficulty and pick the operand ceiling.
    level = int(difficulty)
    if level == 1:
        print("Difficulty set to Easy")
        max_int = 10
    elif level == 2:
        print("Difficulty set to Challenging")
        max_int = 20
    elif level == 3:
        print("Difficulty set to Brutal")
        max_int = 100
    hp = 3
    enemy_count = 5
    while True:
        left = random.randint(0, max_int)
        right = random.randint(0, max_int)
        print(str(left) + " + " + str(right) + " = ???")
        answer = input("Answer: ")
        # A correct answer defeats an enemy; a wrong one costs a hit point.
        if int(answer) == left + right:
            print("Correct")
            enemy_count -= 1
        else:
            print("Incorrect")
            hp -= 1
        print("HP Remaining: " + str(hp))
        print("Enemies Left: " + str(enemy_count))
        if not get_continue(hp, enemy_count):
            break
def main():
    # Entry point: ask for a difficulty, then run the addition game.
    additions(choose_difficulty())
|
[
"oltyan@gmail.com"
] |
oltyan@gmail.com
|
06aaaeed472b7854be2301e974e4720c1124be28
|
ae0de2f4bc8e7436a4c6e789b6249fc51a9404d0
|
/Speaker_Verification/src/layers/batch_preprocess.py
|
16c0077eb3fcfa2ec82ff3d5c5d709a650256c1c
|
[
"MIT"
] |
permissive
|
TaeYoon2/KerasSpeakerEmbedding
|
69e1046ea401c7813fd032b5bd0fc9f2700d4a3b
|
a4a53ac0c209a283cab4969d61305f056a99b6c3
|
refs/heads/master
| 2020-11-25T20:27:05.072887
| 2019-12-18T12:28:54
| 2019-12-18T12:28:54
| 228,831,223
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
import tensorflow as tf
class Batch_preprocess(tf.keras.layers.Layer):
    """Keras layer that converts batched variable-length waveforms into one
    batch of fixed-size sliding-window log-mel spectrogram segments.

    NOTE(review): written against TF 1.x-style APIs (e.g. ``.shape[-1].value``).
    """
    def __init__(self, sr, fft_hop, fft_window, nfft, nmel, mode, window, hop, **kwargs):
        # sr: audio sample rate (Hz); fft_hop / fft_window arrive in ms.
        self.sr = sr
        self.fft_hop = fft_hop / 1000
        self.fft_window = fft_window / 1000
        self.nfft = nfft
        self.nmel = nmel
        if mode == 'ms': # window/hop given in milliseconds
            self.window = window / 1000
            self.hop = hop / 1000
            # Convert window/hop durations into counts of STFT frames.
            self.frame_per_win = int((self.window - self.fft_window)/self.fft_hop + 1)
            self.frame_per_hop = int((self.hop - self.fft_window)/self.fft_hop + 1)
        elif mode == 'frame':
            # NOTE(review): self.window / self.hop are read here but are never
            # assigned in this branch (probably meant the raw ``window``/``hop``
            # arguments); as written, 'frame' mode raises AttributeError — confirm.
            self.frame_per_win = self.window
            self.frame_per_hop = self.hop
        super(Batch_preprocess, self).__init__(**kwargs)
    def _mel(self, sig):
        ### batch mel extraction: STFT -> magnitude -> mel warp -> log
        stfts = tf.signal.stft(sig, frame_length=int(self.fft_window * self.sr), frame_step=int(self.fft_hop * self.sr), fft_length=self.nfft)
        spectrograms = tf.abs(stfts)
        # Warp the linear scale spectrograms into the mel-scale.
        num_spectrogram_bins = stfts.shape[-1].value
        lower_edge_hertz, upper_edge_hertz = 80.0, 7600.0
        linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(self.nmel, num_spectrogram_bins, self.sr, lower_edge_hertz, upper_edge_hertz)
        mel_spectrograms = tf.tensordot(spectrograms, linear_to_mel_weight_matrix, 1)
        mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
        log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6) # [seq_length, num_mel]
        return log_mel_spectrograms
    def slice_bathces(self, list_mels):
        '''Slice each utterance's mel into overlapping fixed-size windows and
        concatenate all windows into one batch.

        Returns (long_batch, sizes_per_mel) where sizes_per_mel records how
        many windows each utterance contributed.
        '''
        # List collecting the sliced mels, plus a tensor of per-mel window counts.
        batch_mels = []
        sizes_per_mel = None
        for list_mel in list_mels:
            lastidx = tf.shape(list_mel)[0] - self.frame_per_win + 1
            batch_mel = tf.map_fn(lambda i: tf.slice(list_mel, begin=tf.pad([i], paddings=[[0,1]]),size=[self.frame_per_win, self.nmel],name="sliding_window"), tf.range(lastidx, delta=self.frame_per_hop), dtype=tf.float32)
            size_per_mel = tf.shape(batch_mel)[:1]
            if sizes_per_mel is None:
                sizes_per_mel = size_per_mel
            else:
                sizes_per_mel = tf.concat([sizes_per_mel, size_per_mel],axis=0)
            batch_mels.append(batch_mel)
        # Concatenate the sliced mels into a single long batch.
        long_batch = tf.concat(batch_mels,axis=0)
        return (long_batch, sizes_per_mel)
    def call(self, inputs):
        # inputs: (waveform samples, per-utterance lengths), each with a
        # leading singleton batch dimension that is squeezed away here.
        sig = tf.squeeze(inputs[0],axis=0)
        sig_lengths = tf.squeeze(inputs[1],axis=0)
        # Expected mel frame count per utterance given the STFT window/hop.
        mel_lengths = tf.map_fn(lambda x : (tf.math.floordiv(tf.math.subtract(x, int(self.fft_window*self.sr)), int(self.fft_hop*self.sr)))+1, sig_lengths)
        # Pad to a dense batch, take the mel, then trim back to true lengths.
        ragged_wavs = tf.RaggedTensor.from_row_lengths(values=sig, row_lengths=sig_lengths)
        sparse_wavs = ragged_wavs.to_tensor(default_value=0)
        sparse_mels = self._mel(sparse_wavs)
        ragged_mels = tf.RaggedTensor.from_tensor(sparse_mels, lengths=mel_lengths)
        size_splits = ragged_mels.row_lengths()
        list_mels = tf.split(ragged_mels.flat_values, mel_lengths, axis=0, num=size_splits[0], name="list_mels")
        # Batched mel windows plus the per-utterance window counts.
        long_batch, sizes_per_mel = self.slice_bathces(list_mels)
        return (long_batch, sizes_per_mel)
|
[
"lty92@naver.com"
] |
lty92@naver.com
|
1cb1a5a7d714a543f49ebabf54a2b5ea99009cc6
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp-with-texts/DFL260-MIB.py
|
125dad8d74ee966bee6ebfc13c5c740e0dec552b
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395
| 2019-08-16T15:51:41
| 2019-08-16T15:53:57
| 237,512,469
| 0
| 0
|
Apache-2.0
| 2020-01-31T20:41:36
| 2020-01-31T20:41:35
| null |
UTF-8
|
Python
| false
| false
| 96,348
|
py
|
#
# PySNMP MIB module DFL260-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DFL260-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:42:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
iso, Unsigned32, IpAddress, Integer32, TimeTicks, Counter64, enterprises, Counter32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Gauge32, MibIdentifier, NotificationType, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Unsigned32", "IpAddress", "Integer32", "TimeTicks", "Counter64", "enterprises", "Counter32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Gauge32", "MibIdentifier", "NotificationType", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
dfl260_MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 1, 2)).setLabel("dfl260-MIB")
dfl260_MIB.setRevisions(('2010-09-02 11:39', '2010-03-30 09:00', '2009-11-10 09:16', '2008-11-18 16:05', '2008-10-14 12:27', '2007-10-31 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: dfl260_MIB.setRevisionsDescriptions(('Added 64-bit counters', 'Added values for SMTP ALG objects.', 'Added values for opened and closed connections per second', 'Added value for timer usage', 'Added values for memory usage and TCP buffer usage', 'Initial version.',))
if mibBuilder.loadTexts: dfl260_MIB.setLastUpdated('201009021139Z')
if mibBuilder.loadTexts: dfl260_MIB.setOrganization('D-Link Corporation')
if mibBuilder.loadTexts: dfl260_MIB.setContactInfo('Postal: D-Link Corporation No. 289, Sinhu 3rd Road, Neihu District, Taipei City 114, Taiwan, R.O.C. Tel: +886-2-66000123 Fax: +886-2-55509988')
if mibBuilder.loadTexts: dfl260_MIB.setDescription('The MIB module for D-Link DFL-260 series product.')
dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 171))
netdefendMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20))
utmFirewall = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2))
dfl260 = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1))
dfl260OS = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1))
dfl260OSStats = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2))
dfl260reg = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2))
dfl260MibModules = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 1))
dfl260MibConfs = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 2))
dfl260MibObjectGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3))
dfl260System = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1))
dfl260SysCpuLoad = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysCpuLoad.setStatus('current')
if mibBuilder.loadTexts: dfl260SysCpuLoad.setDescription('The system cpu load.')
dfl260SysForwardedBits = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysForwardedBits.setStatus('current')
if mibBuilder.loadTexts: dfl260SysForwardedBits.setDescription('The number of bits forwarded through the gateway.')
dfl260SysForwardedPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysForwardedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260SysForwardedPackets.setDescription('Total number of forwarded packets.')
dfl260SysBuffUse = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysBuffUse.setStatus('current')
if mibBuilder.loadTexts: dfl260SysBuffUse.setDescription('The current number of buffers in use.')
dfl260SysConns = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysConns.setStatus('current')
if mibBuilder.loadTexts: dfl260SysConns.setDescription('The numer of connections.')
dfl260SysPerStateCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6))
dfl260SysPscTcpSyn = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscTcpSyn.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscTcpSyn.setDescription('Number of TCP connections in the SYN state.')
dfl260SysPscTcpOpen = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscTcpOpen.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscTcpOpen.setDescription('Number of TCP connections in the OPEN state.')
dfl260SysPscTcpFin = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscTcpFin.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscTcpFin.setDescription('Number of TCP connections in the FIN state.')
dfl260SysPscUdp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscUdp.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscUdp.setDescription('Number of UDP connections.')
dfl260SysPscIcmp = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscIcmp.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscIcmp.setDescription('Number of ICMP connections.')
dfl260SysPscOther = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 6, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SysPscOther.setStatus('current')
if mibBuilder.loadTexts: dfl260SysPscOther.setDescription('Number of other connections.')
dfl260IfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7), )
if mibBuilder.loadTexts: dfl260IfStatsTable.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsTable.setDescription('A table of DFL-260 specific interfaces statistics')
dfl260IfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260IfStatsIndex"))
if mibBuilder.loadTexts: dfl260IfStatsEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsEntry.setDescription('The row in a table of DFL-260 specific interface statistics')
dfl260IfStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260IfStatsIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsIndex.setDescription('Index of a row in dfl260SysIfStatsTable')
dfl260IfName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfName.setStatus('current')
if mibBuilder.loadTexts: dfl260IfName.setDescription('The name of the interface.')
dfl260IfFragsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfFragsIn.setStatus('current')
if mibBuilder.loadTexts: dfl260IfFragsIn.setDescription('Number of IP packet fragments received in the interface.')
dfl260IfFragReassOk = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfFragReassOk.setStatus('current')
if mibBuilder.loadTexts: dfl260IfFragReassOk.setDescription('Number of complete IP packets successfully reassembled from the fragments received in the interface.')
dfl260IfFragReassFail = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfFragReassFail.setStatus('current')
if mibBuilder.loadTexts: dfl260IfFragReassFail.setDescription('Number of packets that could not be reassembled, either due to resource starvation, illegal fragmentation, or just packet loss.')
dfl260IfPktsInCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfPktsInCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfPktsInCnt.setDescription('Number of packets received by the interface.')
dfl260IfPktsOutCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfPktsOutCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfPktsOutCnt.setDescription('Number of packets sent by the interface')
dfl260IfBitsInCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfBitsInCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfBitsInCnt.setDescription('Number of bits received by the interface')
dfl260IfBitsOutCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfBitsOutCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfBitsOutCnt.setDescription('Number of bits sent by the interface')
dfl260IfPktsTotCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260IfPktsTotCnt.setStatus('current')
if mibBuilder.loadTexts: dfl260IfPktsTotCnt.setDescription('Totat number of packets transmited by the interface')
# --- dfl260IfStatsEntry columns 11-17: total / 64-bit interface traffic counters ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

# NOTE(review): description texts below reproduce the vendor MIB verbatim,
# including its typos ('Totat', 'transmited') -- do not "fix" them here.
dfl260IfBitsTotCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 11), Counter32()).setMaxAccess("readonly"),
    'Totat number of bits transmited by the interface')
dfl260IfHCPktsInCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 12), Counter64()).setMaxAccess("readonly"),
    'Number of packets received by the interface. This object is a 64-bit version of dfl260IfPktsInCnt.')
dfl260IfHCPktsOutCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 13), Counter64()).setMaxAccess("readonly"),
    'Number of packets sent by the interface. This object is a 64-bit version of dfl260IfPktsOutCnt.')
dfl260IfHCBitsInCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 14), Counter64()).setMaxAccess("readonly"),
    'Number of bits received by the interface. This object is a 64-bit version of dfl260IfBitsInCnt.')
dfl260IfHCBitsOutCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 15), Counter64()).setMaxAccess("readonly"),
    'Number of bits sent by the interface. This object is a 64-bit version of dfl260IfBitsOutCnt.')
dfl260IfHCPktsTotCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 16), Counter64()).setMaxAccess("readonly"),
    'Totat number of packets transmited by the interface. This object is a 64-bit version of dfl260IfPktsTotCnt.')
dfl260IfHCBitsTotCnt = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 7, 1, 17), Counter64()).setMaxAccess("readonly"),
    'Totat number of bits transmited by the interface. This object is a 64-bit version of dfl260IfBitsTotCnt.')
# --- Rx / Tx DMA-ring statistics tables (subtrees ...2.1.8 and ...2.1.9) ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

dfl260IfRxRingTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8)),
    'A table of DFL-260 specific interface Rx ring statistics')
dfl260IfRxRingEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1)).setIndexNames((0, "DFL260-MIB", "dfl260IfRxRingIndex")),
    'The row in a table of DFL-260 specific interface Rx ring statistics.')
# Index column: deliberately no max-access setting, matching the MIB compiler output.
dfl260IfRxRingIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'Index of a row in dfl260IfRxRingTable.')
dfl260IfRxRingFifoErrors = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 2), Counter32()).setMaxAccess("readonly"),
    'Rx Ring number of FIFO errors.')
dfl260IfRxDespools = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'Number of despool events per second.')
dfl260IfRxAvgUse = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 4), Gauge32()).setMaxAccess("readonly"),
    'Rx Ring average usage.')
dfl260IfRxRingSaturation = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 5), Gauge32()).setMaxAccess("readonly"),
    'Rx Ring sturation. Percentage of ring use per despool event when the ring has been more than half full.')
dfl260RxRingFlooded = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 8, 1, 6), Gauge32()).setMaxAccess("readonly"),
    'Rx Ring number of despool events for which the ring has been completely flooded')
dfl260IfTxRingTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9)),
    'A table of DFL-260 specific interface Tx ring statistics')
dfl260IfTxRingEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1)).setIndexNames((0, "DFL260-MIB", "dfl260IfTxRingIndex")),
    'The row in a table of DFL-260 specific interface Tx ring statistics.')
dfl260IfTxRingIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'Index of a row in dfl260IfRxRingTable.')
dfl260IfTxDespools = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 2), Gauge32()).setMaxAccess("readonly"),
    'Tx Ring number of despool event per second (polls when there is at least one buffer in the ring)')
dfl260IfTxAvgUse = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'Tx Ring number of despool events when the ring has been completely flooded')
dfl260IfTxRingSaturation = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 4), Gauge32()).setMaxAccess("readonly"),
    'Tx Ring percentage of use per despool event when the ring has been more than half full.')
# NOTE(review): 'RxTing' is a typo for 'TxRing' in the vendor MIB's object
# name; kept as-is because the symbol is exported under this exact name.
dfl260RxTingFlooded = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 9, 1, 5), Gauge32()).setMaxAccess("readonly"),
    'Tx Ring number of despool events for in which the ring has been completely flooded')
# --- VLAN untagged-traffic statistics (subtree ...2.1.10) and hardware sensors (...2.1.11) ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

dfl260IfVlanStatsTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10)),
    'A table of DFL-260 VLAN statistics')
dfl260IfVlanStatsEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1)).setIndexNames((0, "DFL260-MIB", "dfl260IfVlanIndex")),
    'The row in a table of dfl260IfVlanStatsTable. Each has an index equal to the ifIndex of the corresponding physical interface')
dfl260IfVlanIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'Extended index of a row in dfl260IfVlanStatsTable.')
dfl260IfVlanUntaggedInPkts = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 2), Counter32()).setMaxAccess("readonly"),
    'Number of untaged packets untagged packets received by the interface.')
dfl260IfVlanUntaggedOutPkts = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 3), Counter32()).setMaxAccess("readonly"),
    'Number of untagged packets sent by the interface.')
dfl260IfVlanUntaggedTotPkts = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 4), Counter32()).setMaxAccess("readonly"),
    'Total number of untagged packets processed by the interface.')
dfl260IfVlanUntaggedInOctets = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 5), Counter32()).setMaxAccess("readonly"),
    'Total number of octects in untagged packets received by the interface.')
dfl260IfVlanUntaggedOutOctets = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 6), Counter32()).setMaxAccess("readonly"),
    'Total number of octects in untagged packets sent by the interface.')
dfl260IfVlanUntaggedTotOctets = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 10, 1, 7), Counter32()).setMaxAccess("readonly"),
    'Total number of octects in untagged packets processed by the interface.')
dfl260HWSensorTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11)),
    'Table of hardware sensors.')
dfl260HWSensorEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1)).setIndexNames((0, "DFL260-MIB", "dfl260HWSensorIndex")),
    'Entry of table of hardware sensors.')
dfl260HWSensorIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'Index of the entries of the sensor table.')
dfl260HWSensorName = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 2), DisplayString()).setMaxAccess("readonly"),
    'The description of the sensor.')
dfl260HWSensorValue = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'The value of the sensor.')
dfl260HWSensorUnit = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 11, 1, 4), DisplayString()).setMaxAccess("readonly"),
    'The description of the unit of the value mesured by sensor.')
# --- System resource scalars: memory, TCP window buffers, timers, connection rates ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

dfl260SysMemUsage = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 12), Gauge32()).setMaxAccess("readonly"),
    'The current memory usage.')
# Pure OID subtree anchor; identifiers carry no STATUS/DESCRIPTION texts.
dfl260SysTCPUsage = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13))
dfl260SysTCPRecvSmall = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 1), Gauge32()).setMaxAccess("readonly"),
    'Small TCP receive windows usage.')
dfl260SysTCPRecvLarge = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 2), Gauge32()).setMaxAccess("readonly"),
    'Large TCP receive windows usage.')
dfl260SysTCPSendSmall = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 3), Gauge32()).setMaxAccess("readonly"),
    'Small TCP send windows usage.')
dfl260SysTCPSendLarge = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 13, 4), Gauge32()).setMaxAccess("readonly"),
    'Large TCP send windows usage.')
dfl260SysTimerUsage = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 14), Gauge32()).setMaxAccess("readonly"),
    'The current number of timers in use.')
dfl260SysConnOPS = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 15), Gauge32()).setMaxAccess("readonly"),
    'The number of connections opened per second.')
dfl260SysConnCPS = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 16), Gauge32()).setMaxAccess("readonly"),
    'The number of connections closed per second.')
dfl260SysHCForwardedBits = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 1, 17), Counter64()).setMaxAccess("readonly"),
    'The number of bits forwarded through the gateway.')
# --- VPN / IPsec global negotiation and traffic statistics (subtree ...2.2.1.1) ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

# Subtree anchors (identifiers carry no texts).
dfl260VPN = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2))
dfl260IPsec = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1))
dfl260IPsecGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1))
dfl260IPsecPhaseOneActive = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 1), Gauge32()).setMaxAccess("readonly"),
    'Number of Phase-1 active negotiations')
dfl260IPsecPhaseOneAggrModeDone = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 2), Gauge32()).setMaxAccess("readonly"),
    'Number of Phase-1 aggressive mode negotiations.')
dfl260IPsecQuickModeActive = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'Number of quick mode active negotiations.')
dfl260IPsecPhaseOneDone = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly"),
    'Number of Phase-1 negotiations done.')
dfl260IPsecPhaseOneFailed = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly"),
    'Number of Phase-1 negotiations failed.')
dfl260IPsecPhaseOneRekeyed = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly"),
    'Number of Phase-1 negotiations rekeyed.')
dfl260IPsecQuickModeDone = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly"),
    'Number of quick mode negotiations done.')
dfl260IPsecQuickModeFailed = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly"),
    'Number of quick mode negotiations failed.')
dfl260IPsecInfoDone = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly"),
    'Number of informational exchanges done. (Not available in IKEv1 implementations)')
dfl260IPsecInfoFailed = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly"),
    'Number of informational exchanges failed. (Not available in IKEv1 implementations)')
dfl260IPsecInOctetsComp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 11), Counter64()).setMaxAccess("readonly"),
    'Total octets in before decompression.')
dfl260IPsecInOctetsUncomp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 12), Counter64()).setMaxAccess("readonly"),
    'Total octets in after decompression.')
dfl260IPsecOutOctetsComp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 13), Counter64()).setMaxAccess("readonly"),
    'Total octets out after compression.')
dfl260IPsecOutOctetsUncomp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 14), Counter64()).setMaxAccess("readonly"),
    'Total octets out before compression.')
dfl260IPsecForwardedOctetsComp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 15), Counter64()).setMaxAccess("readonly"),
    'Total octets forwarded after compression.')
dfl260IPsecForwardedOctetsUcomp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 16), Counter64()).setMaxAccess("readonly"),
    'Total octets forwarded before compression.')
dfl260IPsecInPackets = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 17), Counter64()).setMaxAccess("readonly"),
    'Total packets in.')
dfl260IPsecOutPackets = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 18), Counter64()).setMaxAccess("readonly"),
    'Total packets Out.')
dfl260IPsecForwardedPackets = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 19), Counter64()).setMaxAccess("readonly"),
    'Total packets forwarded.')
dfl260IPsecActiveTransforms = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 20), Gauge32()).setMaxAccess("readonly"),
    'Number of currently active transforms.')
dfl260IPsecTotalTransforms = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly"),
    'Total number of transform records created.')
dfl260IPsecOutOfTransforms = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly"),
    'Number of packets dropped due to no available transform object. (Not available in IKEv1 implementations)')
dfl260IPsecTotalRekeys = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 2, 1, 1, 23), Counter32()).setMaxAccess("readonly"),
    'Total number of rekeys performed.')
# --- Rule usage (...2.3), IP pools (...2.4) and DHCP server (...2.5) statistics ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

dfl260Rules = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3))
dfl260RuleUseTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2)),
    'A list of general rules usage statistics.')
dfl260RuleUseEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1)).setIndexNames((0, "DFL260-MIB", "dfl260RuleIndex")),
    'The statistics over a rule usage.')
dfl260RuleIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'The rule usage index.')
dfl260RuleName = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1, 2), DisplayString()).setMaxAccess("readonly"),
    'The name of the rule.')
dfl260RuleUse = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 3, 2, 1, 3), Counter32()).setMaxAccess("readonly"),
    'The number of times o rule was used.')
dfl260IPPools = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4))
dfl260IPPoolsNumber = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 1), Integer32()).setMaxAccess("readonly"),
    'The number of ip pools')
dfl260IPPoolTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2)),
    'A list of IP pools')
dfl260IPPoolEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1)).setIndexNames((0, "DFL260-MIB", "dfl260IPPoolIndex")),
    'The attributes of an ip pool')
dfl260IPPoolIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'The ip pool index')
dfl260IPPoolName = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 2), DisplayString()).setMaxAccess("readonly"),
    'The ip pool name')
dfl260IPPoolPrepare = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'Number of IP pool objects in prepare mode.')
dfl260IPPoolFree = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 4), Gauge32()).setMaxAccess("readonly"),
    'Number of available IPs in the pool.')
dfl260IPPoolMisses = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 5), Gauge32()).setMaxAccess("readonly"),
    'Mumber of missed IP pool negotiations for other reasons than lack of available IP numbers.')
dfl260IPPoolClientFails = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 6), Gauge32()).setMaxAccess("readonly"),
    'Number of failed IP pool transactions.')
dfl260IPPoolUsed = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 4, 2, 1, 7), Gauge32()).setMaxAccess("readonly"),
    'Number of IP numbers in use from the pool.')
dfl260DHCPServer = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5))
dfl260DHCPTotalRejected = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 1), Gauge32()).setMaxAccess("readonly"),
    'Total number of rejected packets (all rules).')
dfl260DHCPRuleTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2)),
    'A list of all DHCP server rules usage statistics.')
dfl260DHCPRuleEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1)).setIndexNames((0, "DFL260-MIB", "dfl260DHCPRuleIndex")),
    'The attributes of a DHCP server rule statistics.')
dfl260DHCPRuleIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'The DHCP server rule index')
dfl260DHCPRuleName = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 2), DisplayString()).setMaxAccess("readonly"),
    'The DHCP server rule name.')
dfl260DHCPRuleUsage = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'Number of used IPs in the pool.')
dfl260DHCPRuleUsagePercent = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 4), Gauge32()).setMaxAccess("readonly"),
    'The percentage of the used IPs in relation to the IP pool size.')
dfl260DHCPActiveClients = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 5), Gauge32()).setMaxAccess("readonly"),
    'Number of currently active clients.')
dfl260DHCPActiveClientsPercent = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 6), Gauge32()).setMaxAccess("readonly"),
    'The number of currently active clients as a percentage of the pool size.')
dfl260DHCPRejectedRequests = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 7), Gauge32()).setMaxAccess("readonly"),
    'Number of rejected requests matching the current rule.')
dfl260DHCPTotalLeases = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 5, 2, 1, 8), Gauge32()).setMaxAccess("readonly"),
    'Total number of leases in the pool.')
# --- User authentication (...2.6) and link monitor (...2.7) statistics ---

def _mib_doc(node, description):
    """Stamp *node* with STATUS 'current' and its DESCRIPTION when MIB texts
    are loaded (equivalent to the generated ``if mibBuilder.loadTexts`` guards)
    and return it, so creation and annotation fit a single binding."""
    if mibBuilder.loadTexts:
        node.setStatus('current')
        node.setDescription(description)
    return node

dfl260UserAuth = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6))
dfl260UserAuthHTTPUsers = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 1), Gauge32()).setMaxAccess("readonly"),
    'Number of currently logged in HTTP users.')
dfl260UserAuthXAUTHUsers = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 2), Gauge32()).setMaxAccess("readonly"),
    'Number of currently logged in XAUTH users.')
dfl260UserAuthHTTPSUsers = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 3), Gauge32()).setMaxAccess("readonly"),
    'Number of currently logged in HTTPS users.')
dfl260UserAuthPPPUsers = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 4), Gauge32()).setMaxAccess("readonly"),
    'Number of currently logged in PPP users.')
dfl260UserAuthEAPUsers = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 5), Gauge32()).setMaxAccess("readonly"),
    'Number of currently logged in EAP users.')
dfl260UserAuthRuleUseTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6)),
    'Table of user authentication rule usage.')
dfl260UserAuthRuleUseEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1)).setIndexNames((0, "DFL260-MIB", "dfl260UserAuthRuleIndex")),
    'The attributes of an authentication rule usage statistics.')
dfl260UserAuthRuleIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'The rule usage index.')
dfl260UserAuthRuleName = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1, 2), DisplayString()).setMaxAccess("readonly"),
    'The name of the rule.')
dfl260UserAuthRuleUse = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 6, 6, 1, 3), Counter32()).setMaxAccess("readonly"),
    'The number of times o rule was used.')
dfl260LinkMonitor = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7))
dfl260LinkMonGrp = _mib_doc(
    MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 1), Integer32()).setMaxAccess("readonly"),
    'The number of groups of monitored links')
dfl260LinkMonGrpTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2)),
    'Table of link monitor groups.')
dfl260LinkMonGrpEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1)).setIndexNames((0, "DFL260-MIB", "dfl260LinkMonGrpIndex")),
    'The attributes of a link monitor group')
dfl260LinkMonGrpIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'The index row in the table of link monitor groups.')
dfl260LinkMonGrpName = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1, 2), DisplayString()).setMaxAccess("readonly"),
    'The link monitor group name.')
dfl260LinkMonGrpHostsUp = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 2, 1, 3), Gauge32()).setMaxAccess("readonly"),
    'The percentage of monitored hosts available.')
dfl260LinkMonHostTable = _mib_doc(
    MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3)),
    'Table of link monitored hosts in a link monnitor group.')
# Two-level index: host rows are keyed by (group, host).
dfl260LinkMonHostEntry = _mib_doc(
    MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1)).setIndexNames((0, "DFL260-MIB", "dfl260LinkMonGrpIndex"), (0, "DFL260-MIB", "dfl260LinkMonHostIndex")),
    'The attributes of a monitored host.')
dfl260LinkMonHostIndex = _mib_doc(
    MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))),
    'The index an host in the table of link monitor hosts.')
dfl260LinkMonHostId = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonHostId.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostId.setDescription('The monitored host identifier.')
dfl260LinkMonHostShortTermLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonHostShortTermLoss.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostShortTermLoss.setDescription('The percentage of short term losst packets.')
dfl260LinkMonHostPacketsLost = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 7, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260LinkMonHostPacketsLost.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonHostPacketsLost.setDescription('Total number of lost monitoring packets.')
# --- Traffic-shaping pipes subtree (...171.20.2.1.1.2.8) ---
# A scalar for the current number of pipe users plus a table describing each
# pipe (precedence range, throughput gauges, delay/drop counters).
dfl260Pipes = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8))
dfl260PipeUsers = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeUsers.setDescription('The current number of users, as defined by the grouping settings of each pipe, being tracked in the pipes system. Note that this value corresponds to the number of users active in each time slice of 1/20th of a second, and not to the number of users having open connections.')
# Pipe table, indexed by dfl260PipeIndex.
dfl260PipeTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2), )
if mibBuilder.loadTexts: dfl260PipeTable.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeTable.setDescription('Table of pipes')
dfl260PipeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260PipeIndex"))
if mibBuilder.loadTexts: dfl260PipeEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeEntry.setDescription('A entry of the pipes table')
dfl260PipeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260PipeIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeIndex.setDescription('The pipe index')
dfl260PipeName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeName.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeName.setDescription('The name of the pipe')
# Precedence configuration of the pipe (min/max/default/count).
dfl260PipeMinPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeMinPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeMinPrec.setDescription('The minimum of the range of pipe precedences.')
dfl260PipeMaxPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeMaxPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeMaxPrec.setDescription('The maximum of the range of pipe precedences.')
dfl260PipeDefPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeDefPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeDefPrec.setDescription('The precedence assigned to a packet for which has not one allready done by a Pipe Rule.')
dfl260PipeNumPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeNumPrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeNumPrec.setDescription('The number of pipe precedences')
dfl260PipeNumUsers = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeNumUsers.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeNumUsers.setDescription('The current number of users, as defined by the grouping settings of each pipe, being tracked in the pipes system. This value corresponds to the number of users active in each time slice and not to the number of users having open connections.')
# Live throughput gauges and delay/drop counters for the pipe.
dfl260PipeCurrentBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeCurrentBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeCurrentBps.setDescription('The current throughput of a pipe, in bits per second, as a sum of the corresponding values for all precedences.')
dfl260PipeCurrentPps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeCurrentPps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeCurrentPps.setDescription('The current throughput of a pipe, in packets per second, as a sum of the corresponding values for all precedences.')
dfl260PipeDelayedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeDelayedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeDelayedPackets.setDescription('The number of times packets have been delayed as a result of a pipe, or pipe user having used up its allotted bandwidth. Note that one single packet may be delayed several times; if a pipe is really full, this count may exceed the number of packets actually passing through the pipe.')
dfl260PipeDropedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipeDropedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipeDropedPackets.setDescription('The number of packets dropped by a pipe. Packets are dropped when CorePlus is running out of packet buffers. This occurs when excessive amounts of packets need to be queued for later delivery. The packet dropped is always the one that has been queued the longest time globally, which means that the connection suffering from packet loss will be the one most overloading the system.')
# --- Pipe precedence table (...171.20.2.1.1.2.8.3) ---
# Extends the pipe table one-to-many: rows are addressed by
# (dfl260PipeIndex, dfl260PipePrecIndex) and expose per-precedence
# throughput, limits, and delay/drop counters.
dfl260PipePrecTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3), )
if mibBuilder.loadTexts: dfl260PipePrecTable.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecTable.setDescription('There is a one to many relation between a pipe and its precedences. The number of precedences is a instance attribute of each pipe. This table extends the pipes table in order to express the relation between a pipe and the respective precedences.')
dfl260PipePrecEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260PipeIndex"), (0, "DFL260-MIB", "dfl260PipePrecIndex"))
if mibBuilder.loadTexts: dfl260PipePrecEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecEntry.setDescription('An entry of the table of pipe pecedences. These table entries are chracterized by been indexed by two values. The first index the same as the pipe index of the corresponding row in table of pipes and the second index is the index of the set of precedences of the corresponding pipe.')
dfl260PipePrecIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260PipePrecIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecIndex.setDescription('Index of a precedence of a specific pipe. This is the second index of the entries of pipe precedence table described by object dfl260PipePrecEntry.')
dfl260PipePrec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrec.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrec.setDescription('The precedence value')
dfl260PipePrecBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecBps.setDescription('The current throughput of the pipe, in bits per second, with the corresponding precedence.')
dfl260PipePrecTotalPps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecTotalPps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecTotalPps.setDescription('The current throughput of the pipe precedence, in packets per second.')
dfl260PipePrecReservedBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecReservedBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecReservedBps.setDescription('The current bandwidth allocated to the precedence.')
dfl260PipePrecDynLimBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDynLimBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDynLimBps.setDescription('The current bandwidth limit limit applied the precedence.')
dfl260PipePrecDynUsrLimBps = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDynUsrLimBps.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDynUsrLimBps.setDescription('The current precedence bandwidth limit per user of the pipe.')
dfl260PipePrecDelayedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDelayedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDelayedPackets.setDescription('The number of times packets have been delayed as a result of a precedence, or pipe user having used up its allotted bandwidth. Note that one single packet may be delayed several times; if a pipe is really full, this count may exceed the number of packets of this precedence actually passing through the pipe.')
dfl260PipePrecDropedPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 8, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260PipePrecDropedPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260PipePrecDropedPackets.setDescription('The number of pipe dropped packets with the corresponding precedence.')
# --- ALG subtree (...171.20.2.1.1.2.9): totals + HTTP ALG ---
# Global ALG session/connection/stream gauges, then the HTTP ALG object
# table and its content-filtering extension table (indexed by
# (dfl260HttpAlgIndex, dfl260HttpAlgCntFltIndex)).
dfl260ALG = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9))
dfl260AlgSessions = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260AlgSessions.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgSessions.setDescription('Total ALG sessions')
dfl260AlgConnections = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260AlgConnections.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgConnections.setDescription('Total ALG connections')
dfl260AlgTCPStreams = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260AlgTCPStreams.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgTCPStreams.setDescription('Total ALG TCP streams')
# HTTP ALG object table, indexed by dfl260HttpAlgIndex.
dfl260HttpAlg = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4))
dfl260HttpAlgTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1), )
if mibBuilder.loadTexts: dfl260HttpAlgTable.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTable.setDescription('Table of HTTP ALG objects.')
dfl260HttpAlgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260HttpAlgIndex"))
if mibBuilder.loadTexts: dfl260HttpAlgEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgEntry.setDescription('A row of the table of HTTP ALG objects.')
dfl260HttpAlgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260HttpAlgIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgIndex.setDescription('The index of an entry of the Table of HTTP ALG objects.')
dfl260HttpAlgName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgName.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgName.setDescription('The name of an HTTP ALG object.')
dfl260HttpAlgTotalRequested = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgTotalRequested.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTotalRequested.setDescription('Total number of URL requests.')
dfl260HttpAlgTotalAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgTotalAllowed.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTotalAllowed.setDescription('Total number of allowed URL requests.')
dfl260HttpAlgTotalBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgTotalBlocked.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgTotalBlocked.setDescription('Total number of blocked URL requests.')
# HTTP ALG content-filtering table, two-level index (ALG, filter rule).
dfl260HttpAlgCntFltTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2), )
if mibBuilder.loadTexts: dfl260HttpAlgCntFltTable.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltTable.setDescription('Table of HTTP ALG content filtering rules.')
dfl260HttpAlgCntFltEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260HttpAlgIndex"), (0, "DFL260-MIB", "dfl260HttpAlgCntFltIndex"))
if mibBuilder.loadTexts: dfl260HttpAlgCntFltEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltEntry.setDescription('Entry of the table of HTTP ALG content filtering rules.')
dfl260HttpAlgCntFltIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260HttpAlgCntFltIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltIndex.setDescription('The index of an entry of the Table of HTTP ALG content filtering objects.')
dfl260HttpAlgCntFltName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltName.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltName.setDescription('The name of the a HTTP ALG content fitering.')
dfl260HttpAlgCntFltRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltRequests.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltRequests.setDescription('Total number of URLs intercepted by a content filtering object.')
dfl260HttpAlgCntFltAllowed = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltAllowed.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltAllowed.setDescription('Total number of URLs intercepted and allowed by a content filtering object.')
dfl260HttpAlgCntFltBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 4, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HttpAlgCntFltBlocked.setStatus('current')
if mibBuilder.loadTexts: dfl260HttpAlgCntFltBlocked.setDescription('Total number of URLs intercepted and blocked by a content filtering object.')
# --- SMTP ALG subtree (...171.20.2.1.1.2.9.5) ---
# Per-SMTP-ALG session statistics table plus the DNS blacklist table that
# extends it with a two-level index (ALG index, blacklist index).
dfl260SmtpAlg = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5))
dfl260SmtpAlgTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1), )
if mibBuilder.loadTexts: dfl260SmtpAlgTable.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTable.setDescription('Table of SMTP ALG objects.')
dfl260SmtpAlgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260SmtpAlgIndex"))
if mibBuilder.loadTexts: dfl260SmtpAlgEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgEntry.setDescription('A row of the table of SMTP ALG objects.')
dfl260SmtpAlgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260SmtpAlgIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgIndex.setDescription('The index of an entry of the Table of SMTP ALG objects.')
dfl260SmtpAlgName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgName.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgName.setDescription('The name of an SMTP ALG object.')
dfl260SmtpAlgTotCheckedSes = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgTotCheckedSes.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTotCheckedSes.setDescription('Total sessions checked by the SMTP ALG of corresponding index.')
dfl260SmtpAlgTotSpamSes = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgTotSpamSes.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTotSpamSes.setDescription('Total spam sessions detected by the SMTP ALG of corresponding index.')
dfl260SmtpAlgTotDroppedSes = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgTotDroppedSes.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgTotDroppedSes.setDescription('Total deroped sessions for the SMTP ALG of corresponding index.')
# SMTP ALG DNS blacklist table (per ALG, per blacklist).
dfl260SmtpAlgDnsBlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2), )
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlTable.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlTable.setDescription('Table of SMTP ALG DNS balck list objects.')
dfl260SmtpAlgDnsBlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260SmtpAlgIndex"), (0, "DFL260-MIB", "dfl260SmtpAlgDnsBlIndex"))
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlEntry.setDescription('A row of the table of SMTP ALG DNS black list objects.')
dfl260SmtpAlgDnsBlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlIndex.setDescription('The index of an entry of the SMTP ALG DNS black list objects.')
dfl260SmtpAlgDnsBlName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlName.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlName.setDescription('The SMTP DNS black list name.')
dfl260SmtpAlgDnsBlChecked = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlChecked.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlChecked.setDescription('Total sessions checked againt SMTP ALG DNS black list of corresponding index')
dfl260SmtpAlgDnsBlMatched = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlMatched.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlMatched.setDescription('Total sessions that matched SMTP ALG DNS black list of corresponding index')
dfl260SmtpAlgDnsBlFailChecks = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 9, 5, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlFailChecks.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgDnsBlFailChecks.setDescription('Total failed checks for the SMTP ALG DNS black list of corresponding index')
# --- DHCP relay subtree (...171.20.2.1.1.2.11) ---
# Global relay gauges (active clients, ongoing transactions, rejects) and a
# per-relay-rule statistics table indexed by dfl260DHCPRelayRuleIndex.
dfl260DHCPRelay = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11))
dfl260DHCPRelayCurClients = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayCurClients.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayCurClients.setDescription('Total DHCP relay active relayed clients.')
dfl260DHCPRelayCurTrans = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayCurTrans.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayCurTrans.setDescription('Ongoing DHCP relay transactions.')
dfl260DHCPRelayRejected = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRejected.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRejected.setDescription('Total DHCP relay packets rejected.')
# Per-rule relay statistics table.
dfl260DHCPRelayRuleTable = MibTable((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4), )
if mibBuilder.loadTexts: dfl260DHCPRelayRuleTable.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleTable.setDescription('Table of DHCP relay rules.')
dfl260DHCPRelayRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1), ).setIndexNames((0, "DFL260-MIB", "dfl260DHCPRelayRuleIndex"))
if mibBuilder.loadTexts: dfl260DHCPRelayRuleEntry.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleEntry.setDescription('Entry of the table of DHCP relay rules')
dfl260DHCPRelayRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dfl260DHCPRelayRuleIndex.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleIndex.setDescription('Index of the table of DHCP relay rules.')
dfl260DHCPRelayRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleName.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleName.setDescription('Display name of a DHCP relay rule')
dfl260DHCPRelayRuleHits = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleHits.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleHits.setDescription('Number of the times the DHCP relay rule with corresponding index was used.')
dfl260DHCPRelayRuleCurClients = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleCurClients.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleCurClients.setDescription('Number of ctive relayed clients by the DHCP relay rule with corresponding index.')
dfl260DHCPRelayRuleRejCliPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejCliPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejCliPkts.setDescription('Number of client packets rejected by a rule.')
dfl260DHCPRelayRuleRejSrvPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 11, 4, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejSrvPkts.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayRuleRejSrvPkts.setDescription('Number of DHCP server packets rejected by the DHCP relay rule with the corresponding index.')
# --- High Availability sync statistics (...171.20.2.1.1.2.12) ---
# Gauges for the HA sync interface send-queue (size and usage in packets
# and octets) and counters for packets sent/resent over the Sync link.
dfl260HA = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12))
dfl260HASyncSendQueueLength = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendQueueLength.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendQueueLength.setDescription('Size of the queue used for the High Availability sync interface.')
dfl260HASyncSendQueueUsagePkt = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsagePkt.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsagePkt.setDescription('High Availability Sync interface queue usage in number of packets.')
dfl260HASyncSendQueueUsageOct = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsageOct.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendQueueUsageOct.setDescription('High Availability Sync interface queue usage in number of octects.')
dfl260HASyncSentPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSentPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSentPackets.setDescription('Number High Availability packets sent on Sync.')
dfl260HASyncSendResentPackets = MibScalar((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 1, 2, 12, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dfl260HASyncSendResentPackets.setStatus('current')
if mibBuilder.loadTexts: dfl260HASyncSendResentPackets.setDescription('Number of High Availability packets resent on Sync.')
dfl260StatsConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 2, 1))
dfl260StatsRegGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1))
dfl260SystemObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 1)).setObjects(("DFL260-MIB", "dfl260SysCpuLoad"), ("DFL260-MIB", "dfl260SysForwardedBits"), ("DFL260-MIB", "dfl260SysForwardedPackets"), ("DFL260-MIB", "dfl260SysBuffUse"), ("DFL260-MIB", "dfl260SysConns"), ("DFL260-MIB", "dfl260HWSensorName"), ("DFL260-MIB", "dfl260HWSensorValue"), ("DFL260-MIB", "dfl260HWSensorUnit"), ("DFL260-MIB", "dfl260SysMemUsage"), ("DFL260-MIB", "dfl260SysTimerUsage"), ("DFL260-MIB", "dfl260SysConnOPS"), ("DFL260-MIB", "dfl260SysConnCPS"), ("DFL260-MIB", "dfl260SysHCForwardedBits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dfl260SystemObjectGroup = dfl260SystemObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260SystemObjectGroup.setDescription('System statistics Group')
# --- Generated pysnmp conformance groups (DFL260-MIB) ---
# Each ObjectGroup bundles related MIB objects under a conformance OID.
# The getattr() guard applies setStatus() only on pysnmp builds newer than
# 4.4.0, and setDescription() only runs when MIB texts are loaded.
# IPsec statistics group (conformance sub-id 2).
dfl260IPsecObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 2)).setObjects(("DFL260-MIB", "dfl260IPsecPhaseOneActive"), ("DFL260-MIB", "dfl260IPsecPhaseOneAggrModeDone"), ("DFL260-MIB", "dfl260IPsecQuickModeActive"), ("DFL260-MIB", "dfl260IPsecPhaseOneDone"), ("DFL260-MIB", "dfl260IPsecPhaseOneFailed"), ("DFL260-MIB", "dfl260IPsecPhaseOneRekeyed"), ("DFL260-MIB", "dfl260IPsecQuickModeDone"), ("DFL260-MIB", "dfl260IPsecQuickModeFailed"), ("DFL260-MIB", "dfl260IPsecInfoDone"), ("DFL260-MIB", "dfl260IPsecInfoFailed"), ("DFL260-MIB", "dfl260IPsecInOctetsComp"), ("DFL260-MIB", "dfl260IPsecInOctetsUncomp"), ("DFL260-MIB", "dfl260IPsecOutOctetsComp"), ("DFL260-MIB", "dfl260IPsecOutOctetsUncomp"), ("DFL260-MIB", "dfl260IPsecForwardedOctetsComp"), ("DFL260-MIB", "dfl260IPsecForwardedOctetsUcomp"), ("DFL260-MIB", "dfl260IPsecInPackets"), ("DFL260-MIB", "dfl260IPsecOutPackets"), ("DFL260-MIB", "dfl260IPsecForwardedPackets"), ("DFL260-MIB", "dfl260IPsecActiveTransforms"), ("DFL260-MIB", "dfl260IPsecTotalTransforms"), ("DFL260-MIB", "dfl260IPsecOutOfTransforms"), ("DFL260-MIB", "dfl260IPsecTotalRekeys"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260IPsecObjectGroup = dfl260IPsecObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IPsecObjectGroup.setDescription('IPsec Group')
# Per-connection-state counters group (conformance sub-id 3).
dfl260StateCountersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 3)).setObjects(("DFL260-MIB", "dfl260SysPscTcpSyn"), ("DFL260-MIB", "dfl260SysPscTcpOpen"), ("DFL260-MIB", "dfl260SysPscTcpFin"), ("DFL260-MIB", "dfl260SysPscUdp"), ("DFL260-MIB", "dfl260SysPscIcmp"), ("DFL260-MIB", "dfl260SysPscOther"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260StateCountersGroup = dfl260StateCountersGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260StateCountersGroup.setDescription('Per state counters')
# IP address pool group (conformance sub-id 4).
dfl260IPPoolGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 4)).setObjects(("DFL260-MIB", "dfl260IPPoolsNumber"), ("DFL260-MIB", "dfl260IPPoolName"), ("DFL260-MIB", "dfl260IPPoolPrepare"), ("DFL260-MIB", "dfl260IPPoolFree"), ("DFL260-MIB", "dfl260IPPoolMisses"), ("DFL260-MIB", "dfl260IPPoolClientFails"), ("DFL260-MIB", "dfl260IPPoolUsed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260IPPoolGroup = dfl260IPPoolGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IPPoolGroup.setDescription('IP pool entry objects group')
# DHCP server group (conformance sub-id 5).
dfl260DHCPServerGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 5)).setObjects(("DFL260-MIB", "dfl260DHCPTotalRejected"), ("DFL260-MIB", "dfl260DHCPRuleName"), ("DFL260-MIB", "dfl260DHCPRuleUsage"), ("DFL260-MIB", "dfl260DHCPRuleUsagePercent"), ("DFL260-MIB", "dfl260DHCPActiveClients"), ("DFL260-MIB", "dfl260DHCPActiveClientsPercent"), ("DFL260-MIB", "dfl260DHCPRejectedRequests"), ("DFL260-MIB", "dfl260DHCPTotalLeases"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260DHCPServerGroup = dfl260DHCPServerGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPServerGroup.setDescription('DHCP server rules objects.')
# Rule-usage counters group (conformance sub-id 6).
dfl260RuleUseGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 6)).setObjects(("DFL260-MIB", "dfl260RuleName"), ("DFL260-MIB", "dfl260RuleUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260RuleUseGroup = dfl260RuleUseGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260RuleUseGroup.setDescription('Rule use objects.')
# User authentication group (conformance sub-id 7).
dfl260UserAuthGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 7)).setObjects(("DFL260-MIB", "dfl260UserAuthHTTPUsers"), ("DFL260-MIB", "dfl260UserAuthXAUTHUsers"), ("DFL260-MIB", "dfl260UserAuthHTTPSUsers"), ("DFL260-MIB", "dfl260UserAuthPPPUsers"), ("DFL260-MIB", "dfl260UserAuthEAPUsers"), ("DFL260-MIB", "dfl260UserAuthRuleName"), ("DFL260-MIB", "dfl260UserAuthRuleUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260UserAuthGroup = dfl260UserAuthGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260UserAuthGroup.setDescription('User auth objects.')
# Interface statistics group (conformance sub-id 8): per-interface packet,
# octet, fragment and RX/TX ring counters (32-bit and high-capacity variants).
dfl260IfStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 8)).setObjects(("DFL260-MIB", "dfl260IfName"), ("DFL260-MIB", "dfl260IfFragsIn"), ("DFL260-MIB", "dfl260IfFragReassOk"), ("DFL260-MIB", "dfl260IfFragReassFail"), ("DFL260-MIB", "dfl260IfPktsInCnt"), ("DFL260-MIB", "dfl260IfPktsOutCnt"), ("DFL260-MIB", "dfl260IfBitsInCnt"), ("DFL260-MIB", "dfl260IfBitsOutCnt"), ("DFL260-MIB", "dfl260IfPktsTotCnt"), ("DFL260-MIB", "dfl260IfBitsTotCnt"), ("DFL260-MIB", "dfl260IfHCPktsInCnt"), ("DFL260-MIB", "dfl260IfHCPktsOutCnt"), ("DFL260-MIB", "dfl260IfHCBitsInCnt"), ("DFL260-MIB", "dfl260IfHCBitsOutCnt"), ("DFL260-MIB", "dfl260IfHCPktsTotCnt"), ("DFL260-MIB", "dfl260IfHCBitsTotCnt"), ("DFL260-MIB", "dfl260IfRxRingFifoErrors"), ("DFL260-MIB", "dfl260IfRxDespools"), ("DFL260-MIB", "dfl260IfRxAvgUse"), ("DFL260-MIB", "dfl260IfRxRingSaturation"), ("DFL260-MIB", "dfl260RxRingFlooded"), ("DFL260-MIB", "dfl260IfTxDespools"), ("DFL260-MIB", "dfl260IfTxAvgUse"), ("DFL260-MIB", "dfl260IfTxRingSaturation"), ("DFL260-MIB", "dfl260RxTingFlooded"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260IfStatsGroup = dfl260IfStatsGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IfStatsGroup.setDescription('DFL-260 interface statistics group.')
# Link monitor group (conformance sub-id 9).
dfl260LinkMonitorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 9)).setObjects(("DFL260-MIB", "dfl260LinkMonGrp"), ("DFL260-MIB", "dfl260LinkMonGrpName"), ("DFL260-MIB", "dfl260LinkMonGrpHostsUp"), ("DFL260-MIB", "dfl260LinkMonHostId"), ("DFL260-MIB", "dfl260LinkMonHostShortTermLoss"), ("DFL260-MIB", "dfl260LinkMonHostPacketsLost"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260LinkMonitorGroup = dfl260LinkMonitorGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260LinkMonitorGroup.setDescription('DFL-260 link monitor statistics group')
# Traffic-shaping pipes group (conformance sub-id 10).
dfl260PipesObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 10)).setObjects(("DFL260-MIB", "dfl260PipeUsers"), ("DFL260-MIB", "dfl260PipeName"), ("DFL260-MIB", "dfl260PipeMinPrec"), ("DFL260-MIB", "dfl260PipeMaxPrec"), ("DFL260-MIB", "dfl260PipeDefPrec"), ("DFL260-MIB", "dfl260PipeNumPrec"), ("DFL260-MIB", "dfl260PipeNumUsers"), ("DFL260-MIB", "dfl260PipeCurrentBps"), ("DFL260-MIB", "dfl260PipeCurrentPps"), ("DFL260-MIB", "dfl260PipeDelayedPackets"), ("DFL260-MIB", "dfl260PipeDropedPackets"), ("DFL260-MIB", "dfl260PipePrec"), ("DFL260-MIB", "dfl260PipePrecBps"), ("DFL260-MIB", "dfl260PipePrecTotalPps"), ("DFL260-MIB", "dfl260PipePrecReservedBps"), ("DFL260-MIB", "dfl260PipePrecDynLimBps"), ("DFL260-MIB", "dfl260PipePrecDynUsrLimBps"), ("DFL260-MIB", "dfl260PipePrecDelayedPackets"), ("DFL260-MIB", "dfl260PipePrecDropedPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260PipesObjectGroup = dfl260PipesObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260PipesObjectGroup.setDescription('DFL-260 pipes statistics group')
# DHCP relay group (conformance sub-id 12).
# NOTE(review): sub-id 11 is absent from this generated module (10 jumps to 12).
dfl260DHCPRelayObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 12)).setObjects(("DFL260-MIB", "dfl260DHCPRelayCurClients"), ("DFL260-MIB", "dfl260DHCPRelayCurTrans"), ("DFL260-MIB", "dfl260DHCPRelayRejected"), ("DFL260-MIB", "dfl260DHCPRelayRuleName"), ("DFL260-MIB", "dfl260DHCPRelayRuleHits"), ("DFL260-MIB", "dfl260DHCPRelayRuleCurClients"), ("DFL260-MIB", "dfl260DHCPRelayRuleRejCliPkts"), ("DFL260-MIB", "dfl260DHCPRelayRuleRejSrvPkts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260DHCPRelayObjectGroup = dfl260DHCPRelayObjectGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260DHCPRelayObjectGroup.setDescription('DFL-260 DHCP relay statistics group')
# Application layer gateway (ALG) group (conformance sub-id 13).
dfl260AlgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 13)).setObjects(("DFL260-MIB", "dfl260AlgSessions"), ("DFL260-MIB", "dfl260AlgConnections"), ("DFL260-MIB", "dfl260AlgTCPStreams"), ("DFL260-MIB", "dfl260HttpAlgName"), ("DFL260-MIB", "dfl260HttpAlgTotalRequested"), ("DFL260-MIB", "dfl260HttpAlgTotalAllowed"), ("DFL260-MIB", "dfl260HttpAlgTotalBlocked"), ("DFL260-MIB", "dfl260HttpAlgCntFltName"), ("DFL260-MIB", "dfl260HttpAlgCntFltRequests"), ("DFL260-MIB", "dfl260HttpAlgCntFltAllowed"), ("DFL260-MIB", "dfl260HttpAlgCntFltBlocked"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260AlgGroup = dfl260AlgGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260AlgGroup.setDescription('DFL-260 HTTP ALG statistics group')
# High-availability sync-queue group (conformance sub-id 14).
dfl260HAGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 14)).setObjects(("DFL260-MIB", "dfl260HASyncSendQueueLength"), ("DFL260-MIB", "dfl260HASyncSendQueueUsagePkt"), ("DFL260-MIB", "dfl260HASyncSendQueueUsageOct"), ("DFL260-MIB", "dfl260HASyncSentPackets"), ("DFL260-MIB", "dfl260HASyncSendResentPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260HAGroup = dfl260HAGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260HAGroup.setDescription('DFL-260 HA statistics group')
# VLAN (untagged traffic) counters group (conformance sub-id 15).
dfl260IfVlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 15)).setObjects(("DFL260-MIB", "dfl260IfVlanUntaggedInPkts"), ("DFL260-MIB", "dfl260IfVlanUntaggedOutPkts"), ("DFL260-MIB", "dfl260IfVlanUntaggedTotPkts"), ("DFL260-MIB", "dfl260IfVlanUntaggedInOctets"), ("DFL260-MIB", "dfl260IfVlanUntaggedOutOctets"), ("DFL260-MIB", "dfl260IfVlanUntaggedTotOctets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260IfVlanGroup = dfl260IfVlanGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260IfVlanGroup.setDescription('DFL-260 VLAN statistics group')
# SMTP ALG / DNS blacklist group (conformance sub-id 16).
dfl260SmtpAlgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 16)).setObjects(("DFL260-MIB", "dfl260SmtpAlgName"), ("DFL260-MIB", "dfl260SmtpAlgTotCheckedSes"), ("DFL260-MIB", "dfl260SmtpAlgTotSpamSes"), ("DFL260-MIB", "dfl260SmtpAlgTotDroppedSes"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlName"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlChecked"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlMatched"), ("DFL260-MIB", "dfl260SmtpAlgDnsBlFailChecks"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260SmtpAlgGroup = dfl260SmtpAlgGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260SmtpAlgGroup.setDescription('Clavister SMTP ALG objects group')
# TCP buffer usage group (conformance sub-id 17).
dfl260SysTCPGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 3, 1, 17)).setObjects(("DFL260-MIB", "dfl260SysTCPRecvSmall"), ("DFL260-MIB", "dfl260SysTCPRecvLarge"), ("DFL260-MIB", "dfl260SysTCPSendSmall"), ("DFL260-MIB", "dfl260SysTCPSendLarge"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260SysTCPGroup = dfl260SysTCPGroup.setStatus('current')
if mibBuilder.loadTexts: dfl260SysTCPGroup.setDescription('DFL-260 TCP buffer usage group')
# Module compliance statement: an agent claiming compliance with DFL260-MIB
# must implement every object group listed below.
dfl260StatsCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 171, 20, 2, 1, 2, 2, 1, 1)).setObjects(("DFL260-MIB", "dfl260SystemObjectGroup"), ("DFL260-MIB", "dfl260IPsecObjectGroup"), ("DFL260-MIB", "dfl260StateCountersGroup"), ("DFL260-MIB", "dfl260IPPoolGroup"), ("DFL260-MIB", "dfl260DHCPServerGroup"), ("DFL260-MIB", "dfl260RuleUseGroup"), ("DFL260-MIB", "dfl260UserAuthGroup"), ("DFL260-MIB", "dfl260IfStatsGroup"), ("DFL260-MIB", "dfl260LinkMonitorGroup"), ("DFL260-MIB", "dfl260PipesObjectGroup"), ("DFL260-MIB", "dfl260DHCPRelayObjectGroup"), ("DFL260-MIB", "dfl260AlgGroup"), ("DFL260-MIB", "dfl260HAGroup"), ("DFL260-MIB", "dfl260IfVlanGroup"), ("DFL260-MIB", "dfl260SmtpAlgGroup"), ("DFL260-MIB", "dfl260SysTCPGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    dfl260StatsCompliance = dfl260StatsCompliance.setStatus('current')
if mibBuilder.loadTexts: dfl260StatsCompliance.setDescription('Module Compliance')
# Export every symbol defined by this generated module so that other MIB
# modules (and management applications) can resolve them through mibBuilder.
# PYSNMP_MODULE_ID marks dfl260_MIB as this module's identity object.
mibBuilder.exportSymbols("DFL260-MIB", dfl260PipeName=dfl260PipeName, dfl260StatsConformance=dfl260StatsConformance, dfl260PipePrecDropedPackets=dfl260PipePrecDropedPackets, dfl260IfFragReassFail=dfl260IfFragReassFail, dfl260DHCPRelayObjectGroup=dfl260DHCPRelayObjectGroup, dfl260DHCPRelayRuleIndex=dfl260DHCPRelayRuleIndex, dfl260IfBitsOutCnt=dfl260IfBitsOutCnt, dfl260IPsecForwardedPackets=dfl260IPsecForwardedPackets, dfl260UserAuthEAPUsers=dfl260UserAuthEAPUsers, dfl260SmtpAlgDnsBlEntry=dfl260SmtpAlgDnsBlEntry, dfl260SmtpAlgDnsBlName=dfl260SmtpAlgDnsBlName, dfl260RuleUseEntry=dfl260RuleUseEntry, dfl260LinkMonGrpIndex=dfl260LinkMonGrpIndex, dfl260IPPoolTable=dfl260IPPoolTable, dfl260IPsecOutPackets=dfl260IPsecOutPackets, dfl260IPsecQuickModeDone=dfl260IPsecQuickModeDone, dfl260PipeNumPrec=dfl260PipeNumPrec, dfl260DHCPRelayRuleName=dfl260DHCPRelayRuleName, dfl260SysConns=dfl260SysConns, dfl260PipeEntry=dfl260PipeEntry, dfl260RxTingFlooded=dfl260RxTingFlooded, dfl260SmtpAlgTotDroppedSes=dfl260SmtpAlgTotDroppedSes, dfl260SmtpAlgName=dfl260SmtpAlgName, dfl260DHCPRelayRuleHits=dfl260DHCPRelayRuleHits, dfl260SysForwardedPackets=dfl260SysForwardedPackets, dfl260SysTimerUsage=dfl260SysTimerUsage, dfl260HWSensorTable=dfl260HWSensorTable, dfl260IPsecPhaseOneFailed=dfl260IPsecPhaseOneFailed, utmFirewall=utmFirewall, dfl260DHCPRuleUsage=dfl260DHCPRuleUsage, dfl260LinkMonHostEntry=dfl260LinkMonHostEntry, dfl260IfTxRingTable=dfl260IfTxRingTable, dfl260Rules=dfl260Rules, dfl260PipeCurrentPps=dfl260PipeCurrentPps, dfl260IfVlanUntaggedTotPkts=dfl260IfVlanUntaggedTotPkts, dfl260UserAuthXAUTHUsers=dfl260UserAuthXAUTHUsers, dfl260IfRxDespools=dfl260IfRxDespools, dfl260SmtpAlgDnsBlMatched=dfl260SmtpAlgDnsBlMatched, dfl260RuleUseTable=dfl260RuleUseTable, dfl260PipePrecIndex=dfl260PipePrecIndex, dfl260StatsRegGroups=dfl260StatsRegGroups, dfl260IPsecForwardedOctetsUcomp=dfl260IPsecForwardedOctetsUcomp, dfl260StateCountersGroup=dfl260StateCountersGroup, dfl260IfVlanIndex=dfl260IfVlanIndex, 
dfl260IfStatsTable=dfl260IfStatsTable, dfl260IPsecInfoDone=dfl260IPsecInfoDone, dfl260HttpAlgEntry=dfl260HttpAlgEntry, dfl260SysHCForwardedBits=dfl260SysHCForwardedBits, dfl260IfVlanUntaggedOutOctets=dfl260IfVlanUntaggedOutOctets, dfl260IPsecPhaseOneRekeyed=dfl260IPsecPhaseOneRekeyed, dfl260SysPscIcmp=dfl260SysPscIcmp, dfl260DHCPRuleIndex=dfl260DHCPRuleIndex, dfl260HASyncSentPackets=dfl260HASyncSentPackets, dfl260UserAuthHTTPSUsers=dfl260UserAuthHTTPSUsers, dfl260PipeDefPrec=dfl260PipeDefPrec, dfl260RuleName=dfl260RuleName, dfl260DHCPRelayRuleTable=dfl260DHCPRelayRuleTable, dfl260IfBitsTotCnt=dfl260IfBitsTotCnt, dfl260PipePrecReservedBps=dfl260PipePrecReservedBps, dfl260DHCPServerGroup=dfl260DHCPServerGroup, dfl260HttpAlgCntFltEntry=dfl260HttpAlgCntFltEntry, dfl260HASyncSendQueueUsagePkt=dfl260HASyncSendQueueUsagePkt, dfl260PipePrecEntry=dfl260PipePrecEntry, dfl260HttpAlgTotalBlocked=dfl260HttpAlgTotalBlocked, dfl260SmtpAlgTotCheckedSes=dfl260SmtpAlgTotCheckedSes, dfl260LinkMonGrp=dfl260LinkMonGrp, dfl260DHCPRuleTable=dfl260DHCPRuleTable, dfl260PipeIndex=dfl260PipeIndex, dfl260IfHCBitsInCnt=dfl260IfHCBitsInCnt, dfl260LinkMonGrpEntry=dfl260LinkMonGrpEntry, dfl260RuleUseGroup=dfl260RuleUseGroup, dfl260DHCPTotalRejected=dfl260DHCPTotalRejected, dfl260IfTxRingSaturation=dfl260IfTxRingSaturation, dfl260DHCPRuleUsagePercent=dfl260DHCPRuleUsagePercent, dfl260IfStatsGroup=dfl260IfStatsGroup, dfl260IPsecTotalRekeys=dfl260IPsecTotalRekeys, dfl260DHCPRejectedRequests=dfl260DHCPRejectedRequests, dfl260LinkMonHostId=dfl260LinkMonHostId, dfl260PipeNumUsers=dfl260PipeNumUsers, dfl260IPsecPhaseOneActive=dfl260IPsecPhaseOneActive, dfl260SmtpAlgTotSpamSes=dfl260SmtpAlgTotSpamSes, dfl260UserAuthRuleIndex=dfl260UserAuthRuleIndex, dfl260IPsecInOctetsUncomp=dfl260IPsecInOctetsUncomp, dfl260SysCpuLoad=dfl260SysCpuLoad, dfl260SysTCPUsage=dfl260SysTCPUsage, dfl260SysTCPSendSmall=dfl260SysTCPSendSmall, dfl260IPsecTotalTransforms=dfl260IPsecTotalTransforms, 
dfl260HASyncSendResentPackets=dfl260HASyncSendResentPackets, netdefendMgmt=netdefendMgmt, dfl260DHCPActiveClients=dfl260DHCPActiveClients, dfl260IPsecActiveTransforms=dfl260IPsecActiveTransforms, dfl260SmtpAlg=dfl260SmtpAlg, dfl260IfFragReassOk=dfl260IfFragReassOk, dfl260UserAuthGroup=dfl260UserAuthGroup, dfl260PipeMinPrec=dfl260PipeMinPrec, dfl260SysPscTcpFin=dfl260SysPscTcpFin, dfl260IfTxRingEntry=dfl260IfTxRingEntry, dfl260UserAuthRuleUseTable=dfl260UserAuthRuleUseTable, dfl260IPsecInOctetsComp=dfl260IPsecInOctetsComp, dfl260PipePrecDelayedPackets=dfl260PipePrecDelayedPackets, dfl260DHCPRelayCurClients=dfl260DHCPRelayCurClients, dfl260HttpAlgTotalRequested=dfl260HttpAlgTotalRequested, dfl260reg=dfl260reg, dfl260HASyncSendQueueUsageOct=dfl260HASyncSendQueueUsageOct, dfl260AlgConnections=dfl260AlgConnections, dfl260DHCPRelayRuleCurClients=dfl260DHCPRelayRuleCurClients, dfl260UserAuthPPPUsers=dfl260UserAuthPPPUsers, dfl260IPPoolIndex=dfl260IPPoolIndex, dfl260DHCPTotalLeases=dfl260DHCPTotalLeases, dfl260LinkMonGrpName=dfl260LinkMonGrpName, dfl260IPsecQuickModeFailed=dfl260IPsecQuickModeFailed, dfl260PipeMaxPrec=dfl260PipeMaxPrec, dfl260IfVlanStatsEntry=dfl260IfVlanStatsEntry, dfl260HttpAlgTotalAllowed=dfl260HttpAlgTotalAllowed, dlink=dlink, dfl260IPPoolEntry=dfl260IPPoolEntry, dfl260HttpAlgTable=dfl260HttpAlgTable, dfl260IPPoolMisses=dfl260IPPoolMisses, dfl260OS=dfl260OS, dfl260SysMemUsage=dfl260SysMemUsage, dfl260IPsecOutOfTransforms=dfl260IPsecOutOfTransforms, dfl260DHCPRelay=dfl260DHCPRelay, dfl260RuleUse=dfl260RuleUse, dfl260DHCPRelayRejected=dfl260DHCPRelayRejected, dfl260IfVlanStatsTable=dfl260IfVlanStatsTable, dfl260UserAuthRuleName=dfl260UserAuthRuleName, dfl260PipeDropedPackets=dfl260PipeDropedPackets, dfl260UserAuthRuleUse=dfl260UserAuthRuleUse, dfl260IfVlanUntaggedOutPkts=dfl260IfVlanUntaggedOutPkts, dfl260SmtpAlgDnsBlChecked=dfl260SmtpAlgDnsBlChecked, dfl260IfRxRingIndex=dfl260IfRxRingIndex, dfl260PipesObjectGroup=dfl260PipesObjectGroup, 
dfl260IfRxRingTable=dfl260IfRxRingTable, dfl260LinkMonGrpHostsUp=dfl260LinkMonGrpHostsUp, dfl260SysPscTcpOpen=dfl260SysPscTcpOpen, dfl260LinkMonHostPacketsLost=dfl260LinkMonHostPacketsLost, dfl260SysPscTcpSyn=dfl260SysPscTcpSyn, dfl260HASyncSendQueueLength=dfl260HASyncSendQueueLength, dfl260HttpAlgCntFltAllowed=dfl260HttpAlgCntFltAllowed, dfl260SysForwardedBits=dfl260SysForwardedBits, dfl260UserAuthHTTPUsers=dfl260UserAuthHTTPUsers, dfl260IfVlanGroup=dfl260IfVlanGroup, dfl260IPPools=dfl260IPPools, dfl260HttpAlgCntFltRequests=dfl260HttpAlgCntFltRequests, dfl260RxRingFlooded=dfl260RxRingFlooded, dfl260IfHCPktsInCnt=dfl260IfHCPktsInCnt, dfl260HttpAlg=dfl260HttpAlg, dfl260IPPoolUsed=dfl260IPPoolUsed, dfl260SysTCPGroup=dfl260SysTCPGroup, dfl260IPsecQuickModeActive=dfl260IPsecQuickModeActive, dfl260SmtpAlgIndex=dfl260SmtpAlgIndex, dfl260HAGroup=dfl260HAGroup, dfl260DHCPRelayCurTrans=dfl260DHCPRelayCurTrans, dfl260StatsCompliance=dfl260StatsCompliance, dfl260HWSensorName=dfl260HWSensorName, dfl260PipePrec=dfl260PipePrec, dfl260IPPoolClientFails=dfl260IPPoolClientFails, dfl260SystemObjectGroup=dfl260SystemObjectGroup, dfl260IPPoolName=dfl260IPPoolName, dfl260IPPoolFree=dfl260IPPoolFree, dfl260SysPerStateCounters=dfl260SysPerStateCounters, dfl260IfName=dfl260IfName, dfl260SysTCPRecvLarge=dfl260SysTCPRecvLarge, dfl260ALG=dfl260ALG, dfl260IfHCBitsTotCnt=dfl260IfHCBitsTotCnt, dfl260LinkMonHostTable=dfl260LinkMonHostTable, dfl260HttpAlgCntFltName=dfl260HttpAlgCntFltName, dfl260IfRxRingSaturation=dfl260IfRxRingSaturation, dfl260HWSensorIndex=dfl260HWSensorIndex, dfl260SmtpAlgTable=dfl260SmtpAlgTable, dfl260IfHCPktsTotCnt=dfl260IfHCPktsTotCnt, dfl260HWSensorEntry=dfl260HWSensorEntry, dfl260IPsecGlobal=dfl260IPsecGlobal, dfl260PipePrecBps=dfl260PipePrecBps, dfl260HA=dfl260HA, dfl260Pipes=dfl260Pipes, dfl260IfStatsEntry=dfl260IfStatsEntry, dfl260IfVlanUntaggedTotOctets=dfl260IfVlanUntaggedTotOctets, dfl260IfPktsTotCnt=dfl260IfPktsTotCnt, dfl260DHCPRuleName=dfl260DHCPRuleName, 
dfl260DHCPRelayRuleEntry=dfl260DHCPRelayRuleEntry, dfl260IPPoolPrepare=dfl260IPPoolPrepare, dfl260SysTCPRecvSmall=dfl260SysTCPRecvSmall, dfl260UserAuthRuleUseEntry=dfl260UserAuthRuleUseEntry, dfl260SmtpAlgDnsBlIndex=dfl260SmtpAlgDnsBlIndex, dfl260IfPktsInCnt=dfl260IfPktsInCnt, dfl260IfFragsIn=dfl260IfFragsIn, dfl260IPPoolsNumber=dfl260IPPoolsNumber, dfl260PipePrecDynLimBps=dfl260PipePrecDynLimBps, dfl260IfStatsIndex=dfl260IfStatsIndex, dfl260IfRxAvgUse=dfl260IfRxAvgUse, dfl260PipeDelayedPackets=dfl260PipeDelayedPackets, dfl260SmtpAlgEntry=dfl260SmtpAlgEntry, dfl260SysConnCPS=dfl260SysConnCPS, dfl260IfTxDespools=dfl260IfTxDespools, dfl260IfRxRingFifoErrors=dfl260IfRxRingFifoErrors, dfl260SmtpAlgDnsBlFailChecks=dfl260SmtpAlgDnsBlFailChecks, dfl260SysPscUdp=dfl260SysPscUdp, dfl260DHCPActiveClientsPercent=dfl260DHCPActiveClientsPercent, dfl260IfVlanUntaggedInPkts=dfl260IfVlanUntaggedInPkts, dfl260PipePrecDynUsrLimBps=dfl260PipePrecDynUsrLimBps, dfl260IfHCPktsOutCnt=dfl260IfHCPktsOutCnt, dfl260IPsecPhaseOneAggrModeDone=dfl260IPsecPhaseOneAggrModeDone, dfl260IfHCBitsOutCnt=dfl260IfHCBitsOutCnt, dfl260IPsecOutOctetsUncomp=dfl260IPsecOutOctetsUncomp, dfl260SysPscOther=dfl260SysPscOther, dfl260HttpAlgCntFltIndex=dfl260HttpAlgCntFltIndex, dfl260IfVlanUntaggedInOctets=dfl260IfVlanUntaggedInOctets, dfl260PipeUsers=dfl260PipeUsers, dfl260IfTxRingIndex=dfl260IfTxRingIndex, dfl260IfTxAvgUse=dfl260IfTxAvgUse, dfl260HttpAlgIndex=dfl260HttpAlgIndex, dfl260AlgSessions=dfl260AlgSessions, dfl260AlgTCPStreams=dfl260AlgTCPStreams, dfl260IPsecOutOctetsComp=dfl260IPsecOutOctetsComp, dfl260SmtpAlgGroup=dfl260SmtpAlgGroup, dfl260HWSensorUnit=dfl260HWSensorUnit, dfl260IPsecInPackets=dfl260IPsecInPackets, dfl260LinkMonitor=dfl260LinkMonitor, dfl260UserAuth=dfl260UserAuth, dfl260_MIB=dfl260_MIB, dfl260OSStats=dfl260OSStats, dfl260MibObjectGroups=dfl260MibObjectGroups, dfl260HWSensorValue=dfl260HWSensorValue, dfl260IPsec=dfl260IPsec, dfl260DHCPRelayRuleRejSrvPkts=dfl260DHCPRelayRuleRejSrvPkts, 
dfl260SysConnOPS=dfl260SysConnOPS, dfl260LinkMonHostIndex=dfl260LinkMonHostIndex, dfl260IPsecPhaseOneDone=dfl260IPsecPhaseOneDone, dfl260HttpAlgName=dfl260HttpAlgName, dfl260SysBuffUse=dfl260SysBuffUse, dfl260PipeTable=dfl260PipeTable, PYSNMP_MODULE_ID=dfl260_MIB, dfl260AlgGroup=dfl260AlgGroup, dfl260VPN=dfl260VPN, dfl260PipePrecTable=dfl260PipePrecTable, dfl260IPsecObjectGroup=dfl260IPsecObjectGroup, dfl260System=dfl260System, dfl260HttpAlgCntFltTable=dfl260HttpAlgCntFltTable, dfl260PipeCurrentBps=dfl260PipeCurrentBps, dfl260IfPktsOutCnt=dfl260IfPktsOutCnt, dfl260PipePrecTotalPps=dfl260PipePrecTotalPps, dfl260HttpAlgCntFltBlocked=dfl260HttpAlgCntFltBlocked, dfl260=dfl260, dfl260DHCPServer=dfl260DHCPServer, dfl260IfRxRingEntry=dfl260IfRxRingEntry, dfl260IfBitsInCnt=dfl260IfBitsInCnt, dfl260LinkMonGrpTable=dfl260LinkMonGrpTable, dfl260RuleIndex=dfl260RuleIndex, dfl260MibConfs=dfl260MibConfs, dfl260DHCPRuleEntry=dfl260DHCPRuleEntry, dfl260IPsecForwardedOctetsComp=dfl260IPsecForwardedOctetsComp, dfl260IPPoolGroup=dfl260IPPoolGroup, dfl260IPsecInfoFailed=dfl260IPsecInfoFailed)
mibBuilder.exportSymbols("DFL260-MIB", dfl260DHCPRelayRuleRejCliPkts=dfl260DHCPRelayRuleRejCliPkts, dfl260LinkMonHostShortTermLoss=dfl260LinkMonHostShortTermLoss, dfl260LinkMonitorGroup=dfl260LinkMonitorGroup, dfl260SmtpAlgDnsBlTable=dfl260SmtpAlgDnsBlTable, dfl260SysTCPSendLarge=dfl260SysTCPSendLarge, dfl260MibModules=dfl260MibModules)
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
3d76da29d6541bdb7ca2bb2cb1dd9e631ec40f74
|
bbe420ca711776140452b5639b935e37feb7d8ea
|
/Task1/chapter26.py
|
f2970c154dab1352dd124f107528c2c59dac5de3
|
[] |
no_license
|
shkhaider2015/artificial_intelligence
|
9f2f156ab2cadcdc38f2c576f1af66667f21dc86
|
785e00dac28cec64a4b35068dbef0d1dcfc5ff3c
|
refs/heads/master
| 2020-03-28T20:27:33.406289
| 2018-11-22T09:11:25
| 2018-11-22T09:11:25
| 149,072,716
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# Personal-details demo: a dict of labelled fields printed one per line.
# Keys deliberately include the trailing colon used as the display label.
Data = {
    "Name:": "Usama Riaz",
    "ID:": "1500-2015",
    "Age:": "21",
    "DOB:": "29/05/1997",
    "Location:": "Karachi"
}

# Dicts iterate in insertion order, so fields print in the order declared.
for label in Data:
    print(label, Data[label])
|
[
"43218437+shkhaider2015@users.noreply.github.com"
] |
43218437+shkhaider2015@users.noreply.github.com
|
df93f65d97755a6b38917bb204e856bbf90a7efd
|
a0947c2778742aec26b1c0600ceca17df42326cd
|
/Python/PythonInADay2/CSV-Files-Drill/37of79-119.py
|
7707852ef6e09fc8a3743fa9851f02ce0b0f3c43
|
[] |
no_license
|
JohnCDunn/Course-Work-TTA
|
5758319d4607114914ba9723328658bed8fb2024
|
8c4f60d51007dac2ac4cceb84b0f9666e143c0d7
|
refs/heads/master
| 2021-01-10T16:37:02.609879
| 2016-02-01T18:05:38
| 2016-02-01T18:05:38
| 49,983,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,560
|
py
|
import wx, db_program
class Frame(wx.Frame):
    """Main window of a small character-database GUI.

    Layout: an "add character" form (top-left), an "update character" form
    (bottom-left), and a wx.ListCtrl table (right) listing all characters
    stored through the db_program module.

    NOTE(review): this module uses Python 2 print statements (see
    addCharacter/onSelect), so it targets Python 2 / classic wxPython.
    """
    def __init__(self, title):
        """Build the menu bar, both forms, and the character table."""
        # 800x600 top-level frame with no parent.
        wx.Frame.__init__(self, None,\
            title=title, size=(800,600))
        panel = wx.Panel(self)
        # Creating the menu bar
        menuBar = wx.MenuBar()
        fileMenu = wx.Menu()
        exitItem = fileMenu.Append(wx.NewId(), "Exit")
        menuBar.Append(fileMenu, "File")
        self.SetMenuBar(menuBar)
        self.Bind(wx.EVT_MENU, self.exitProgram, exitItem)
        self.CreateStatusBar()
        # Setup Add New Character UI
        # Create static box
        wx.StaticBox(panel, label='Add a new character', pos=(20,40), size=(280,190))
        # Text for name, gender etc
        wx.StaticText(panel, label='Name:', pos=(30,70))
        wx.StaticText(panel, label='Gender:', pos=(30,110))
        wx.StaticText(panel, label='Age:', pos=(30,150))
        wx.StaticText(panel, label='Occupation:', pos=(30,190))
        # Single line text boxes (age is a numeric SpinCtrl, not free text)
        self.sName = wx.TextCtrl(panel, size=(150, -1), pos=(130,70))
        self.sGen = wx.TextCtrl(panel, size=(150, -1), pos=(130,110))
        self.sAge = wx.SpinCtrl(panel, value='0', pos=(130, 150), size=(70, 25))
        self.sOcc = wx.TextCtrl(panel, size=(150, -1), pos=(130,190))
        # Save button
        save = wx.Button(panel, label="Add Character", pos=(100, 230))
        save.Bind(wx.EVT_BUTTON, self.addCharacter)
        # Setup the Table UI
        # Setup table as listCtrl
        self.listCtrl = wx.ListCtrl(panel, size=(400,400), pos=(350,40), style=wx.LC_REPORT |wx.BORDER_SUNKEN)
        # Add columns to listCtrl
        self.listCtrl.InsertColumn(0, "ID")
        self.listCtrl.InsertColumn(1, "Name")
        self.listCtrl.InsertColumn(2, "Gender")
        self.listCtrl.InsertColumn(3, "Age")
        self.listCtrl.InsertColumn(4, "Occupation")
        # Add data to the list control
        self.fillListCtrl()
        # Run onSelect function when item is selected
        self.listCtrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onSelect)
        # Setup a delete button
        deleteBtn = wx.Button(panel, label="Delete", pos=(640, 450))
        # Bind delete button to onDelete function
        deleteBtn.Bind(wx.EVT_BUTTON, self.onDelete)
        # Setup Update Character UI
        # Create static box
        wx.StaticBox(panel, label='Update a character', pos=(20,340), size=(280,190))
        # Text for name, gender etc
        wx.StaticText(panel, label='Name:', pos=(30,370))
        wx.StaticText(panel, label='Gender:', pos=(30,410))
        wx.StaticText(panel, label='Age:', pos=(30,450))
        wx.StaticText(panel, label='Occupation:', pos=(30,490))
        # Single line text boxes
        self.sNameU = wx.TextCtrl(panel, size=(150, -1), pos=(130,370))
        self.sGenU = wx.TextCtrl(panel, size=(150, -1), pos=(130,410))
        self.sAgeU = wx.SpinCtrl(panel, value='0', pos=(130, 450), size=(70, 25))
        self.sOccU = wx.TextCtrl(panel, size=(150, -1), pos=(130,490))
        # Save button
        saveUpdate = wx.Button(panel, label="Update Character", pos=(100, 530))
        saveUpdate.Bind(wx.EVT_BUTTON, self.updateCharacter)
    def addCharacter(self, event):
        """Validate the add-form fields and insert a new character row."""
        name = self.sName.GetValue()
        gen = self.sGen.GetValue()
        age = self.sAge.GetValue()
        occ = self.sOcc.GetValue()
        # Checking if variables have a value
        # NOTE(review): sAge is a SpinCtrl so age is an int; the age == ''
        # comparison can never be True — confirm whether 0 should be rejected.
        if (name == '') or (gen == '') or (age == '') or (occ == ''):
            # Alert user that a variable is empty
            dlg = wx.MessageDialog(None, \
                'Some character details are missing. Enter values in each text box.', \
                'Missing Details', wx.OK)
            dlg.ShowModal()
            dlg.Destroy()
            return False
        # Adding character to database
        db_program.newCharacter(name, gen, age, occ)
        print db_program.viewAll()
        # Empty text boxes when finished.
        self.sName.Clear()
        self.sGen.Clear()
        self.sOcc.Clear()
        self.sAge.SetValue(0)
        # Update list control
        self.fillListCtrl()
    def exitProgram(self, event):
        """File > Exit handler: destroy the frame, ending the main loop."""
        self.Destroy()
    def fillListCtrl(self):
        """Reload every character from the database into the table."""
        # Get data from the database
        self.allData = db_program.viewAll()
        # Delete old data before adding new data
        self.listCtrl.DeleteAllItems()
        # Append data to the table
        for row in self.allData:
            # Loop though and append data
            self.listCtrl.Append(row)
    def onDelete(self, event):
        """Delete the character last selected in the table.

        NOTE(review): relies on self.selectedId set by onSelect; clicking
        Delete before ever selecting a row raises AttributeError — confirm.
        """
        # Delete the character
        db_program.deleteCharacter(self.selectedId)
        # Refresh the table
        self.fillListCtrl()
    def onSelect(self, event):
        """Remember the selected row's id and copy its fields into the
        update form."""
        # Get the id of the selected row
        self.selectedId = event.GetText()
        # Get index of selected row
        index = event.GetIndex()
        # Get character info
        charInfo = self.allData[index]
        print charInfo
        # Set value of update text boxes
        self.sNameU.SetValue(charInfo[1])
        self.sGenU.SetValue(charInfo[2])
        self.sAgeU.SetValue(charInfo[3])
        self.sOccU.SetValue(charInfo[4])
    def updateCharacter(self, event):
        """Placeholder: the update form is wired up but not implemented yet."""
        pass
# Application entry point: create the wx event loop, show the main frame,
# and block in MainLoop() until the window is closed.
app = wx.App()
frame = Frame("Python GUI")
frame.Show()
app.MainLoop()
|
[
"JohnClydeDunn@Gmail.com"
] |
JohnClydeDunn@Gmail.com
|
de5eea017c032a61da30c0fc838bd5c7842e7b01
|
7ad35f591c8ac671022eb02fa0de3f76b767345b
|
/_static/scripts/part2/helloobject/run_hello.py
|
dff12d4f74edd27ded2abce5bb1db3c176a9443b
|
[
"CC-BY-4.0"
] |
permissive
|
mattsinc/learning_gem5
|
8d68ca4290ef34312d75580c98797fe3ce47f265
|
702f89f6b657e5d9201978333e56f2882e6f49fe
|
refs/heads/master
| 2021-01-23T08:33:19.830467
| 2018-09-14T05:02:38
| 2018-09-14T05:05:51
| 102,510,868
| 0
| 0
| null | 2017-09-05T17:29:40
| 2017-09-05T17:29:40
| null |
UTF-8
|
Python
| false
| false
| 2,359
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jason Lowe-Power
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power
""" Simple config/run script for the HelloObject
This is probably the simplest gem5 config file you can possibly create.
It creates a Root object and one *very* simple SimObject and simulates the
system. Since there are no events, this "simulation" should finish immediately
"""
# import the m5 (gem5) library created when gem5 is built
import m5
# import all of the SimObjects
from m5.objects import *
# set up the root SimObject and start the simulation
# set up the root SimObject and start the simulation
# (syscall-emulation mode, not full-system)
root = Root(full_system = False)
# Create an instantiation of the simobject you created
root.hello = HelloObject()
# instantiate all of the objects we've created above
m5.instantiate()
# NOTE: Python 2 print statements — this script targets gem5's embedded
# Python 2 interpreter; do not run it under a Python 3 gem5 build unchanged.
print "Beginning simulation!"
# m5.simulate() runs until an exit event; report the final tick and cause.
exit_event = m5.simulate()
print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
|
[
"power.jg@gmail.com"
] |
power.jg@gmail.com
|
db0aaaaee506d0a2bec90b0566b95ef7f862eee9
|
2dc50cd2aaf094bb4ceafc06d0a32633dc2b0c6b
|
/imagenet/models_cifar/resnet2.py
|
bb780ffbcf985c6bf700b75255f586ece5a8acb1
|
[] |
no_license
|
jaykuo2005/RBNN
|
3a17b57ad75e9e94a27e36b18d7fdcf7707f7080
|
f7df8d66bf4b67030fc5a399a37be6df94332e64
|
refs/heads/master
| 2023-03-24T09:11:48.088776
| 2021-03-15T05:15:32
| 2021-03-15T05:15:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,704
|
py
|
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
__all__ =['resnet18A_1w1a','resnet18B_1w1a','resnet18C_1w1a','resnet18_1w1a']
class BasicBlock(nn.Module):
    """Residual basic block built from two binarized 3x3 convolutions.

    Each conv is followed by batch norm; F.hardtanh is the activation
    (applied after bn1 and after the residual addition). The shortcut is
    the identity unless the stride or channel count changes, in which case
    a binarized 1x1 conv + batch norm projects the input.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # Main path: two binarized 3x3 convs, each with its own batch norm.
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Shortcut path: identity by default, 1x1 projection when shapes differ.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        # conv -> bn -> hardtanh, then conv -> bn.
        y = self.conv1(x)
        y = F.hardtanh(self.bn1(y))
        y = self.bn2(self.conv2(y))
        # Residual addition followed by the final hardtanh non-linearity.
        y = y + self.shortcut(x)
        return F.hardtanh(y)
class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce, 3x3, then 1x1 expand (x4).

    Uses binarized convolutions with batch norm after each, ReLU
    activations, and a binarized 1x1 projection shortcut whenever the
    stride or channel count changes.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        # Main path: 1x1 -> 3x3 (strided) -> 1x1 expanding to 4*planes.
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = BinarizeConv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Shortcut path: identity unless a 1x1 projection is required.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Residual addition, then the final ReLU.
        y = y + self.shortcut(x)
        return F.relu(y)
class ResNet(nn.Module):
    """Generic ResNet backbone for CIFAR-sized (32x32, 3-channel) inputs.

    Args:
        block: residual block class exposing an ``expansion`` class attribute
            and a ``(in_planes, planes, stride)`` constructor.
        num_blocks: number of blocks in each of the four stages.
        num_channel: base channel width of each of the four stages.
        num_classes: output size of the final linear classifier.

    The stem uses a regular (full-precision) nn.Conv2d; note there is no
    activation between the stem's batch norm and stage 1, and a 1-D batch
    norm is applied to the flattened features before the classifier.
    """

    def __init__(self, block, num_blocks, num_channel, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = num_channel[0]
        # Stem: plain 3x3 conv + batch norm, stride 1 (CIFAR-style).
        self.conv1 = nn.Conv2d(3, num_channel[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_channel[0])
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, num_channel[0], num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, num_channel[1], num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, num_channel[2], num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, num_channel[3], num_blocks[3], stride=2)
        # Classifier head (kept in this construction order on purpose).
        self.linear = nn.Linear(num_channel[3] * block.expansion, num_classes)
        self.bn2 = nn.BatchNorm1d(num_channel[3] * block.expansion)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one downsamples."""
        layers = []
        for i in range(num_blocks):
            layers.append(block(self.in_planes, planes, stride if i == 0 else 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = self.bn1(self.conv1(x))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        # 4x4 average pool, flatten, 1-D batch norm, then the classifier.
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(self.bn2(h))
def resnet18A_1w1a(**kwargs):
    """Binary (1w1a) ResNet-18, narrow variant A: 32/32/64/128 channels."""
    return ResNet(BasicBlock, [2,2,2,2], [32,32,64,128], **kwargs)

def resnet18B_1w1a(**kwargs):
    """Binary (1w1a) ResNet-18, variant B: 32/64/128/256 channels."""
    return ResNet(BasicBlock, [2,2,2,2], [32,64,128,256], **kwargs)

def resnet18C_1w1a(**kwargs):
    """Binary (1w1a) ResNet-18, variant C: 64/64/128/256 channels."""
    return ResNet(BasicBlock, [2,2,2,2], [64,64,128,256], **kwargs)

def resnet18_1w1a(**kwargs):
    """Binary (1w1a) ResNet-18 with the standard 64/128/256/512 widths."""
    return ResNet(BasicBlock, [2,2,2,2], [64,128,256,512], **kwargs)

def ResNet34(**kwargs):
    # Fix: ResNet.__init__ requires num_channel, which the original call
    # omitted, so ResNet34() raised TypeError. Use the standard widths.
    return ResNet(BasicBlock, [3,4,6,3], [64,128,256,512], **kwargs)

def ResNet50(**kwargs):
    # Fix: supply the previously missing num_channel argument (see ResNet34).
    return ResNet(Bottleneck, [3,4,6,3], [64,128,256,512], **kwargs)

def ResNet101(**kwargs):
    # Fix: supply the previously missing num_channel argument (see ResNet34).
    return ResNet(Bottleneck, [3,4,23,3], [64,128,256,512], **kwargs)

def ResNet152(**kwargs):
    # Fix: supply the previously missing num_channel argument (see ResNet34).
    return ResNet(Bottleneck, [3,8,36,3], [64,128,256,512], **kwargs)
def test():
    """Smoke test: push one random CIFAR-sized batch through resnet18_1w1a."""
    model = resnet18_1w1a()
    output = model(torch.randn(1, 3, 32, 32))
    print(output.size())
# test()
|
[
"791411501@qq.com"
] |
791411501@qq.com
|
41e10feedfe876d5f99cdf53a89cb472419a65d8
|
1ab04c85e37f1d5fc49dcf8054fd551104a1ad58
|
/helloworld.py
|
6ed25b2f85330ac67172666f6458a4af939a189a
|
[] |
no_license
|
htetphyoaung97/Athina
|
33a4286d4bf5998fbbad9b0be25d4c13246ffc87
|
0c55df96604656b5efa05fd65d11c6b0337e7392
|
refs/heads/master
| 2022-11-16T17:55:16.234385
| 2020-07-17T08:50:13
| 2020-07-17T08:50:13
| 280,368,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
# Read the user's first and last name from stdin and print the full name.
input1 = input("Enter Your First Name:")
input2 = input("Enter Your Last Name:")
# f-string join: same output as the original "a + ' ' + b" concatenation.
result = f"{input1} {input2}"
print(result)
|
[
"dev.htetphyoaung@gmail.com"
] |
dev.htetphyoaung@gmail.com
|
1695a61483d9bea6d281bfd9b4b698c7c37bdbc1
|
1e1efdc1900ec0242dfb1c7a425f5b7874241054
|
/NLG_Rap_Bot/CreateIndex_NLG.py
|
545bd4a7ba00a8d9198e353635bffcc58e0d3007
|
[] |
no_license
|
liam9/FreeStyle
|
01bcda77a16ef1b2beacab9840ba521f6dbc09ed
|
b65a4ac668cb2dc24c689cec20aa3509d472d327
|
refs/heads/master
| 2020-03-26T15:57:09.864565
| 2018-08-17T05:47:32
| 2018-08-17T05:47:32
| 145,073,467
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
'''
This file indexes the lyrics so that the rhymes can be found very quickly
'''
import sys,os
dir = os.path.dirname(__file__)
'''
This function indexes the lyrics: it collects the last word of each lyric line (the rhyme word)
into a list, and writes the same words, one per line, to Index.txt so rhymes can be found quickly.
'''
def IndexLyrics_NLG():
    """Build the rhyme index: the last word of each lyric line.

    Reads Text/LyricsCompressed.txt, appends each line's last word to the
    returned list, and mirrors the words (one per line) into Text/Index.txt.
    """
    index = []
    # declare output files
    lyricsFileCompressed = os.path.join(dir, "Text", "LyricsCompressed" + ".txt")
    indexFile = os.path.join(dir, "Text", "Index" + ".txt")
    # `with` closes both handles; the original leaked the lyrics file handle.
    with open(indexFile, "w") as out_file, open(lyricsFileCompressed) as lyrics:
        lines = lyrics.readlines()
        # NOTE(review): the last line is skipped (range stopped at len-1 in the
        # original) — presumably intentional; confirm.
        for line in lines[:-1]:
            try:
                word_list = line.split()
                # store the last word in the line in the index
                index.append(word_list[-1])
                out_file.write(word_list[-1])
                out_file.write("\n")
            except (IndexError, UnicodeEncodeError):
                # Blank line or un-encodable word (narrowed from a bare except).
                print("Lyrics skipped because of encoding")
    return index
|
[
"noreply@github.com"
] |
liam9.noreply@github.com
|
915d93ebfb350f0981dab2804861e7fe19306cc7
|
b0ede55e98d454f558e5397369f9265893deedb5
|
/SWEA/D3/4698_special_prime.py
|
7e90335df7e3a59a8beb755177ef82b1905f53a7
|
[] |
no_license
|
YeonggilGo/python_practice
|
5ff65852900c4c6769d541af16f74a27a67920ec
|
43082568b5045a8efc1d596074bdca3e66b2fed1
|
refs/heads/master
| 2023-06-22T02:09:31.906745
| 2023-06-17T01:27:22
| 2023-06-17T01:27:22
| 280,361,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# Sieve of Eratosthenes over [0, 1_000_000]: collect all primes up front.
primes = []
numbers = [True] * 1000001
numbers[0], numbers[1] = False, False
for i in range(2, 1000001):
    if numbers[i]:
        primes.append(i)
        # Mark every multiple of i composite (i itself is already recorded).
        for j in range(i, 1000001, i):
            numbers[j] = False
T = int(input())  # number of test cases
for tc in range(1, T + 1):
    # D: the digit to look for; [A, B]: inclusive search range.
    D, A, B = map(int, input().split())
    ans = 0
    # primes is sorted ascending, so we can skip below A and stop past B.
    for prime in primes:
        if prime < A:
            continue
        elif prime > B:
            break
        # Count primes whose decimal representation contains digit D.
        if str(D) in str(prime):
            ans += 1
    print(f'#{tc} {ans}')
|
[
"dudrlf1859@naver.com"
] |
dudrlf1859@naver.com
|
ac25687848306c6a9cff59e9ab2267b666d426d9
|
71f00ed87cd980bb2f92c08b085c5abe40a317fb
|
/BestOreo/W2V_CNN_robustchecking.py
|
e2d480f65e769e2d5e81dee662d63187818b0ca9
|
[] |
no_license
|
factoryofthesun/Rao-NLP
|
2bd8269a8eed1cb352c14c8fde88e3111ccca088
|
87f9723f5ee51bd21310d58c3425a2a7271ec3c5
|
refs/heads/master
| 2023-04-18T08:54:08.370155
| 2020-06-09T23:24:07
| 2020-06-09T23:24:07
| 248,070,291
| 0
| 1
| null | 2021-04-30T21:13:04
| 2020-03-17T20:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 9,146
|
py
|
# -*- coding: utf-8 -*-
"""Shuaiqi_train.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11DfSUkrGQfPsEtfHoJxewiBcqBM8OlfZ
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x
import tensorflow as tf
import numpy as np
import pandas as pd
import W2V_Helpers.data_helpers as data_helpers
from W2V_Helpers.w2v_kfold import train_word2vec
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Input, MaxPooling1D, Convolution1D, Embedding
from keras.layers.merge import Concatenate
from keras.backend import clear_session
from keras.preprocessing import sequence
from sklearn.model_selection import KFold
import time
# ---------------------- Parameters section -------------------
#
# Model type. See Kim Yoon's Convolutional Neural Networks for Sentence Classification, Section 3
model_type = "CNN-non-static"  # CNN-rand|CNN-non-static|CNN-static
t0 = time.time()  # wall-clock start; total runtime is reported at script end
# Data source
from pathlib import Path
data_path = str(Path(__file__).parent / "../Data")
train_data_path = data_path + "/mturk_train.csv"
train_data_x_col = "inputtext"
train_data_y_cols = ["rating1", "rating2", "rating3", "rating4", "rating5"]
output_dir = "output"
#models_dir = "models"
# Model Hyperparameters
embedding_dim = 50
filter_sizes = (3, 8)  # one parallel Conv1D branch per kernel size
num_filters = 8
dropout_prob = (0.7, 0.9)  # (after embedding, before the dense layer)
hidden_dims = 70
# Training parameters
batch_size = 64
num_epochs = 50
# Prepossessing parameters
sequence_length = 400  # may be overwritten later to match the actual data width
max_words = 5000
# Word2Vec parameters (see train_word2vec)
min_word_count = 1
context = 10
# ration of training dataset
train_percent = 0.9
#
# ---------------------- Parameters end -----------------------
def format_time(elapsed):
    '''
    Takes a time in seconds and returns a string hh:mm:ss
    '''
    # Fix: the file never imports datetime, so the reference below raised
    # NameError; import locally to keep the module's import block untouched.
    import datetime
    # Round to the nearest second.
    elapsed_rounded = int(round(elapsed))
    # Format as hh:mm:ss (str() of a timedelta).
    return str(datetime.timedelta(seconds=elapsed_rounded))
def load_train_data(kfold = False):
    """Load the MTurk training CSV via data_helpers.

    With kfold=False: returns a shuffled (x_train, y_train, x_val, y_val,
    vocabulary_inv) split using the module-level train_percent ratio.
    With kfold=True: returns (x, y, train_ind_list, test_ind_list,
    vocabulary_inv) with 10-fold indices.
    """
    x, y, vocabulary, vocabulary_inv_list = data_helpers.load_train_data(train_path=train_data_path,
                                                                         train_x_col=train_data_x_col,
                                                                         train_y_cols=train_data_y_cols,
                                                                         save_path="W2V_Helpers"
                                                                         )
    # Map index -> word for the embedding vocabulary.
    vocabulary_inv = {key: value for key, value in enumerate(vocabulary_inv_list)}
    # Collapse the one-hot rating columns into a single class label per row.
    y = y.argmax(axis=1)
    if not kfold:
        # Shuffle data (no fixed seed: the split differs on every run).
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x = x[shuffle_indices]
        y = y[shuffle_indices]
        train_len = int(len(x) * train_percent)
        x_train = x[:train_len]
        y_train = y[:train_len]
        x_val = x[train_len:]
        y_val = y[train_len:]
        return x_train, y_train, x_val, y_val, vocabulary_inv
    else:
        # Create 10 folds for 10% training/validation
        train_ind_list = []
        test_ind_list = []
        kf = KFold(n_splits = 10)
        for train_ind, test_ind in kf.split(x):
            train_ind_list.append(train_ind)
            test_ind_list.append(test_ind)
        return x, y, train_ind_list, test_ind_list, vocabulary_inv
def loadModel(x_train, x_val, vocabulary_inv):
    """Build the text CNN, optionally initialized with word2vec weights.

    Architecture: embedding (omitted for CNN-static) -> dropout -> parallel
    Conv1D + MaxPool branches (one per entry of filter_sizes) -> concat ->
    dropout -> dense -> sigmoid. Hyperparameters come from module globals.
    """
    # Prepare embedding layer weights and convert inputs for static model
    print("Model type is", model_type)
    if model_type in ["CNN-non-static", "CNN-static"]:
        embedding_weights = train_word2vec(np.vstack((x_train, x_val)), vocabulary_inv, num_features=embedding_dim,
                                           min_word_count=min_word_count, context=context)
        if model_type == "CNN-static":
            # Replace token ids with their embedding vectors up front.
            # NOTE(review): this rebinds the local x_train/x_val only; the
            # caller's arrays are unchanged — confirm that is intended.
            x_train = np.stack([np.stack([embedding_weights[word] for word in sentence]) for sentence in x_train])
            x_val = np.stack([np.stack([embedding_weights[word] for word in sentence]) for sentence in x_val])
            print("x_train static shape:", x_train.shape)
            print("x_val static shape:", x_val.shape)
    elif model_type == "CNN-rand":
        embedding_weights = None
    else:
        raise ValueError("Unknown model type")
    # Build model
    if model_type == "CNN-static":
        input_shape = (sequence_length, embedding_dim)
    else:
        input_shape = (sequence_length,)
    model_input = Input(shape=input_shape)
    # Static model does not have embedding layer
    if model_type == "CNN-static":
        z = model_input
    else:
        z = Embedding(len(vocabulary_inv), embedding_dim, input_length=sequence_length, name="embedding")(model_input)
    z = Dropout(dropout_prob[0])(z)
    # Convolutional block: one branch per filter size, then concatenate.
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             strides=1)(z)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dropout(dropout_prob[1])(z)
    z = Dense(hidden_dims, activation="relu")(z)
    # Single sigmoid unit: binary classification head.
    model_output = Dense(1, activation="sigmoid")(z)
    model = Model(model_input, model_output)
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    model.summary()
    # Initialize weights with word2vec
    if model_type == "CNN-non-static":
        weights = np.array([v for v in embedding_weights.values()])
        print("Initializing embedding layer with word2vec weights, shape", weights.shape)
        embedding_layer = model.get_layer("embedding")
        embedding_layer.set_weights([weights])
    return model
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
path = str(Path(__file__).parent / "../Plots")
def plotHistory(history, i):
    """Save loss and accuracy curves for training run *i* under the Plots dir."""
    epochs = range(1, len(history.history['loss']) + 1)
    # Loss curves: training (red dashed) vs validation (blue solid).
    plt.plot(epochs, history.history['loss'], 'r--')
    plt.plot(epochs, history.history['val_loss'], 'b-')
    plt.legend(['Training Loss', 'Validation Loss'])
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.savefig(path + "/rep_w2v_cnn_loss_{}.png".format(i))
    plt.clf()
    # Accuracy curves for train vs validation, on a fresh figure.
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.savefig(path + "/rep_w2v_cnn_accuracy_{}.png".format(i))
    plt.clf()
# Data Preparation
print("Load data...")
x_train, y_train, x_val, y_val, vocabulary_inv = load_train_data()
#x, y, train_ind_list, test_ind_list, vocabulary_inv = load_train_data(True)
loss_list = []
accuracy_list = []
# Train 5 independent models on the same split and average their scores.
for i in range(5):
    # Sync the configured sequence_length with the actual padded data width.
    if sequence_length != x_val.shape[1]:
        print("Adjusting sequence length for actual size")
        sequence_length = x_val.shape[1]
    print("x_train shape:", x_train.shape)
    print("x_val shape:", x_val.shape)
    print("Vocabulary Size: {:d}".format(len(vocabulary_inv)))
    model = loadModel(x_train, x_val, vocabulary_inv)
    # Train the model
    from keras.callbacks import EarlyStopping
    early_stopping = EarlyStopping(min_delta = 0.01, mode = 'max', monitor='val_acc', patience = 2)
    callback = [early_stopping]
    # NOTE(review): `callback` is built but never passed to fit(), so early
    # stopping is inactive — confirm whether that is intentional.
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=num_epochs,
                        validation_data=(x_val, y_val), verbose=1)
    plotHistory(history, i)
    score = model.evaluate(x_val, y_val, batch_size=64, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    loss_list.append(score[0])
    accuracy_list.append(score[1])
    # Free the TF graph between runs so models don't accumulate in memory.
    clear_session()
print("Average validation loss: {}".format(sum(loss_list)/len(loss_list)))
print("Average validation accuracy: {}".format(sum(accuracy_list)/len(accuracy_list)))
print("Total script time: {}".format(format_time(time.time() - t0)))
# Plot the learning curves (one point per epoch).
# Visualize the learning curve. Here the learning curve is not ideal: it should be much smoother as it decreases.
# As mentioned before, altering the hyperparameters — especially the learning rate — can have a positive impact
# on accuracy and on the learning curve.
#
# **If validation loss >> training loss you can call it overfitting.**
#
# If validation loss > training loss you can call it some overfitting.
#
# If validation loss < training loss you can call it some underfitting.
#
# If validation loss << training loss you can call it underfitting.
#
# Just right if training loss ~ validation loss
#
# -----------------------------------------
#
# ### Steps for reducing overfitting:
#
# 1. Add more data
# 2. Use data augmentation
# 3. Use architectures that generalize well
# 4. Add regularization (mostly dropout, L1/L2 regularization are also possible)
# 5. Reduce architecture complexity.
#
# print test accuracy
'''score = model.evaluate(x_val, y_val, batch_size=32, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])'''
#data_helpers.save_model(model, models_dir)
|
[
"guanzhi97@gmail.com"
] |
guanzhi97@gmail.com
|
0b03d074fa0c57ddbd4539401be24204d3559b54
|
322df586ef094872d4c4609097bcf3e52c361695
|
/test/operations/math/test_div.py
|
c8d4161e541ba3d40cd52468dfb862576e53b667
|
[
"MIT"
] |
permissive
|
xpenalosa/PyPc
|
04b19d05d8628d39f9bc8759e3875620cce89c06
|
fff3ae29b800d127d261492098aecbbf6719bd07
|
refs/heads/master
| 2020-12-09T06:22:11.306317
| 2020-02-16T21:11:30
| 2020-02-16T21:11:30
| 233,220,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
from pypc.memory import MemoryTypes
from pypc.operations import Operations
from test.operations.operation_test import OperationTestBase
class DivTest(OperationTestBase):
    """Integration tests for the DIV operation on both memory models.

    Each case runs a one-line "DIV a, b, 0" program and verifies the final
    memory image — quotient at address 0, the operands preserved, then the
    END sentinel — and that execution stopped at address 4.
    """

    def _assert_div(self, memory_type, program, quotient, dividend, divisor):
        """Run *program* and check the integer-division result and final address."""
        expected_data = [quotient, dividend, divisor, 0, Operations.END.value]
        result_mem = self.run_pc(memory_type, self.parse_input(program))
        self.assertEqual(expected_data, result_mem.data)
        self.assertEqual(result_mem.address, 4)

    def test_div_basic(self):
        self._assert_div(MemoryTypes.BASIC, "DIV 8, 2, 0", 4, 8, 2)

    def test_div_basic_decimal(self):
        # Integer division truncates: 8 / 3 -> 2.
        self._assert_div(MemoryTypes.BASIC, "DIV 8, 3, 0", 2, 8, 3)

    def test_div_mod_access_ll(self):
        self._assert_div(MemoryTypes.MODIFIED_ACCESS, "DIV 8L, 2L, 0", 4, 8, 2)

    def test_div_mod_access_lr(self):
        self._assert_div(MemoryTypes.MODIFIED_ACCESS, "DIV 8L, 1, 0", 1, 8, 1)

    def test_div_mod_access_rr(self):
        self._assert_div(MemoryTypes.MODIFIED_ACCESS, "DIV 3, 2, 0", 0, 3, 2)
|
[
"xavier.penalosa.esteller@gmail.com"
] |
xavier.penalosa.esteller@gmail.com
|
cf8ae1bf18dadeb8aa363dc03bb7037db385880c
|
3747768c664256fe65108b4bc21e6baa7eb7a613
|
/ex5_2.py
|
134e8bdceb2cf74704904190393ea28a661a4674
|
[] |
no_license
|
Ellzud/Python_Hard
|
c4a41e606620b426c7e16410190598d30049e17c
|
32c1bb12f18bde9b1d7d7516cd4621ac9416ab8d
|
refs/heads/master
| 2020-03-12T15:53:32.439419
| 2018-04-25T13:54:39
| 2018-04-25T13:54:39
| 130,702,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Learn python the hard way
# exercise 5
# more variables and printing
# Rafael Serrano 4/23/2018
# NOTE: written with Python 2 print statements; it will not run under Python 3.
name = 'Zed A. Shaw'
age = 35 # not a lie
height = 74 # inches
weight = 180 # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
# %s / %d placeholders are filled from the value(s) after the % operator.
print "Let's talk about %s." % name
print "He's %d inches tall." % height
print "he's %d pounds heavy." % weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
#this line is tricky, we will try to get it right
print "If I add %d, %d, and %d I get %d." % (age, height, weight, age + height + weight)
|
[
"serrano.rafael1@gmail.com"
] |
serrano.rafael1@gmail.com
|
a315cc93eb48378a0694c0131b8d1a8bd460e157
|
432b9b1ba469ef94ffd93065d4fde5d8c89f1a6e
|
/DM3/src/data.py
|
22298997aad2e1ff16e001bfe849a1acca56c01a
|
[] |
no_license
|
NelleV/SVM
|
0ea9931e2152d6200ef094325a9f1838eed99943
|
a46cfecb7f5d4361a93d36bdf85c2cc76c72838b
|
refs/heads/master
| 2020-06-05T07:31:34.034416
| 2012-03-06T19:36:40
| 2012-03-06T19:36:40
| 3,238,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
import csv
import numpy as np
def libras_movement():
    """
    Fetches the Libras Movement dataset from data/movement_libras.data.

    Each CSV row holds the feature columns followed by the class label in
    the last column.

    Returns
    -------
    X : ndarray of float, shape (n_samples, n_features)
    Y : ndarray of float, shape (n_samples,)
    """
    X = []
    Y = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open('data/movement_libras.data', 'r') as handle:
        for element in csv.reader(handle):
            X.append(element[:-1])
            Y.append(element[-1])
    return np.array(X).astype('float'), np.array(Y).astype('float')
|
[
"nelle.varoquaux@gmail.com"
] |
nelle.varoquaux@gmail.com
|
13cfea6db55dada74d7680481f21ba2ede13b5c4
|
616024faf0f95aafb76db2f6caa96c5aa2fccbf0
|
/session.py
|
a8ae892a5f0b1041e9f8ff87a3aff9387c6a857b
|
[] |
no_license
|
Shevane12/Chisel
|
154f0025664f23c0eaf52ebcb64aa3a7c2495115
|
d3a7e8a5926e5ed9ed6edf0fed6e166387352e8c
|
refs/heads/master
| 2022-12-30T05:56:08.033868
| 2020-10-23T23:27:48
| 2020-10-23T23:27:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,057
|
py
|
from requests import Session
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import title_is
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from random import choice
from time import sleep
from cloudscraper import CloudScraper
from http.cookiejar import CookiePolicy
from pymongo import MongoClient, DESCENDING
from tldextract import TLDExtract
from credentials import mongodb
from tempfile import TemporaryDirectory
from filelock import FileLock
from os.path import join
from urllib.parse import urlsplit
import re
from random import random
from requests.exceptions import ConnectionError, ReadTimeout
import pandas as pd
import requests
from datetime import datetime
from copy import deepcopy
import magic
class BlockCookies(CookiePolicy):
    """Cookie policy that refuses to accept or return any cookie.

    Every policy hook answers False, so the owning jar stays empty; the
    protocol flags mirror a plain Netscape-only, non-RFC2965 policy.
    """

    def _deny(self, *args, **kwargs):
        return False

    return_ok = set_ok = domain_return_ok = path_return_ok = _deny
    netscape = True
    rfc2965 = hide_cookie2 = False
class ChiselSession(Session):
    """requests.Session that retries Cloudflare-protected requests, rotating
    proxies and caching Cloudflare clearance tokens per (domain, ip) in MongoDB.

    NOTE(review): the recovered source had its indentation stripped; the block
    nesting below was reconstructed from control-flow keywords — confirm
    against the original.
    """

    def __init__(self):
        super().__init__()
        # Reject all cookies at the session level; Cloudflare tokens are
        # injected explicitly via load_tokens()/save_tokens() instead.
        self.cookies.set_policy(BlockCookies())
        self.options = Options()
        self.options.headless = True
        self.options.add_argument('window-size=1920,1080')
        # Capture the real Chrome UA and strip the 'Headless' marker so plain
        # HTTP requests and the browser present the same user agent.
        with Chrome(options=self.options) as browser:
            user_agent = browser.execute_script('return navigator.userAgent').replace('Headless', '')
        self.headers['user-agent'] = user_agent
        self.options.add_argument('user-agent=' + user_agent)
        self.database = MongoClient(**mongodb)['chisel']
        # One token pair per (site domain, egress IP); plus per-host history
        # and the proxy pool bookkeeping.
        self.database['tokens'].create_index(keys=(('domain', 1), ('ip', 1)), unique=True)
        self.database['history'].create_index(keys='domain', unique=True)
        self.database['proxies'].create_index(keys='proxy', unique=True)
        self.database['proxies'].create_index(keys='works')
        self.database['proxies'].create_index(keys='inserted')
        self.locks = TemporaryDirectory()  # holds FileLock files and the TLD cache
        self.IPv4 = requests.get('https://api.ipify.org/').text  # our public IP
        self.extract = TLDExtract(cache_dir=self.locks.name)

    def _domain(self, url):
        # Registered domain with a leading dot, e.g. '.example.com'.
        extracted = self.extract(url)
        return '.' + extracted.domain + '.' + extracted.suffix

    def _ip(self, proxy):
        # Egress IP for a request: the proxy host if one is used, else our own.
        if proxy:
            return urlsplit(proxy).hostname
        else:
            return self.IPv4

    def save_tokens(self, url, proxy, cookie1, cookie2):
        """Upsert the Cloudflare (__cfduid, cf_clearance) pair for (domain, ip)."""
        if not cookie1 or not cookie2:
            return
        domain = self._domain(url)
        ip = self._ip(proxy)
        # NOTE(review): Collection.update()/insert()/count() are legacy pymongo
        # APIs (deprecated in favor of update_one etc.); kept as-is throughout.
        self.database['tokens'].update({
            'domain': domain,
            'ip': ip,
        }, {
            'domain': domain,
            'ip': ip,
            'token1': cookie1['value'],
            'token2': cookie2['value'],
        }, True)

    def load_tokens(self, url, proxy):
        """Return the cached Cloudflare cookies for (domain, ip), or {}."""
        document = self.database['tokens'].find_one({'domain': self._domain(url), 'ip': self._ip(proxy)})
        if document is None:
            return {}
        return {'__cfduid': document['token1'], 'cf_clearance': document['token2']}

    def save_history(self, url, blocked):
        """Record one visit (and optionally one ban) for the URL's host."""
        domain = urlsplit(url).netloc
        if not self.database['history'].count({'domain': domain}):
            self.database['history'].insert({
                'domain': domain,
                'visits': 0,
                'bans': 0,
            })
        increments = {'visits': 1}
        if blocked:
            increments['bans'] = 1
        self.database['history'].update({'domain': domain}, {'$inc': increments})

    def load_history(self, url):
        """Randomly treat the host as blocked, with probability ~ its ban rate."""
        document = self.database['history'].find_one({'domain': urlsplit(url).netloc})
        return document and random() < document['bans'] / (document['visits'] + 1)

    def request(self, method, url, **kwargs):
        """Issue the request with up to 5 retries: rotate proxies, inject
        cached Cloudflare tokens, and solve IUAM challenges via headless Chrome."""
        assert urlsplit(url).scheme in ('http', 'https')
        resp = None
        retries = 0
        cookies = kwargs.pop('cookies', {})
        blocked = self.load_history(url)  # start behind a proxy if the host bans often
        proxy = None
        tokens = {}
        while retries < 5:
            pass  # NOTE(review): no-op present in the recovered source; presumably dead code
            # Pick a (possibly new) proxy on the first pass, or when the cached
            # tokens did not change since we last loaded them.
            if retries == 0 or tokens == self.load_tokens(url, proxy):
                proxy = self.get_random_proxy(blocked)
                tokens = self.load_tokens(url, proxy)
            try:
                resp = super().request(
                    method=method,
                    url=url,
                    cookies={**cookies, **tokens},
                    proxies={'http': proxy, 'https': proxy},
                    timeout=60,
                    **kwargs,
                )
            except (ConnectionError, ReadTimeout):
                if proxy:
                    # Dead proxy: mark it broken and retry without burning a retry.
                    self.database['proxies'].update({'proxy': proxy}, {'$set': {'works': False}})
                else:
                    retries += 1
                print('Retrying "{}" after connection error ...'.format(url))
                continue
            # Backfill headers some servers omit so downstream code can rely on them.
            if 'content-type' not in resp.headers:
                resp.headers['content-type'] = magic.from_buffer(resp.content, True)
            if 'content-length' not in resp.headers:
                resp.headers['content-length'] = str(len(resp.content))
            # Some sites return 200 with a BANNED page; normalize it to 403.
            if resp.headers['content-type'].startswith('text/html') and re.search(r'<title>\s*BANNED\s*</title>', resp.text):
                resp.status_code = 403
            if not blocked:
                blocked = resp.status_code in (429, 403)
            self.save_history(url, blocked)
            if resp.ok or resp.status_code == 404:
                return resp
            # Cloudflare "I'm Under Attack Mode" challenge: solve it once per
            # (domain, ip) under a file lock, using a real headless browser.
            if CloudScraper.is_IUAM_Challenge(resp) or CloudScraper.is_New_IUAM_Challenge(resp):
                with FileLock(join(self.locks.name, self._domain(url) + ' -- ' + self._ip(proxy))):
                    # Re-check under the lock: another worker may have solved it already.
                    if tokens == self.load_tokens(url, proxy):
                        options = deepcopy(self.options)
                        if proxy:
                            options.add_argument('--proxy-server=' + proxy)
                        with Chrome(options=options) as browser:
                            # selenium.js presumably masks webdriver fingerprints — confirm.
                            with open('selenium.js', 'r') as fp:
                                browser.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {'source': fp.read()})
                            browser.get(url)
                            # Simulate user activity while the challenge resolves.
                            actions = ActionChains(browser)
                            for _ in range(30):
                                actions.send_keys(choice((Keys.DOWN, Keys.UP, Keys.LEFT, Keys.RIGHT))).perform()
                            try:
                                WebDriverWait(browser, 30).until_not(title_is('Just a moment...'))
                            except TimeoutException:
                                pass
                            self.save_tokens(url, proxy, browser.get_cookie('__cfduid'), browser.get_cookie('cf_clearance'))
            print('Retrying "{}" after status code {} ...'.format(url, resp.status_code))
            sleep(2 ** retries)  # exponential backoff
            retries += 1
        return resp

    def get_random_proxy(self, enabled):
        """Return a random working proxy URL, or None when proxying is disabled."""
        if not enabled:
            return None
        return self.database['proxies'].aggregate([
            {'$match': {'works': True}},
            {'$sample': {'size': 1}},
        ]).next()['proxy']

    def store_proxy_series(self, series):
        """Insert proxies from a pandas Series, skipping ones already known."""
        for item in series:
            if not self.database['proxies'].count({'proxy': item}):
                self.database['proxies'].insert({
                    'proxy': item,
                    'works': True,
                    'inserted': datetime.now(),
                })

    def worker(self):
        """Endless maintenance loop: scrape fresh proxy lists, then re-check
        every known proxy against a connectivity endpoint."""
        while True:
            pass  # NOTE(review): no-op present in the recovered source; presumably dead code
            df = pd.read_html(requests.get('https://www.socks-proxy.net/').text)[0][:-1]
            df['Port'] = df['Port'].astype(int).astype(str)
            df['Version'] = df['Version'].str.lower()
            self.store_proxy_series(df['Version'] + '://' + df['IP Address'] + ':' + df['Port'])
            df = pd.read_html(requests.get('https://free-proxy-list.net/').text)[0][:-1]
            df['Port'] = df['Port'].astype(int).astype(str)
            df['Https'] = df['Https'].map({'yes': 'https', 'no': 'http'})
            self.store_proxy_series(df['Https'] + '://' + df['IP Address'] + ':' + df['Port'])
            # Health-check newest proxies first, over both protocols.
            for proxy in [doc['proxy'] for doc in self.database['proxies'].find().sort('inserted', DESCENDING)]:
                try:
                    works = all(requests.head(
                        url=protocol + '://connectivitycheck.gstatic.com/generate_204',
                        proxies={protocol: proxy},
                        timeout=5,
                    ).status_code == 204 for protocol in ('http', 'https'))
                except (ConnectionError, ReadTimeout):
                    works = False
                self.database['proxies'].update({'proxy': proxy}, {'$set': {'works': works}})
|
[
"deboer.wilco@gmail.com"
] |
deboer.wilco@gmail.com
|
a82beeb3c4f4b2b11632854d1f7251427ba6389b
|
9d5c9d9373002ab4ed1b493136517e8b4ab160e5
|
/saas/backend/apps/application/migrations/0009_auto_20200902_1134.py
|
1040ea6a30228a6fcf4b7f4921d2955e0bf91fbf
|
[
"MIT"
] |
permissive
|
robert871126/bk-iam-saas
|
f8299bb632fc853ef0131d445f84c6084fc84aba
|
33c8f4ffe8697081abcfc5771b98a88c0578059f
|
refs/heads/master
| 2023-08-23T19:23:01.987394
| 2021-10-22T09:45:28
| 2021-10-22T09:45:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 2.2.14 on 2020-09-02 03:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ApprovalNode.approver to a TextField."""

    dependencies = [
        ('application', '0008_auto_20200803_1157'),
    ]

    operations = [
        migrations.AlterField(
            model_name='approvalnode',
            name='approver',
            # help_text/verbose_name are user-facing (Chinese) strings emitted
            # by the generator and must stay exactly as generated.
            field=models.TextField(help_text='多个以英文逗号分隔', verbose_name='审批人'),
        ),
    ]
|
[
"zhu327@gmail.com"
] |
zhu327@gmail.com
|
5fa380db591b15f03f9b47603efafac84ba50a64
|
994e60f19bf8cf45317cf48a92bee64ac798eb44
|
/code/loop.py
|
b487f1a007b8844e7844a0e86234323c812c3b58
|
[] |
no_license
|
ozntur/col_oil_prod
|
ad74e9182d594ca30cda213d8e7648cb8e8e0c4b
|
15247b2c937a7f5cf6ee64e520f74578fc07ca11
|
refs/heads/master
| 2022-11-07T05:52:02.642364
| 2020-07-02T18:43:27
| 2020-07-02T18:43:27
| 276,159,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
# USEFUL LOOP TO DOWNLOAD DATA
#
# For each producing ('PR') well in the shapefile, download its monthly
# oil-production table from the Colorado COGCC site, then plot production
# over time for every well collected.
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt  # fix: plt was used below but never imported

ds = gpd.read_file('data/WELLS_SHP/Wells.shp')
print('asd')  # leftover debug output
#print('done')
print(ds.API[ds.Facil_Stat == 'PR'])
#print('done')
d = {}
for x in (ds.API[ds.Facil_Stat == 'PR']):
    # NOTE(review): .item() assumes each API value is unique in the shapefile
    # (it raises if the mask matches more than one row) — confirm.
    a = ds.API_County.loc[ds.API == x].item()
    b = ds.API_Seq.loc[ds.API == x].item()
    print('x = ' + x + ' a = ' + a + ' b =' + b)
    website = 'https://cogcc.state.co.us/production/?&apiCounty=' + a + '&apiSequence=' + b + '&APIWB=00&Year=All'
    print(website)
    dfs = pd.read_html(website)
    dfs[1]['First of Month'] = pd.to_datetime(dfs[1]['First of Month'])
    data = {
        'Time': dfs[1]['First of Month'],
        'Oilprod': dfs[1]['Oil Produced'],
    }
    d[x] = pd.DataFrame(data)
    if b == '08294':
        # Stop early after a known well (hard-coded debugging cut-off).
        break
print('finished')
for m in list(d):
    plt.plot(d[m]['Time'], d[m]['Oilprod'], label=m)
#plt.legend()
plt.xlabel('Date')
plt.ylabel('Oil Produced')
plt.show()
|
[
"ozan.turkes@gmail.com"
] |
ozan.turkes@gmail.com
|
92477151719bfd2fe4fb01b879caecf255490283
|
9fa57500b2f5201048da971d1e5e5cc4af056dc9
|
/tests/unit/models/reddit/test_inline_media.py
|
ec5a18607b1761d46a42e6768b73a9a6810a1df5
|
[
"BSD-2-Clause"
] |
permissive
|
LilSpazJoekp/asyncpraw
|
7fc95bf88f875010097d601e7f488748392fedf6
|
afddfb770b31a08f06a1b8ced6af803e499804e7
|
refs/heads/master
| 2022-04-01T09:42:42.035001
| 2022-03-04T21:32:09
| 2022-03-04T21:32:09
| 320,988,731
| 0
| 0
|
BSD-2-Clause
| 2020-12-13T05:19:57
| 2020-12-13T05:10:34
| null |
UTF-8
|
Python
| false
| false
| 2,307
|
py
|
from asyncpraw.models import InlineGif, InlineImage, InlineMedia, InlineVideo
from ... import UnitTest
class TestInlineMedia(UnitTest):
    """Behavioral tests for InlineMedia and its Gif/Image/Video subclasses."""

    @staticmethod
    def _build(media_cls, media_id, **kwargs):
        """Instantiate *media_cls* and attach the given media_id."""
        media = media_cls(**kwargs)
        media.media_id = media_id
        return media

    def test_equality(self):
        first = self._build(InlineMedia, "media_id1", path="path1", caption="caption1")
        second = self._build(InlineMedia, "media_id1", path="path1", caption="caption1")
        third = self._build(InlineMedia, "media_id2", path="path2", caption="caption2")
        # Reflexive on every instance.
        assert first == first
        assert second == second
        assert third == third
        # Identical fields compare equal; differing fields do not.
        assert first == second
        assert second != third
        assert first != third

    def test_repr(self):
        cases = [
            (InlineMedia(path="path1", caption="caption1"), "<InlineMedia caption='caption1'>"),
            (InlineMedia(path="path1"), "<InlineMedia caption=None>"),
            (InlineGif(path="gif_path1", caption="gif_caption1"), "<InlineGif caption='gif_caption1'>"),
            (InlineImage(path="image_path1", caption="image_caption1"), "<InlineImage caption='image_caption1'>"),
            (InlineVideo(path="video_path1", caption="video_caption1"), "<InlineVideo caption='video_caption1'>"),
        ]
        for media, expected in cases:
            assert repr(media) == expected

    def test_str(self):
        cases = [
            self._build(InlineMedia, "media_media_id", path="path1", caption="caption1"),
            self._build(InlineMedia, "media_media_id_no_caption", path="path1"),
            self._build(InlineGif, "gif_media_id", path="gif_path1", caption="gif_caption1"),
            self._build(InlineImage, "image_media_id", path="image_path1", caption="image_caption1"),
            self._build(InlineVideo, "video_media_id", path="video_path1", caption="video_caption1"),
        ]
        # Every rendered form is the same four-newline placeholder.
        for media in cases:
            assert str(media) == '\n\n\n\n'
|
[
"15524072+LilSpazJoekp@users.noreply.github.com"
] |
15524072+LilSpazJoekp@users.noreply.github.com
|
92322e9b068c373a41915258ab7f5d03e802e076
|
187dfad282778ba66c9fbc410e53a25410b8ad57
|
/hellosign_python_sdk/tests/functional_tests/test_team.py
|
cb8dc2198a36e323e5900c81bbf5c88276884f63
|
[] |
no_license
|
binti-family/hellosign-python-sdk
|
eaff6f0bd1d1905cc5a0e64d6cd7e9b70adad8b3
|
5acf8c74c4ae85e6a7be9e850d8d1f2a55c11595
|
refs/heads/master
| 2021-01-21T02:41:13.123103
| 2014-03-26T08:00:22
| 2014-03-26T08:00:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
from unittest import TestCase
from hellosign_python_sdk.tests.test_helper import api_key
from hellosign_python_sdk.hsclient import HSClient
from hellosign_python_sdk.resource.team import Team
from hellosign_python_sdk.utils.exception import NotFound, HSException, InvalidEmail, Forbidden
class TestTeam(TestCase):
    """Functional tests for HSClient team management.

    NOTE(review): these tests hit the live HelloSign API with the configured
    api_key and mutate real team state.
    """

    def setUp(self):
        self.client = HSClient(api_key=api_key)

    def test_add_team_member_with_invalid_info(self):
        # A malformed email address must raise InvalidEmail.
        try:
            self.client.add_team_member(email_address="in valid email")
        except InvalidEmail:
            pass
        # An unknown account id must raise NotFound.
        try:
            self.client.add_team_member(account_id="in valid account_id")
        except NotFound:
            pass

    def test_team_functions(self):
        try:
            # You already in a team
            # save your old team name -> update new team name -> add member ->
            # remove member -> restore your old team name
            team = self.client.get_team_info()
            old_team_name = team.name
            team = self.client.update_team_name("New team name")
            self.assertEquals(isinstance(team, Team), True)
            try:
                team = self.client.add_team_member(
                    email_address="not_existed_user@example.com")
                self.assertEquals(isinstance(team, Team), True)
            except Forbidden:
                # The account may lack permission to add members; tolerated.
                pass
            try:
                # Calling with no identifying argument must raise client-side.
                self.client.add_team_member()
            except HSException:
                pass
            team = self.client.remove_team_member(
                email_address="not_existed_user@example.com")
            self.assertEquals(isinstance(team, Team), True)
            team = self.client.update_team_name(old_team_name)
            self.assertEquals(isinstance(team, Team), True)
        except NotFound:
            # You do not belong to any teams
            # create team -> add member, remove member, destroy team
            team = self.client.create_team("New team")
            self.assertEquals(team.name, "New team")
            team = self.client.add_team_member(
                email_address="not_existed_user@example.com")
            self.assertEquals(isinstance(team, Team), True)
            team = self.client.remove_team_member(
                email_address="not_existed_user@example.com")
            self.assertEquals(isinstance(team, Team), True)
            result = self.client.destroy_team()
            self.assertEquals(result, True)
|
[
"minhdanh@tgm.vn"
] |
minhdanh@tgm.vn
|
62eaf9858c418fef633ac4d4dff91466518cb03b
|
c47e4c82a68563dbb5828dae8e9b1a3598297b7c
|
/NajaParser.py
|
8f71d0beb5236a2d8f756c33fae40069a7b2d5b8
|
[] |
no_license
|
MarceloCFSF/Naja
|
b0f28afc1a1feae7339d916a2b11189e6be0290a
|
edc38d5bd02afe840ea2ad006491e0d950191818
|
refs/heads/master
| 2023-07-11T15:06:06.850798
| 2021-08-14T05:17:09
| 2021-08-14T05:17:09
| 395,882,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,637
|
py
|
# Generated from Naja.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\31")
buf.write("\u008d\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\3\2\3\2\3\2\3")
buf.write("\3\3\3\3\3\3\3\7\3+\n\3\f\3\16\3.\13\3\3\4\3\4\3\5\6\5")
buf.write("\63\n\5\r\5\16\5\64\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6>\n")
buf.write("\6\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\n")
buf.write("\3\n\3\n\3\n\3\13\3\13\3\13\3\13\6\13T\n\13\r\13\16\13")
buf.write("U\3\13\3\13\5\13Z\n\13\3\f\3\f\3\f\6\f_\n\f\r\f\16\f`")
buf.write("\3\f\3\f\3\r\3\r\3\r\3\r\6\ri\n\r\r\r\16\rj\3\r\3\r\3")
buf.write("\16\3\16\3\16\6\16r\n\16\r\16\16\16s\3\16\3\16\3\16\3")
buf.write("\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20")
buf.write("\7\20\u0084\n\20\f\20\16\20\u0087\13\20\5\20\u0089\n\20")
buf.write("\3\21\3\21\3\21\2\2\22\2\4\6\b\n\f\16\20\22\24\26\30\32")
buf.write("\34\36 \2\5\3\2\5\6\3\2\26\27\4\2\26\26\30\30\2\u008b")
buf.write("\2\"\3\2\2\2\4&\3\2\2\2\6/\3\2\2\2\b\62\3\2\2\2\n=\3\2")
buf.write("\2\2\f?\3\2\2\2\16D\3\2\2\2\20I\3\2\2\2\22K\3\2\2\2\24")
buf.write("O\3\2\2\2\26[\3\2\2\2\30d\3\2\2\2\32n\3\2\2\2\34y\3\2")
buf.write("\2\2\36\u0088\3\2\2\2 \u008a\3\2\2\2\"#\7\3\2\2#$\5\b")
buf.write("\5\2$%\7\4\2\2%\3\3\2\2\2&\'\5\6\4\2\',\7\26\2\2()\7\22")
buf.write("\2\2)+\7\26\2\2*(\3\2\2\2+.\3\2\2\2,*\3\2\2\2,-\3\2\2")
buf.write("\2-\5\3\2\2\2.,\3\2\2\2/\60\t\2\2\2\60\7\3\2\2\2\61\63")
buf.write("\5\n\6\2\62\61\3\2\2\2\63\64\3\2\2\2\64\62\3\2\2\2\64")
buf.write("\65\3\2\2\2\65\t\3\2\2\2\66>\5\f\7\2\67>\5\4\3\28>\5\16")
buf.write("\b\29>\5\22\n\2:>\5\24\13\2;>\5\30\r\2<>\5\32\16\2=\66")
buf.write("\3\2\2\2=\67\3\2\2\2=8\3\2\2\2=9\3\2\2\2=:\3\2\2\2=;\3")
buf.write("\2\2\2=<\3\2\2\2>\13\3\2\2\2?@\7\7\2\2@A\7\r\2\2AB\7\26")
buf.write("\2\2BC\7\16\2\2C\r\3\2\2\2DE\7\b\2\2EF\7\r\2\2FG\5\20")
buf.write("\t\2GH\7\16\2\2H\17\3\2\2\2IJ\t\3\2\2J\21\3\2\2\2KL\7")
buf.write("\26\2\2LM\7\21\2\2MN\5\36\20\2N\23\3\2\2\2OP\7\t\2\2P")
buf.write("Q\5\34\17\2QS\7\23\2\2RT\5\n\6\2SR\3\2\2\2TU\3\2\2\2U")
buf.write("S\3\2\2\2UV\3\2\2\2VW\3\2\2\2WY\7\24\2\2XZ\5\26\f\2YX")
buf.write("\3\2\2\2YZ\3\2\2\2Z\25\3\2\2\2[\\\7\n\2\2\\^\7\23\2\2")
buf.write("]_\5\n\6\2^]\3\2\2\2_`\3\2\2\2`^\3\2\2\2`a\3\2\2\2ab\3")
buf.write("\2\2\2bc\7\24\2\2c\27\3\2\2\2de\7\13\2\2ef\5\34\17\2f")
buf.write("h\7\23\2\2gi\5\n\6\2hg\3\2\2\2ij\3\2\2\2jh\3\2\2\2jk\3")
buf.write("\2\2\2kl\3\2\2\2lm\7\24\2\2m\31\3\2\2\2no\7\f\2\2oq\7")
buf.write("\23\2\2pr\5\n\6\2qp\3\2\2\2rs\3\2\2\2sq\3\2\2\2st\3\2")
buf.write("\2\2tu\3\2\2\2uv\7\24\2\2vw\7\13\2\2wx\5\34\17\2x\33\3")
buf.write("\2\2\2yz\7\r\2\2z{\t\4\2\2{|\7\25\2\2|}\t\4\2\2}~\7\16")
buf.write("\2\2~\35\3\2\2\2\177\u0089\7\27\2\2\u0080\u0085\5 \21")
buf.write("\2\u0081\u0082\7\20\2\2\u0082\u0084\5 \21\2\u0083\u0081")
buf.write("\3\2\2\2\u0084\u0087\3\2\2\2\u0085\u0083\3\2\2\2\u0085")
buf.write("\u0086\3\2\2\2\u0086\u0089\3\2\2\2\u0087\u0085\3\2\2\2")
buf.write("\u0088\177\3\2\2\2\u0088\u0080\3\2\2\2\u0089\37\3\2\2")
buf.write("\2\u008a\u008b\t\4\2\2\u008b!\3\2\2\2\f,\64=UY`js\u0085")
buf.write("\u0088")
return buf.getvalue()
class NajaParser ( Parser ):
grammarFileName = "Naja.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'head'", "'tail'", "'numero'", "'texto'",
"'leia'", "'escreva'", "'se'", "'senao'", "'enquanto'",
"'execute'", "'('", "')'", "';'", "<INVALID>", "':'",
"','", "'{'", "'}'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "AP", "FP",
"SC", "OP", "ATTR", "VIR", "ACH", "FCH", "OPREL",
"ID", "TEXTO", "NUMBER", "WS" ]
RULE_prog = 0
RULE_declaravar = 1
RULE_tipo = 2
RULE_bloco = 3
RULE_cmd = 4
RULE_cmdleitura = 5
RULE_cmdescrita = 6
RULE_escrita = 7
RULE_cmdattrib = 8
RULE_cmdselecao = 9
RULE_cmdelse = 10
RULE_cmdenquanto = 11
RULE_cmdexecute = 12
RULE_cmdcondicao = 13
RULE_expr = 14
RULE_termo = 15
ruleNames = [ "prog", "declaravar", "tipo", "bloco", "cmd", "cmdleitura",
"cmdescrita", "escrita", "cmdattrib", "cmdselecao", "cmdelse",
"cmdenquanto", "cmdexecute", "cmdcondicao", "expr", "termo" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
AP=11
FP=12
SC=13
OP=14
ATTR=15
VIR=16
ACH=17
FCH=18
OPREL=19
ID=20
TEXTO=21
NUMBER=22
WS=23
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def bloco(self):
return self.getTypedRuleContext(NajaParser.BlocoContext,0)
def getRuleIndex(self):
return NajaParser.RULE_prog
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProg" ):
listener.enterProg(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProg" ):
listener.exitProg(self)
def prog(self):
localctx = NajaParser.ProgContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_prog)
try:
self.enterOuterAlt(localctx, 1)
self.state = 32
self.match(NajaParser.T__0)
self.state = 33
self.bloco()
self.state = 34
self.match(NajaParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclaravarContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def tipo(self):
return self.getTypedRuleContext(NajaParser.TipoContext,0)
def ID(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.ID)
else:
return self.getToken(NajaParser.ID, i)
def VIR(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.VIR)
else:
return self.getToken(NajaParser.VIR, i)
def getRuleIndex(self):
return NajaParser.RULE_declaravar
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclaravar" ):
listener.enterDeclaravar(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclaravar" ):
listener.exitDeclaravar(self)
def declaravar(self):
localctx = NajaParser.DeclaravarContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_declaravar)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 36
self.tipo()
self.state = 37
self.match(NajaParser.ID)
self.state = 42
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==NajaParser.VIR:
self.state = 38
self.match(NajaParser.VIR)
self.state = 39
self.match(NajaParser.ID)
self.state = 44
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TipoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return NajaParser.RULE_tipo
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTipo" ):
listener.enterTipo(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTipo" ):
listener.exitTipo(self)
def tipo(self):
localctx = NajaParser.TipoContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_tipo)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 45
_la = self._input.LA(1)
if not(_la==NajaParser.T__2 or _la==NajaParser.T__3):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlocoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_bloco
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBloco" ):
listener.enterBloco(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBloco" ):
listener.exitBloco(self)
def bloco(self):
localctx = NajaParser.BlocoContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_bloco)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 48
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 47
self.cmd()
self.state = 50
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmdleitura(self):
return self.getTypedRuleContext(NajaParser.CmdleituraContext,0)
def declaravar(self):
return self.getTypedRuleContext(NajaParser.DeclaravarContext,0)
def cmdescrita(self):
return self.getTypedRuleContext(NajaParser.CmdescritaContext,0)
def cmdattrib(self):
return self.getTypedRuleContext(NajaParser.CmdattribContext,0)
def cmdselecao(self):
return self.getTypedRuleContext(NajaParser.CmdselecaoContext,0)
def cmdenquanto(self):
return self.getTypedRuleContext(NajaParser.CmdenquantoContext,0)
def cmdexecute(self):
return self.getTypedRuleContext(NajaParser.CmdexecuteContext,0)
def getRuleIndex(self):
return NajaParser.RULE_cmd
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmd" ):
listener.enterCmd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmd" ):
listener.exitCmd(self)
def cmd(self):
localctx = NajaParser.CmdContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_cmd)
try:
self.state = 59
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [NajaParser.T__4]:
self.enterOuterAlt(localctx, 1)
self.state = 52
self.cmdleitura()
pass
elif token in [NajaParser.T__2, NajaParser.T__3]:
self.enterOuterAlt(localctx, 2)
self.state = 53
self.declaravar()
pass
elif token in [NajaParser.T__5]:
self.enterOuterAlt(localctx, 3)
self.state = 54
self.cmdescrita()
pass
elif token in [NajaParser.ID]:
self.enterOuterAlt(localctx, 4)
self.state = 55
self.cmdattrib()
pass
elif token in [NajaParser.T__6]:
self.enterOuterAlt(localctx, 5)
self.state = 56
self.cmdselecao()
pass
elif token in [NajaParser.T__8]:
self.enterOuterAlt(localctx, 6)
self.state = 57
self.cmdenquanto()
pass
elif token in [NajaParser.T__9]:
self.enterOuterAlt(localctx, 7)
self.state = 58
self.cmdexecute()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdleituraContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AP(self):
return self.getToken(NajaParser.AP, 0)
def ID(self):
return self.getToken(NajaParser.ID, 0)
def FP(self):
return self.getToken(NajaParser.FP, 0)
def getRuleIndex(self):
return NajaParser.RULE_cmdleitura
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdleitura" ):
listener.enterCmdleitura(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdleitura" ):
listener.exitCmdleitura(self)
def cmdleitura(self):
localctx = NajaParser.CmdleituraContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_cmdleitura)
try:
self.enterOuterAlt(localctx, 1)
self.state = 61
self.match(NajaParser.T__4)
self.state = 62
self.match(NajaParser.AP)
self.state = 63
self.match(NajaParser.ID)
self.state = 64
self.match(NajaParser.FP)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdescritaContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AP(self):
return self.getToken(NajaParser.AP, 0)
def escrita(self):
return self.getTypedRuleContext(NajaParser.EscritaContext,0)
def FP(self):
return self.getToken(NajaParser.FP, 0)
def getRuleIndex(self):
return NajaParser.RULE_cmdescrita
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdescrita" ):
listener.enterCmdescrita(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdescrita" ):
listener.exitCmdescrita(self)
def cmdescrita(self):
localctx = NajaParser.CmdescritaContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_cmdescrita)
try:
self.enterOuterAlt(localctx, 1)
self.state = 66
self.match(NajaParser.T__5)
self.state = 67
self.match(NajaParser.AP)
self.state = 68
self.escrita()
self.state = 69
self.match(NajaParser.FP)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EscritaContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(NajaParser.ID, 0)
def TEXTO(self):
return self.getToken(NajaParser.TEXTO, 0)
def getRuleIndex(self):
return NajaParser.RULE_escrita
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEscrita" ):
listener.enterEscrita(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEscrita" ):
listener.exitEscrita(self)
def escrita(self):
localctx = NajaParser.EscritaContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_escrita)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 71
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.TEXTO):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdattribContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(NajaParser.ID, 0)
def ATTR(self):
return self.getToken(NajaParser.ATTR, 0)
def expr(self):
return self.getTypedRuleContext(NajaParser.ExprContext,0)
def getRuleIndex(self):
return NajaParser.RULE_cmdattrib
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdattrib" ):
listener.enterCmdattrib(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdattrib" ):
listener.exitCmdattrib(self)
def cmdattrib(self):
localctx = NajaParser.CmdattribContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_cmdattrib)
try:
self.enterOuterAlt(localctx, 1)
self.state = 73
self.match(NajaParser.ID)
self.state = 74
self.match(NajaParser.ATTR)
self.state = 75
self.expr()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdselecaoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmdcondicao(self):
return self.getTypedRuleContext(NajaParser.CmdcondicaoContext,0)
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def cmdelse(self):
return self.getTypedRuleContext(NajaParser.CmdelseContext,0)
def getRuleIndex(self):
return NajaParser.RULE_cmdselecao
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdselecao" ):
listener.enterCmdselecao(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdselecao" ):
listener.exitCmdselecao(self)
def cmdselecao(self):
localctx = NajaParser.CmdselecaoContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_cmdselecao)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 77
self.match(NajaParser.T__6)
self.state = 78
self.cmdcondicao()
self.state = 79
self.match(NajaParser.ACH)
self.state = 81
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 80
self.cmd()
self.state = 83
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 85
self.match(NajaParser.FCH)
self.state = 87
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==NajaParser.T__7:
self.state = 86
self.cmdelse()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdelseContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_cmdelse
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdelse" ):
listener.enterCmdelse(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdelse" ):
listener.exitCmdelse(self)
def cmdelse(self):
localctx = NajaParser.CmdelseContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_cmdelse)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 89
self.match(NajaParser.T__7)
self.state = 90
self.match(NajaParser.ACH)
self.state = 92
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 91
self.cmd()
self.state = 94
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 96
self.match(NajaParser.FCH)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdenquantoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmdcondicao(self):
return self.getTypedRuleContext(NajaParser.CmdcondicaoContext,0)
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_cmdenquanto
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdenquanto" ):
listener.enterCmdenquanto(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdenquanto" ):
listener.exitCmdenquanto(self)
def cmdenquanto(self):
localctx = NajaParser.CmdenquantoContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_cmdenquanto)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 98
self.match(NajaParser.T__8)
self.state = 99
self.cmdcondicao()
self.state = 100
self.match(NajaParser.ACH)
self.state = 102
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 101
self.cmd()
self.state = 104
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 106
self.match(NajaParser.FCH)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdexecuteContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmdcondicao(self):
return self.getTypedRuleContext(NajaParser.CmdcondicaoContext,0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_cmdexecute
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdexecute" ):
listener.enterCmdexecute(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdexecute" ):
listener.exitCmdexecute(self)
def cmdexecute(self):
localctx = NajaParser.CmdexecuteContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_cmdexecute)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 108
self.match(NajaParser.T__9)
self.state = 109
self.match(NajaParser.ACH)
self.state = 111
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 110
self.cmd()
self.state = 113
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 115
self.match(NajaParser.FCH)
self.state = 116
self.match(NajaParser.T__8)
self.state = 117
self.cmdcondicao()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdcondicaoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AP(self):
return self.getToken(NajaParser.AP, 0)
def OPREL(self):
return self.getToken(NajaParser.OPREL, 0)
def FP(self):
return self.getToken(NajaParser.FP, 0)
def ID(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.ID)
else:
return self.getToken(NajaParser.ID, i)
def NUMBER(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.NUMBER)
else:
return self.getToken(NajaParser.NUMBER, i)
def getRuleIndex(self):
return NajaParser.RULE_cmdcondicao
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdcondicao" ):
listener.enterCmdcondicao(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdcondicao" ):
listener.exitCmdcondicao(self)
def cmdcondicao(self):
localctx = NajaParser.CmdcondicaoContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_cmdcondicao)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 119
self.match(NajaParser.AP)
self.state = 120
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.NUMBER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 121
self.match(NajaParser.OPREL)
self.state = 122
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.NUMBER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 123
self.match(NajaParser.FP)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TEXTO(self):
return self.getToken(NajaParser.TEXTO, 0)
def termo(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.TermoContext)
else:
return self.getTypedRuleContext(NajaParser.TermoContext,i)
def OP(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.OP)
else:
return self.getToken(NajaParser.OP, i)
def getRuleIndex(self):
return NajaParser.RULE_expr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr" ):
listener.enterExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr" ):
listener.exitExpr(self)
def expr(self):
localctx = NajaParser.ExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_expr)
self._la = 0 # Token type
try:
self.state = 134
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [NajaParser.TEXTO]:
self.enterOuterAlt(localctx, 1)
self.state = 125
self.match(NajaParser.TEXTO)
pass
elif token in [NajaParser.ID, NajaParser.NUMBER]:
self.enterOuterAlt(localctx, 2)
self.state = 126
self.termo()
self.state = 131
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==NajaParser.OP:
self.state = 127
self.match(NajaParser.OP)
self.state = 128
self.termo()
self.state = 133
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TermoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(NajaParser.ID, 0)
def NUMBER(self):
return self.getToken(NajaParser.NUMBER, 0)
def getRuleIndex(self):
return NajaParser.RULE_termo
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTermo" ):
listener.enterTermo(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTermo" ):
listener.exitTermo(self)
def termo(self):
localctx = NajaParser.TermoContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_termo)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 136
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.NUMBER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
|
[
"marcelo.cfsf@gmail.com"
] |
marcelo.cfsf@gmail.com
|
63c68801afba7e64361b69647cb7a4ce297ee086
|
60c71a35efed4321ace93042f822896cd3eb5588
|
/app/spider/reply/generate_reply_spider.py
|
85fa1b27a79ea64b8b6bf88904236d8b03d0e1fb
|
[
"Apache-2.0"
] |
permissive
|
zkguchun/ASoulCnki
|
b3e21ba3f7c1a5ff4840c73346469418a3d6ec52
|
e66b8427dac68ed0fd0347447c93f161a04ff785
|
refs/heads/master
| 2023-06-30T07:20:03.496162
| 2021-07-25T02:08:06
| 2021-07-25T02:08:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
from app.config import sqla
import datetime
import app.models as models
import tasks
def send_low_priority_reply_spider_task():
session = sqla['session']
three_day_ago = int((datetime.datetime.now() - datetime.timedelta(days=3)).timestamp())
low_priority_reply_task = session.query(models.UserDynamic).filter(models.UserDynamic.ctime < three_day_ago).all()
for dynamic in low_priority_reply_task:
param_tuple = (dynamic.type_id, dynamic.oid, dynamic.status, dynamic.dynamic_id)
tasks.get_reply_data_task.apply_async(param_tuple, queue="reply_task_low_priority", routing_key='reply_low')
def send_high_priority_reply_spider_task():
    """Queue reply-crawl Celery tasks for dynamics from the last three days.

    Recent dynamics go to the high-priority queue.
    """
    session = sqla['session']
    cutoff = int((datetime.datetime.now() - datetime.timedelta(days=3)).timestamp())
    recent_dynamics = session.query(models.UserDynamic).filter(models.UserDynamic.ctime >= cutoff).all()
    for dyn in recent_dynamics:
        tasks.get_reply_data_task.apply_async(
            (dyn.type_id, dyn.oid, dyn.status, dyn.dynamic_id),
            queue="reply_task_high_priority",
            routing_key='reply_high')
if __name__ == '__main__':
    # Manual run: only dispatch the low-priority (older-than-3-days) backlog.
    send_low_priority_reply_spider_task()
|
[
"stream2000@139.com"
] |
stream2000@139.com
|
0c7002f95919d1deecaa962259fac4be46263738
|
5dcfef2058f8daae0a6b3436cde9486562943c8c
|
/2016/Computer Vision Labs/Lab 1/hellocv.py
|
eb569449a606d236369f864fdd6c99c1044b97d5
|
[] |
no_license
|
RhysAgombar/CV-Misc-Files
|
84f8c258edb981885192ab0d1b091c7564244328
|
8cc98c04a8c0b0fd6a5951f86db132fd4fc58bb0
|
refs/heads/master
| 2021-01-20T02:29:21.211280
| 2017-04-25T23:15:13
| 2017-04-25T23:15:13
| 89,414,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
# Legacy OpenCV 1.x demo using the cv2.cv compatibility layer (removed in
# OpenCV 3+): load a hard-coded image, show it in a window, wait for a key.
from cv2.cv import *
img = LoadImage("C:/Users/100515147/Desktop/Computer Vision Labs/Lab 1/test.jpg")
NamedWindow("opencv")
ShowImage("opencv",img)
WaitKey(0)
|
[
"Rhys.Agombar@uoit.net"
] |
Rhys.Agombar@uoit.net
|
2bc6bf87b8a17707f937427fda6c823672244a56
|
14303ddae70645ed8beb32c869fa00a9bd7870a1
|
/orbit_return.py
|
eb87165ca0c0b88340cbbebe04499046bba9aeb8
|
[] |
no_license
|
bpmagallon/Diwata1_Orbit
|
65e292906122005afe58b041376f71147f185d97
|
d2463233317ab5af126a266920da5c247fd72b78
|
refs/heads/master
| 2021-01-10T14:21:22.583284
| 2016-03-13T20:34:43
| 2016-03-13T20:34:43
| 53,806,032
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,740
|
py
|
# Orbit ground-track simulation (Python 2: print statements, urllib.urlopen).
# Depends on arcpy, ephem and several project-local helper modules.
import numpy as np
import math
import dateAdd
import urllib
import geoc_geod
import getXY
import arcpy
import getNearest
import ephem
import datetime
import geod_to_geoc
import isAM
import groundtrack
import get_vector
import spherical

arcpy.env.overwriteOutput = True

# Fetch current TLEs from Celestrak; tle_data[1] and [2] are the two TLE
# lines of the first object in the file.
celestrak = urllib.urlopen("http://www.celestrak.com/NORAD/elements/stations.txt")
tle_data = celestrak.readlines()
celestrak.close()
iss_tle1 = tle_data[1]
iss_tle2 = tle_data[2]

# Two epochs ~11 days ahead, one second apart (the pair is used to derive an
# along-track direction for the footprint geometry).
now = datetime.datetime.utcnow() + datetime.timedelta(11,2)
now1 = datetime.datetime.utcnow() + datetime.timedelta(11,1)
iss = ephem.readtle('GOSAT', iss_tle1, iss_tle2)
iss.compute(now)
lat=math.degrees(iss.sublat)
lon=math.degrees(iss.sublong)
h=math.degrees(iss.elevation)
pos = [lat,lon,0]
position = geod_to_geoc.geod_geoc(pos)
iss.compute(now1)
lat1=math.degrees(iss.sublat)
lon1=math.degrees(iss.sublong)
h1=(iss.elevation)
print h1
pos1 = [lat1,lon1,0]
# NOTE(review): computed from `pos`, not `pos1` -- looks like a copy/paste
# slip; confirm against the intended geometry.
position1 = geod_to_geoc.geod_geoc(pos)
#check if sat pos is within the effective radius
eff_rad = 41
#nadir = 32.5 ; 5deg = 1759.6248751; 15deg from horizon = 1105.2976329; 45deg from horizon = 377.3453284 60deg = 230.9401 ;75deg = 107.1797;
smi_pt = getXY.getCoords(r'C:\Users\Satellite L40D\Documents\philmicrosat_study\path_simulation\gis_files\country_reduceph4.shp')
input_pts = r'C:\Users\Satellite L40D\Documents\philmicrosat_study\path_simulation\gis_files\country_reduceph4.shp'
track = []
date_pass = []
near = []
duration = 0
ground_track = []
ground_track_pm = []
date_pass_pm = []
track_pm = []
sr = arcpy.SpatialReference(4326)
count = 0
factor = 0

# Simulate up to two weeks (1209600 s): 1-s steps while inside the effective
# radius during a morning pass, coarser steps otherwise.
while duration<=1209600:
    near = getNearest.closest(position, smi_pt)
    print now
    time = isAM.isMorning(str(now))
    if (near[1]<=eff_rad) and (time==True):
        # In range on a morning pass: write one footprint polygon per step.
        while (near[1]<=eff_rad) and (time==True):
            print "time enter:"+str(now)
            print "pass"
            # NOTE(review): `fov` is never defined anywhere in this script --
            # this line raises NameError at runtime; the field-of-view value
            # must be supplied before this code can run.
            param = spherical.getAngles(pos1,pos,fov)
            geod_near = geoc_geod.geoc_to_geod(near[0])
            ground_track_1 = [geod_near[1]+param[0][1],geod_near[0]+param[0][0]]
            ground_track_2 = [geod_near[1]+param[1][1],geod_near[0]+param[1][0]]
            ground_track_3 = [geod_near[1]+param[2][1],geod_near[0]+param[2][0]]
            ground_track_4 = [geod_near[1]+param[3][1],geod_near[0]+param[3][0]]
            # One shapefile per footprint polygon, tagged with local time.
            arcpy.CreateFeatureclass_management(r"C:\\Users\\Public\\Documents\\tracks", "groundtrack_"+str(count)+".shp", "Polygon", "", "", "", sr)
            arcpy.AddField_management("C:\\Users\\Public\\Documents\\tracks\\groundtrack_"+str(count)+".shp", 'TIME', 'TEXT')
            polygonFC = r"C:\\Users\\Public\\Documents\\tracks\\groundtrack_"+str(count)+".shp"
            c = arcpy.InsertCursor(polygonFC)
            id = count + 1
            counter=0
            array = arcpy.Array([arcpy.Point(ground_track_1[0],ground_track_1[1]),arcpy.Point(ground_track_2[0],ground_track_2[1]),
                                 arcpy.Point(ground_track_3[0],ground_track_3[1]),arcpy.Point(ground_track_4[0],ground_track_4[1])])
            polygon = arcpy.Polygon(array)
            row = c.newRow()
            row.id = id
            row.Shape = polygon
            lati = [math.degrees(iss.sublat),math.degrees(iss.sublong),iss.elevation]
            track.append(lati)
            # UTC+8 (28800 s): Philippine local time for the record label.
            ph_time = now + datetime.timedelta(0,28800)
            date_pass.append(str(ph_time.date())+"-"+str(ph_time.time()))
            row.TIME = (str(ph_time.date())+"-"+str(ph_time.time()))
            c.insertRow(row)
            id+=1
            del c, polygon
            input_pts = input_pts
            smi_pt = getXY.getCoords(input_pts)
            count = count + 1
            if duration==0:
                now = now + datetime.timedelta(0,2)
            else:
                now = now + datetime.timedelta(0,1)
            duration = duration + 1
            iss.compute(now)
            lat=math.degrees(iss.sublat)
            lon=math.degrees(iss.sublong)
            pos1 = pos
            pos = [lat,lon,0]
            position1 = position
            position = geod_to_geoc.geod_geoc(pos)
            near = getNearest.closest(position, smi_pt)
            time = isAM.isMorning(str(now))
    else:
        # Out of range (or afternoon): advance time coarsely -- 30 s far away,
        # 1 s when closer than 100 (same unit as eff_rad).
        if duration==0:
            now = now + datetime.timedelta(0,2)
            factor=2
        else:
            if near[1]<=100:
                now = now + datetime.timedelta(0,1)
                factor=1
            else:
                now = now + datetime.timedelta(0,30)
                factor =30
        duration = duration + factor
        iss.compute(now)
        lat=math.degrees(iss.sublat)
        lon=math.degrees(iss.sublong)
        pos1 = pos
        pos = [lat,lon,0]
        position1 = position
        position = geod_to_geoc.geod_geoc(pos)
        time = isAM.isMorning(str(now))
        factor = 0

# Dump every recorded subsatellite point into a single point shapefile.
sr = arcpy.SpatialReference(4326)
arcpy.CreateFeatureclass_management(r"C:\\Users\\Public\\Documents\\tracks", "track.shp", "Point", "", "", "", sr)
arcpy.AddField_management("C:\\Users\\Public\\Documents\\tracks\\track.shp", 'LAT', 'double', '12', '3',)
arcpy.AddField_management("C:\\Users\\Public\\Documents\\tracks\\track.shp", 'LON', 'double', '12', '3',)
arcpy.AddField_management("C:\\Users\\Public\\Documents\\tracks\\track.shp", 'TIME', 'TEXT')
cur = arcpy.InsertCursor("C:\\Users\\Public\\Documents\\tracks\\track.shp")
pt = arcpy.Point()
id = 1
count=0
for i in track:
    pt.X = float(i[1])
    pt.Y = float(i[0])
    row = cur.newRow()
    row.Shape = pt
    row.id = id
    row.LAT = pt.Y
    row.LON = pt.X
    row.TIME = str(date_pass[count])
    cur.insertRow(row)
    id+=1
    count+=1
del cur, pt
|
[
"bpmagallon@gmail.com"
] |
bpmagallon@gmail.com
|
22587d55f9f9fab4188c819fa38b8dfd4eef0c1d
|
d7a88bb64c1b343ebbacf140ba07495c23e1cc3d
|
/NegStars.py
|
fa287ce660e4fde3769cb20b5fcf2e39ea6e6b2c
|
[] |
no_license
|
RubyEye7/Week-1
|
b485c103b296bec5e0fa6b7f613de3ae22582cc4
|
ff499338b8e630caeb513e41f58c772227d9f236
|
refs/heads/master
| 2020-03-23T00:54:35.559294
| 2018-07-13T20:54:21
| 2018-07-13T20:54:21
| 140,891,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
# Read a line count, then print rows of star pairs shrinking to nothing.
# The first row has 2*(count-1) stars; the last printed row is empty,
# matching the original decrement-before-print behaviour.
total = int(input("How many lines do you want: "))
for remaining in range(total - 1, -1, -1):
    print("**" * remaining)
|
[
"40636561+Andrew800@users.noreply.github.com"
] |
40636561+Andrew800@users.noreply.github.com
|
d19988ad33589d48cc57918d518294a2fd6150d7
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/exp-big-490.py
|
878f7cc3ed471a6dd801ce1f464e12d880342a22
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,180
|
py
|
# Compute x**y
def exp(x: int, y: int) -> int:
    """Return x**y for y >= 0; returns 1 when y <= 0 (as the original did)."""
    result: int = 1
    step: int = 0
    while step < y:
        result = result * x
        step = step + 1
    return result
def exp2(x: int, y: int, x2: int, y2: int) -> int:
    """Return x**y via recursion over nonlocal accumulators.

    Synthetic benchmark function: only ``x`` and ``y`` affect the result;
    ``x2``/``y2`` and the ``a2``..``a5`` accumulators exist to stress the
    compiler under test.
    """
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if i <= 0:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    # BUG FIX: the original contained the unexpanded template placeholder
    # ``a4 = $INT`` -- a syntax error that broke the whole module.  Every
    # sibling initialiser (and every other exp* function) uses 1.
    a4 = 1
    a5 = 1
    return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
    """Return x**y; the extra parameters are unused (synthetic benchmark)."""
    acc: int = 1
    remaining: int = y
    while remaining > 0:
        acc = acc * x
        remaining = remaining - 1
    return acc
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
    """Return x**y; the extra parameters are unused (synthetic benchmark)."""
    acc: int = 1
    remaining: int = y
    while remaining > 0:
        acc = acc * x
        remaining = remaining - 1
    return acc
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
    """Return x**y; the extra parameters are unused (synthetic benchmark)."""
    acc: int = 1
    remaining: int = y
    while remaining > 0:
        acc = acc * x
        remaining = remaining - 1
    return acc
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch: print 2**(i % 31) for i = 0..n (only n/i are actually used; the
# numbered copies pad the benchmark).
while i <= n:
    print(exp(2, i % 31))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
b11803d6a0a1957d777cda1dc1d36376308e22ff
|
00dfa1c55b2c3e6ad98e2e1ceebcdabb65bb3712
|
/Recursion/tower_of_hanoi.py
|
f2fe72980f98b55d2fb878c8479f8cd2434956ca
|
[] |
no_license
|
taddes/algos_python
|
889f580c296ced1b74fdebbb38ba16df1249306a
|
b72417e53fe9b45ed12270716fe13cbed6a178d1
|
refs/heads/master
| 2023-02-12T00:43:36.414418
| 2021-01-02T23:15:33
| 2021-01-02T23:15:33
| 284,581,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
"""
Three rods and a number of disks of different sizes which can slide on any rod.
Puzzle begins with the disks in a neat stack in ascending order of size on the leftmost
rod. The smallest disk is at the top, making a conical shape.
Rules:
Only one disk can be moved at a time
Each move consists of taking the upper disk from one stack and placing it on another.
No disk may be placed on top of a smaller disk.
Minimum number of moves required to solve it is 2^n - 1, giving O(2^n) exponential time complexity.
Recurring sub-problem: at some point, one will have managed to shift n - 1 plates (for 3 plates, this is 2)
to the middle auxiliary rod, at which point the largest disk has to be moved to the final
rod. The largest disk is the only one left, so it can be placed on the final rod.
"""
def hanoi(disk, source='A', middle='B', destination='C'):
    """Print the move sequence solving Towers of Hanoi for `disk` disks.

    One line is printed per move, largest disk carrying the highest number.
    Requires disk >= 1 (the minimum solution has 2**disk - 1 moves).
    """
    if disk != 1:
        # Park the smaller stack on the spare rod first.
        hanoi(disk - 1, source, destination, middle)
    print(f'Disk {disk} moved from {source} to {destination}')
    if disk != 1:
        # Bring the parked stack onto the disk we just moved.
        hanoi(disk - 1, middle, source, destination)


hanoi(3, 'a', 'b', 'c')
|
[
"taddes.korris@gmail.com"
] |
taddes.korris@gmail.com
|
62c33222a67be9a4d9938a2ef8513d7ecd54029e
|
544356d5eacf67a27b5be7dfb276a919a232f547
|
/post_test.py
|
ac6510a661435ee9210a0fa9382cf6dc6454a849
|
[] |
no_license
|
drellxor/wolf_markov
|
8967d660bd248d674b47ae65fa1bc9d6a9fa8497
|
8df4e882464209461bf9f3d4c43ee3888ec747e0
|
refs/heads/main
| 2023-04-12T19:52:20.339640
| 2021-05-03T14:44:42
| 2021-05-03T14:44:42
| 322,602,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import vk_api
from markov_wolf import MarkovWolf
if __name__ == '__main__':
    # The VK wall-posting path below is disabled; only quote generation runs.
    # NOTE(review): an API access token is committed in this comment -- it
    # should be revoked and loaded from configuration instead.
    # session = vk_api.VkApi(token='c8767213cdfa52be374952b3eb9632c6d4ed4b442b8202ed295efba6520613f8d1eb109a2a302595a6b59')
    # api = session.get_api()
    #
    # uploader = vk_api.upload.VkUpload(api)
    # result = uploader.photo_wall([pic], group_id=201211851)

    # Build a quote and matching picture from the Markov-chain generator.
    wolf = MarkovWolf('dataset_wolf', 'pictures_wolf')
    quote, pic = wolf.make_quote()
    pass
    #api.wall.post(owner_id='-201211851', )
|
[
"tryakinan@gmail.com"
] |
tryakinan@gmail.com
|
2707f52621ecd803872de5c8f53a8a4582b75aef
|
6814c5b1facf4c5a2a1b35e194a50a67c6e547b8
|
/classifier.py
|
2f41d6d38436823ebee17c62f85e90babc9b8e9f
|
[] |
no_license
|
Sahana012/Model-View-Controller
|
f9a9916dd78eb336930772174c6c366b8d648109
|
15fc6b7035635442046f0c0910f6e08d0122795b
|
refs/heads/main
| 2023-06-22T21:22:18.644303
| 2021-07-25T01:30:03
| 2021-07-25T01:30:03
| 389,233,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from PIL import Image
import PIL.ImageOps
# Load samples and labels; X rows are flattened 28x28 images (784 px, 0-255)
# -- assumed from the reshape/scaling below, TODO confirm against producer.
X = np.load('image.npz')['arr_0']
y = pd.read_csv("labels.csv")["labels"]
print(pd.Series(y).value_counts())
# NOTE(review): 'K' is missing from this class list -- verify whether that is
# intentional for this dataset.
classes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
nclasses = len(classes)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=9, train_size=7500, test_size=2500)
# Scale raw pixel intensities into [0, 1].
X_train_scaled = X_train/255.0
X_test_scaled = X_test/255.0
# Multinomial logistic regression, trained once at import time.
clf = LogisticRegression(solver='saga', multi_class='multinomial').fit(X_train_scaled, y_train)
def get_prediction(image):
    """Predict the letter class for a single image.

    Args:
        image: path or file-like object accepted by PIL's ``Image.open``.
    Returns:
        The predicted label (first element of ``clf.predict``).
    """
    im_pil = Image.open(image)
    # 8-bit grayscale, resized to the model's 28x28 input.
    image_bw = im_pil.convert('L')
    image_bw_resized = image_bw.resize((28,28), Image.ANTIALIAS)
    pixel_filter = 20
    # 20th percentile acts as a per-image black point.
    min_pixel = np.percentile(image_bw_resized, pixel_filter)
    image_bw_resized_inverted_scaled = np.clip(image_bw_resized-min_pixel, 0, 255)
    max_pixel = np.max(image_bw_resized)
    # NOTE(review): divides by the max of the *unshifted* image rather than of
    # the clipped array -- confirm this matches the training preprocessing.
    image_bw_resized_inverted_scaled = np.asarray(image_bw_resized_inverted_scaled)/max_pixel
    test_sample = np.array(image_bw_resized_inverted_scaled).reshape(1,784)
    test_pred = clf.predict(test_sample)
    return test_pred[0]
|
[
"noreply@github.com"
] |
Sahana012.noreply@github.com
|
e6a936ccc3de105e36ffef350ea2096d974dc9f0
|
760e1c14d056dd75958d367242c2a50e829ac4f0
|
/剑指offer/6_旋转数组最小的数字.py
|
795e06ad064cd143007a5bdc31ea65296446baea
|
[] |
no_license
|
lawtech0902/py_imooc_algorithm
|
8e85265b716f376ff1c53d0afd550470679224fb
|
74550d68cd3fd2cfcc92e1bf6579ac3b8f31aa75
|
refs/heads/master
| 2021-04-26T22:54:42.176596
| 2018-09-23T15:45:22
| 2018-09-23T15:45:22
| 123,894,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
# _*_ coding: utf-8 _*_
"""
把一个数组最开始的若干个元素搬到数组的末尾,我们称之为数组的旋转。 输入一个非递减排序的数组的一个旋转,输出旋转数组的最小元素。 例如数组{3,4,5,1,2}为{1,2,3,4,5}的一个旋转,该数组的最小值为1。 NOTE:给出的所有元素都大于0,若数组大小为0,请返回0。
__author__ = 'lawtech'
__date__ = '2018/5/9 下午9:35'
"""
class Solution:
    def minNumberInRotateArray(self, rotateArray):
        """Return the smallest element of a rotated non-decreasing array.

        Binary search for the rotation pivot; returns 0 for an empty array
        (per the problem statement).  With equal boundary elements the search
        can only guarantee O(n) behaviour of the original algorithm, which is
        preserved unchanged here.
        """
        if not rotateArray:
            return 0
        lo, hi = 0, len(rotateArray) - 1
        # While the window still straddles the rotation point...
        while rotateArray[lo] >= rotateArray[hi]:
            if hi - lo == 1:
                # Adjacent indices: hi sits on the minimum.
                return rotateArray[hi]
            mid = (lo + hi) // 2
            if rotateArray[mid] >= rotateArray[lo]:
                lo = mid
            else:
                hi = mid
        # Array was not actually rotated; first element is smallest.
        return rotateArray[lo]
|
[
"584563542@qq.com"
] |
584563542@qq.com
|
deb50bc3047abbdefa3468e418745c566c58248e
|
a985c6024fb0ab2665ed49eb82f01dedb6209531
|
/logs.py
|
01e49ca7d71c4e17280d07106ee3fccd0ca64b9e
|
[] |
no_license
|
JessDF/LogsAnalysisProject
|
7f4a914bdb64950ea685895492c818bc5ef9dad9
|
c04aed4b4a53c5e925eccf97c17117aedb4995f6
|
refs/heads/master
| 2021-05-16T10:15:37.641460
| 2018-01-02T21:36:29
| 2018-01-02T21:36:29
| 104,678,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
#!/usr/bin/env python3
# ---------------------- DESCRIPTION ---------------------
# logs.py is an example of how to query a Postgresql database
# to report data from a news website. The purpose is to obtain
# information from the news database and output it.
import psycopg2
DBNAME = 'news'
def process_query(user_query):
    """Run *user_query* against the news database and return all rows."""
    connection = psycopg2.connect(dbname=DBNAME)
    cursor = connection.cursor()
    cursor.execute(user_query)
    rows = cursor.fetchall()
    connection.close()
    return rows
def print_heading(heading):
    """Print *heading* tab-indented with a blank line above and below."""
    banner = "\n\t\t" + heading + "\n"
    print(banner)
def top_three_articles_alltime():
    """Print the three most-viewed articles of all time."""
    rows = process_query("select * from top_articles limit 3")
    print_heading("TOP 3 ARTICLES OF ALL TIME")
    for title, views in rows:
        print(f' "{title}" -- {views} views')
def top_authors_alltime():
    """Print every author ranked by total article views."""
    rows = process_query("select * from top_authors")
    print_heading("TOP AUTHORS OF ALL TIME")
    for name, views in rows:
        print(f" {name} -- {views} views")
def error_prone_days():
    """Print each day on which more than 1% of requests returned 404."""
    rows = process_query("select * from high_404_days")
    print_heading("DAYS WITH GREATER THAN 1% 404 REQUESTS")
    for day, percentage in rows:
        print(f" {day:%B %d, %Y} -- {percentage:.2f} % errors")
if __name__ == '__main__':
    # Emit all three reports when run as a script.
    top_three_articles_alltime()
    top_authors_alltime()
    error_prone_days()
|
[
"noreply@github.com"
] |
JessDF.noreply@github.com
|
9dbdc9a4865ba9be97a3a163f61fe24566c2591a
|
8cb3604a6cba69d13ec7ee0c38b8c617daff5d82
|
/Array/sort012.py
|
3e1b1633707ceabd5d681f6bccd1e4f0910f5557
|
[] |
no_license
|
therohitsingh/CODE
|
6e4ba53f52d939ada183e6d02a4f03f1c0680cde
|
d803a226d24afc6b7e417be6ef1e51e81da9fe92
|
refs/heads/master
| 2023-06-10T09:14:50.703222
| 2021-07-02T15:52:40
| 2021-07-02T15:52:40
| 382,393,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
def sortarr(a, n):
    """Sort a list of 0/1/2 values in place via a counting pass.

    Args:
        a: list whose first ``n`` elements are each 0, 1 or 2.
        n: number of elements to sort (normally ``len(a)``).

    BUG FIX: the original began with ``a = []`` and ``n = len(a)``, which
    rebound the parameters to an empty list -- every count stayed 0 and the
    caller's list was never touched, so the function was a silent no-op.
    """
    count0 = 0
    count1 = 0
    count2 = 0
    # First pass: tally each value.
    for i in range(n):
        if a[i] == 0:
            count0 += 1
        elif a[i] == 1:
            count1 += 1
        elif a[i] == 2:
            count2 += 1
    # Second pass: overwrite the list with 0s, then 1s, then 2s.
    i = 0
    while count0 > 0:
        a[i] = 0
        i += 1
        count0 -= 1
    while count1 > 0:
        a[i] = 1
        i += 1
        count1 -= 1
    while count2 > 0:
        a[i] = 2
        i += 1
        count2 -= 1
def printarr(a, n):
    """Print elements a[0]..a[n-1], one per line."""
    i = 0
    while i < n:
        print(a[i])
        i += 1
if __name__ == "__main__":
    a = [0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1]
    n = len(a)
    # Sorts in place; note the result is never printed (printarr is unused).
    sortarr(a,n)
|
[
"therohitsinghr@gmail.com"
] |
therohitsinghr@gmail.com
|
67a5365443ffe32e04263ef87eabb797f29e9ddf
|
af0661c89e70c914f9e2f1e5053fae69558b7c71
|
/Lesson_01/Lesson_01.py
|
a4c7052cf1214813a906c7ccf1cb269aa7b16662
|
[] |
no_license
|
kamyninatatyana/DataSearch
|
ca1021233ed53874dbbca57f70d3eecd722c5b20
|
dceee3bcdf55a8ad418c69921d2cf77efec0f921
|
refs/heads/master
| 2023-04-07T04:11:11.935750
| 2021-04-17T13:59:50
| 2021-04-17T13:59:50
| 358,893,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
import requests
import json
# Task 1 (original comments were in Russian; translated):
# Using the GitHub API, list the repositories of a given user and save the
# JSON response to a *.json file.
print('Задание 1')
print()
user_name = 'kamyninatatyana'
my_url = 'https://api.github.com'
my_request = requests.get(f'{my_url}/users/{user_name}/repos')
print(f'Список репозиториев пользователя {user_name}')
with open('task_1.json', 'w') as my_file:
    json.dump(my_request.json(), my_file)
counter = 1
for item in my_request.json():
    print(counter, item["name"])
    counter = counter + 1
# Task 2:
# Find an open API requiring authorisation, query it with credentials, and
# save the server response to a file.
print()
print('Задание 2')
my_url = 'https://api.nasa.gov/'
# NOTE(review): a real API key is committed here -- revoke it and read the
# key from the environment or a config file instead.
api_key = 'Rfnkizox7W8yF2PXVgFxfa4AcmQJqknrkK9h8r2P'
params = {'api_key': api_key}
my_link = 'https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&format=json'
my_request = requests.get('https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&format=json', params=params)
with open('task_2.json', 'w') as my_file:
    json.dump(my_request.json(), my_file)
print(f'Названия планет')
counter = 1
for item in my_request.json():
    # NOTE(review): this `while` exhausts the counter on the FIRST item,
    # printing its name 99 times and nothing for later items -- `if` (with
    # a name per planet) was almost certainly intended.
    while counter < 100:
        print(counter, item['pl_name'])
        counter = counter + 1
|
[
"tatyanak@yandex.ru"
] |
tatyanak@yandex.ru
|
504c177772b52656ec86a57238ff9dc5ffc4b589
|
172adddeaf9da90f39fbb7e93b59e2369f44d64a
|
/level_2/next_bignum.py
|
f43fbbb3e67c5ce6011d2cac06bf65c397728152
|
[] |
no_license
|
sujin16/studycoding
|
1e7bae941513e4d65e3df44cbde471764940b7e9
|
1a394f3fd6851c296dc37fba0d67b01cc665ecce
|
refs/heads/main
| 2023-03-14T03:51:14.022481
| 2021-03-04T12:56:04
| 2021-03-04T12:56:04
| 324,210,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
def solution(n):
    """Return the smallest integer greater than n with the same popcount.

    Counts the set bits of ``n`` (``'1'`` never appears in the ``0b`` prefix),
    then scans upward until a number with the same count of 1-bits appears.

    Cleanups vs. the original: removed the unused ``i = 1`` local and the
    redundant ``list(...)`` wrapper around ``bin(n)`` (``str.count`` works
    directly).
    """
    target_bits = bin(n).count('1')
    while True:
        n += 1
        if bin(n).count('1') == target_bits:
            return n
# Demo: 15 (0b1111, four set bits) -> 23 (0b10111).
n=15
print(solution(n))
|
[
"tnwls9712@ajou.ac.kr"
] |
tnwls9712@ajou.ac.kr
|
ba59ebba2e068546face3a92c586930dc6c334c9
|
a45c87da1d573891a6009546b58320e6e9e0a54e
|
/html_compiler/compiler.py
|
ca07b433b7198def73b4a5f7ebd278ef26c0fcb4
|
[
"MIT"
] |
permissive
|
hsuanhauliu/html-compiler
|
f805254a5b58c3b21a95882d98784f55d63547fb
|
17f2659b95153690b517f58964f9002426c08c03
|
refs/heads/master
| 2020-09-11T12:00:12.677145
| 2019-12-14T06:10:05
| 2019-12-14T06:10:05
| 222,057,278
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
"""
Compiler module.
"""
import os
from bs4 import BeautifulSoup
def compile(path):
    """Recursively inline HTML components into *path*; return the soup.

    Each ``<div class="m_component" id="X">`` is replaced by the compiled
    contents of ``X.html``, resolved relative to the current file's
    directory (hence the ``cd``).

    NOTE(review): the name shadows the builtin ``compile``; renaming would be
    cleaner but callers elsewhere depend on this name.
    """
    soup = ""
    next_dir, filename = _separate_dir_and_file(path)
    # Work from the file's own directory so nested components resolve.
    with cd(next_dir):
        with open(filename, "r") as rfile:
            soup = BeautifulSoup(rfile, 'html.parser')
        component_tags = soup.findAll("div", {"class": "m_component"})
        for tag in component_tags:
            tag_id = tag.get("id")
            component_file = tag_id + ".html"
            # Recurse, then splice the parsed component over the placeholder.
            component = compile(component_file)
            soup.find(id=tag_id).replaceWith(component)
    return soup
def _separate_dir_and_file(path):
    """Split *path* at the last '/'; returns ('.', path) when there is none."""
    idx = path.rfind("/")
    if idx < 0:
        return ".", path
    return path[:idx], path[idx + 1:]
class cd:
    """Context manager: temporarily change the current working directory."""

    def __init__(self, newPath):
        # Expand ~ so paths like "~/data" work.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore, even when the body raised.
        os.chdir(self.savedPath)
|
[
"hsuanhal@usc.edu"
] |
hsuanhal@usc.edu
|
f74f9719deb520d9f4b0f1dbb3b693f5511b8485
|
2fbf9e271de7889d15fe78696cbb4807f35fcc30
|
/boards/views.py
|
2559b77fa121c89158261e694ca58572d3a89057
|
[] |
no_license
|
xsaints/proj0064
|
03c990a0fc2be269c079b07399fb651b9fbd7bd2
|
49c80b3eba89418d5b24f838140bbf2be1d3c242
|
refs/heads/master
| 2020-04-03T12:41:09.786367
| 2018-10-29T18:42:27
| 2018-10-29T18:42:27
| 155,259,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import Board, Topic, Post
from .forms import NewTopicForm, NewPostForm
'''
def home(request):
return render(request, 'boards/home.html')
'''
def boards(request):
    """Display every discussion board."""
    all_boards = Board.objects.all()
    context = {'boards': all_boards}
    return render(request, 'boards/boards.html', context)
def board_topics(request, board_pk):
    """Display all topics of one board; 404 when the board does not exist."""
    return render(request, 'boards/board_topics.html',
                  {'board': get_object_or_404(Board, pk=board_pk)})
def topic_posts(request, board_pk, topic_pk):
    """Display all posts under one topic; 404 when the topic does not exist.

    ``board_pk`` is kept for the URL pattern but is not used in the lookup.
    """
    return render(request, 'boards/topic_posts.html',
                  {'topic': get_object_or_404(Topic, pk=topic_pk)})
def new_topic(request, board_pk):
    """Show and handle the new-topic form for a board.

    On a valid POST, redirects to the board list (persisting the topic is
    intentionally disabled in the original; the POST data is only printed).
    """
    get_object_or_404(Board, pk=board_pk)  # 404 when the board is missing
    if request.method == 'POST':
        form = NewTopicForm(request.POST)
        if form.is_valid():
            # form.save() is deliberately left disabled.
            print(request.POST)
            return HttpResponseRedirect(reverse('boards:boards'))
    else:
        form = NewTopicForm()
    # GET, or POST with validation errors: render the (possibly bound) form.
    return render(request, 'boards/new_topic.html', {'form': form})
def new_post(request):
    """Render an empty new-post form (no POST handling yet)."""
    return render(request, 'boards/new_post.html', {'form': NewPostForm()})
|
[
"xsaints@yahoo.com"
] |
xsaints@yahoo.com
|
8758ccee5ee57572793f201065aa31b037f2f96a
|
60e99cec25105719dd25e4c6227ffe2358c3cb98
|
/ToDo/migrations/0001_initial.py
|
aed874cd6296207f5a703c36a9016c6776ea8e46
|
[] |
no_license
|
BrunoSlamek/ToDo_api_drf
|
350d7bfacc26c037807b2afdb8563cd407c0e630
|
c9e66747f36f5d2345f3b237992d3f7f72132bf2
|
refs/heads/main
| 2023-07-14T17:03:36.655587
| 2021-08-25T02:49:46
| 2021-08-25T02:49:46
| 399,606,257
| 0
| 0
| null | 2021-08-25T02:49:47
| 2021-08-24T21:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 624
|
py
|
# Generated by Django 3.2.6 on 2021-08-24 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the ``Todo`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=255)),
                # auto_now refreshes on EVERY save; for a pure creation
                # timestamp auto_now_add would be expected -- NOTE(review):
                # confirm the intent against the model definition.
                ('created_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"brunoslamek@gmail.com"
] |
brunoslamek@gmail.com
|
8798346e2d05dabe4d21c475211f6096d376a636
|
a5b3c21b7dc0ebfc7096e877228dbd4740713bab
|
/Python - learning/Data Structures/copy.py
|
7c31451ef93b11fb7127d05eadf7cfb705c7f771
|
[] |
no_license
|
Cameron-Calpin/Code
|
7dd1ee7d8b83e3d325510ef14a92d36f254bc078
|
fc32e0ed2464efb64b0ebad7f76270a369e2a829
|
refs/heads/master
| 2023-07-07T06:37:54.397181
| 2023-07-01T23:06:58
| 2023-07-01T23:06:58
| 91,615,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# NOTE(review): Python 2 code -- `print` statements and a list-returning
# dict.keys(); this will not run under Python 3.
mytuple = ('one', 'two', 'three')
# Tuples have no .sort(); copy into a list first.
listcopy = list(mytuple)
listcopy.sort()
for item in listcopy:
    print item
myDict = {'one': 1, 'two': 2, 'three': 3}
# In Python 2, keys() returns a plain list, so it can be sorted in place.
keys = myDict.keys()
keys.sort()
for key in keys:
    print key, myDict[key]
|
[
"cameron.calpin.14@cnu.edu"
] |
cameron.calpin.14@cnu.edu
|
12c5fac8d736aaa2b4d1f2cb36d9d2f8cbecc0b1
|
fac5254bbad63a9abdf77c575b76a86b47f62a29
|
/fetch_and_prepare/people_dataframe.py
|
4ea51ae19426d70e5f330182beea0ff74675ffc4
|
[] |
no_license
|
Nickheythatsme/baseball-ml
|
91aa1a00ec1472118b6a9da07728b6ec1d08961e
|
0159695a3a82e1a02a7622384b04cafd8e41df68
|
refs/heads/master
| 2020-06-06T06:54:59.134126
| 2019-06-21T06:24:42
| 2019-06-21T06:24:42
| 192,671,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from baseball_dataframe import BaseballDataFrame
class PeopleDataFrame(BaseballDataFrame):
    """people.csv wrapper that normalises birth columns and downcasts types.

    Memory usage is printed before and after the conversions to show the
    savings.
    """

    def __init__(self):
        # Load people.csv through the project base class.
        super().__init__('people.csv')
        print(super().memory_usage())
        # Missing birth parts become 0 so the columns can hold plain ints.
        self['birthYear'] = self['birthYear'].fillna(value=0).astype('int')
        self['birthMonth'] = self['birthMonth'].fillna(value=0).astype('int')
        self['birthDay'] = self['birthDay'].fillna(value=0).astype('int')
        # Category dtype shrinks the highly repetitive country strings.
        self['birthCountry'] = self['birthCountry'].fillna(value='Unknown').astype('category')
        print(super().memory_usage())
if __name__ == '__main__':
    # Smoke test: build the frame (prints memory usage) and show a preview.
    p = PeopleDataFrame()
    print(p.head())
|
[
"nick_grout@ext.airbnb.com"
] |
nick_grout@ext.airbnb.com
|
72212e31ccf58f2dc7b46b3e6ecd97d710db25ae
|
db54b1234f2c7e2c1fda0700365cad4937d13089
|
/musicbrainz/search/urls.py
|
5b2eae9d6470a58e188065a4c071b3a2ce4d94d8
|
[] |
no_license
|
manuelmamut/musicbrainz_search
|
6add73fe1ef9f6786fcefefa7c19fd993979736f
|
72ce439809cbbf4a0b3b5556363a7eb16b9c8bd4
|
refs/heads/master
| 2020-03-26T17:19:21.415485
| 2018-08-22T02:05:52
| 2018-08-22T02:05:52
| 145,154,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
from django.conf.urls import url
from django.views.generic import RedirectView
from search import views
from django.views.decorators.cache import cache_page
# Release-group search routes; the list view is cached for 15 minutes, with
# an uncached variant kept for comparison/debugging.
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='release-groups/')),
    url(r'^release-groups/$', cache_page(60 * 15)(views.releaseGroupView.as_view()), name='release-groups'),
    url(r'^release-groups-nocache/$', views.releaseGroupView.as_view(), name='release-groups-nocache'),
]
|
[
"manuelj.fuentesg@gmail.com"
] |
manuelj.fuentesg@gmail.com
|
fc9ba580ad9a11c6f67bcea854c79af053e832b4
|
2a5145f811c0679b35af367d25fce5914c2e0e40
|
/Algorithm/169_MajorityElement.py
|
641252ef81bfbf55f340168da7a95c33bb00a40e
|
[] |
no_license
|
lingtianwan/Leetcode
|
8f93fc3fc85db289ca8f618143af2a43711425ba
|
bf2edab87dd96afab1ff411df35d3163c1dfdc55
|
refs/heads/master
| 2021-01-12T19:16:31.918703
| 2017-02-09T01:50:53
| 2017-02-09T01:50:53
| 81,396,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always exist in the array.
class Solution(object):
    def majorityElement(self, nums):
        """Return the element appearing more than len(nums)/2 times, else 0.

        Uses Boyer-Moore voting (O(n) time, O(1) extra space) instead of the
        original O(n^2) scan that called ``nums.count`` for every distinct
        value.  A single verification pass preserves the original behaviour
        of returning 0 when no majority element exists (including for an
        empty list).

        :type nums: List[int]
        :rtype: int
        """
        candidate, votes = 0, 0
        for x in nums:
            if votes == 0:
                candidate = x
            votes += 1 if x == candidate else -1
        # The candidate is only guaranteed correct when a majority exists;
        # verify so non-majority inputs still return 0 as before.
        if nums.count(candidate) > len(nums) / 2:
            return candidate
        return 0
|
[
"lingtian.wan@gmail.com"
] |
lingtian.wan@gmail.com
|
cb9fbb26c3919aa6d7f1384ed52081e21a14efb5
|
b514dce7f2796022bb50792e8fb22265160eb3e7
|
/experiments/sf-heuristic/make-testbed-exp-testbed-ga.py
|
53ef07cacf538b50c2985144f297d718ea448b85
|
[] |
no_license
|
imec-idlab/tsch-slotbonding-ga-simulator
|
7ac48b12fa8165e01c6f38702f78be31f5081136
|
7fe670baa7ea5f6319d7f5ad5ab77d590100d0cb
|
refs/heads/main
| 2023-02-24T07:13:50.483590
| 2021-02-02T11:49:07
| 2021-02-02T11:49:07
| 335,271,078
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,018
|
py
|
import random
import json
import os
import sys
# Make the simulator engine importable relative to this experiment folder.
sys.path.insert(1, '../../simulator/SimEngine/')

# Fixed seed so the per-iteration seed list generated below is reproducible.
random.seed(100)

ITERATIONS = 20         # experiment repetitions (one unique seed each)
INPUT_DIR = '../input'
MAX_SEED = 10000        # seeds are drawn uniformly from [0, MAX_SEED]
random_seeds = []       # registry of seeds already handed out

##### SIMULATOR SPECIFIC SETTINGS #####
expLength = 300 # seconds
cooldownLength = 60 # seconds
simulator_config = {}
simulator_config['numRuns'] = 1
simulator_config['convergeFirst'] = 1
simulator_config['numChans'] = 3 # take only two channels to limit the total number of slots
simulator_config['numMotes'] = 13 # number of motes
simulator_config['topology'] = 'random' # we only consider a random topology
simulator_config['nrMinimalCells'] = 0 # we only want one 1 minimal cell, however it won't really be used in the ILP case
simulator_config['trafficGenerator'] = 'ilp' # the ilp setting deals with the correct ILP traffic
simulator_config['sporadicTraffic'] = 0 # to be sure, set it to 0. we do not want sporadic traffic.
simulator_config['changeParent'] = 0 # we do not change parents
simulator_config['backoffMinExp'] = 1 # we do not have SHARED cells anymore (except for the minimal ones), so we do not care
simulator_config['backoffMaxExp'] = 1 # we do not have SHARED cells anymore (except for the minimal ones), so we do not care
simulator_config['sf'] = 'ilp' # we only want to use the ILP SF.
simulator_config['minCellsMSF'] = 1 # is ignored.
simulator_config['packetSize'] = 127 # maximum packet size
simulator_config['subGHz'] = 1 # yes, go subGHz
simulator_config['individualModulations'] = 1 # we have modulations per link
simulator_config['pkPeriodVar'] = 0 # important to set to 0 to have a nice equally sent packet distribution
simulator_config['modulationConfig'] = 'MCS234s10ms'
simulator_config['slotDuration'] = 0.010 # will be replaced
simulator_config['slotframeLength'] = 12 # will be replaced
# Derived: slotframe cycles fitting in expLength seconds (overridden per run).
simulator_config['numCyclesPerRun'] = int(expLength / float(simulator_config['slotDuration'] * simulator_config['slotframeLength'])) # will be replaced
simulator_config['cooldown'] = cooldownLength / float(simulator_config['slotDuration']) # in ASNs
simulator_config['noNewInterference'] = 0 # will be replaced
simulator_config['noPropagationLoss'] = 0 # will be replaced
simulator_config['settlingTime'] = 120 # in seconds
simulator_config['maxToConverge'] = 6060 # in seconds
simulator_config['stableNeighbors'] = 1
simulator_config['measuredData'] = 1
# TODO SEED STILL HAS TO BE TAKEN FROM THE GLOBAL SETTINGS FILE NOW
##### GA SPECIFIC SETTINGS #####
DEFAULT_SETTINGS = '../final-settings/settings-testbed-makenotfeasible-testbed-ga.json'
config = {}
# Start from the checked-in defaults, then graft the simulator block on top.
with open(DEFAULT_SETTINGS) as json_file:
    config = json.load(json_file)
config["simulator"] = simulator_config
config["simulator"]["modulationFile"] = os.path.basename(config["modulations"]["modulations_file"])

##### CONFIGURATIONS #####
# Per-experiment overrides (slot timing + GA delta).  NOTE(review): both
# entries use slotDuration 0.09, and 0.09*17=1.53s / 0.09*37=3.33s, so the
# 153ms/333ms in the names appear to count in 10ms slot units -- confirm.
configurations = {
    'sfl_153ms_motes_13_top_gaheuristic_delta_005_': {
        "slotDuration": 0.09,
        "slotframeLength": 17,
        "delta": 0.05
    },
    'sfl_333ms_motes_13_top_gaheuristic_delta_005_': {
        "slotDuration": 0.09,
        "slotframeLength": 37,
        "delta": 0.05
    }
}
##### SETTINGS FILE #####
def getUniqueSeed():
    """Draw a random seed in [0, MAX_SEED] that has not been handed out yet.

    Every returned seed is recorded in the module-level `random_seeds` list,
    so repeated calls never yield a duplicate.
    """
    while True:
        candidate = random.randint(0, MAX_SEED)
        if candidate not in random_seeds:
            random_seeds.append(candidate)
            return candidate
if __name__ == "__main__":
    # Pre-draw one unique RNG seed per iteration so runs are reproducible.
    seeds = [getUniqueSeed() for _ in range(ITERATIONS)]
    # Testbed hosts; the DODAG root rotates over this list per iteration.
    NODES = ['nuc9-3', 'nuc9-5', 'nuc9-6', 'nuc9-14', 'nuc9-33', 'nuc9-34', 'nuc9-35', 'nuc9-37', 'nuc10-1', 'nuc10-5',
             'nuc10-18', 'nuc10-20', 'nuc10-33', 'nuc10-34', 'nuc10-35']
    if not os.path.exists(INPUT_DIR):
        os.mkdir(INPUT_DIR)
    for c_name, c in configurations.items():
        # Apply the per-experiment overrides on top of the shared template.
        config["sf-heuristic"]["delta"] = c['delta']
        config["simulator"]['slotDuration'] = c['slotDuration']
        config["simulator"]['slotframeLength'] = c['slotframeLength']
        # Recompute cycle counts from the freshly set timing parameters
        # (config["simulator"] and simulator_config are the same dict).
        config["simulator"]['numCyclesPerRun'] = int(expLength / float(simulator_config['slotDuration'] * config["simulator"]['slotframeLength']))
        config["simulator"]['cooldown'] = cooldownLength / float(simulator_config['slotDuration'])  # in ASNs
        for iteration in range(ITERATIONS):
            config["seed"] = seeds[iteration]
            config["name"] = "seed-{0}-exp-{1}".format(config["seed"], c_name)
            config["sf-heuristic"]["testbed_results"]["root"] = NODES[iteration % len(NODES)]
            # One JSON settings file per (seed, experiment) combination.
            nameFile = '{input_dir}/ga_seed_{seed}_c_{config_modulation}_ss_{slotframe_size}_exp_{file_suffix}.json'.format(
                input_dir=INPUT_DIR,
                seed=config["seed"],
                config_modulation=config['simulator']['modulationConfig'],
                slotframe_size=int(1000*config['simulator']['slotframeLength']*config['simulator']['slotDuration']),
                file_suffix=c_name)
            with open(nameFile, 'w') as outfile:
                json.dump(config, outfile)
|
[
"glenn.daneels@uantwerpen.be"
] |
glenn.daneels@uantwerpen.be
|
28ded34c244dfde21440e7b8a4c967128d3118be
|
b39d72ba5de9d4683041e6b4413f8483c817f821
|
/GeneVisualization/ass1/Lib/site-packages/itk/itkImageDuplicatorPython.py
|
feba22d089e1bed3a6240415b04b2f8985228d76
|
[] |
no_license
|
ssalmaan/DataVisualization
|
d93a0afe1290e4ea46c3be5718d503c71a6f99a7
|
eff072f11337f124681ce08742e1a092033680cc
|
refs/heads/master
| 2021-03-13T05:40:23.679095
| 2020-03-11T21:37:45
| 2020-03-11T21:37:45
| 246,642,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149,049
|
py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# SWIG module bootstrap: load the compiled extension `_itkImageDuplicatorPython`
# and install Python-2/3 compatibility shims used by the proxy classes below.
from sys import version_info
if version_info >= (3, 0, 0):
    # Python 3: bound methods are produced through the SWIG runtime helper.
    new_instancemethod = lambda func, inst, cls: _itkImageDuplicatorPython.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Locate the C extension next to this wrapper file and load it.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_itkImageDuplicatorPython', [dirname(__file__)])
        except ImportError:
            # Not found beside the wrapper: fall back to a regular import.
            import _itkImageDuplicatorPython
            return _itkImageDuplicatorPython
        if fp is not None:
            try:
                _mod = imp.load_module('_itkImageDuplicatorPython', fp, pathname, description)
            finally:
                fp.close()  # always release the handle opened by find_module
            return _mod
    _itkImageDuplicatorPython = swig_import_helper()
    del swig_import_helper
else:
    import _itkImageDuplicatorPython
del version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set attribute *name* on a SWIG proxy instance.

    'thisown' and 'this' are forwarded to the underlying SwigPyObject; other
    names are dispatched through the class's __swig_setmethods__ table.  When
    *static* is true, names outside that table raise AttributeError instead of
    being added dynamically.
    """
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic variant: unknown names fall through to object.__setattr__.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    """Look up attribute *name* through SWIG's __swig_getmethods__ table.

    'thisown' is answered by the underlying SwigPyObject; unknown names raise
    AttributeError when *static* is true.
    """
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    # Dynamic variant of _swig_getattr_nondynamic.
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    """repr() for SWIG proxies: class name plus the wrapped C++ object's repr."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        strthis = ""  # no underlying C++ object is attached
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    # Ancient Pythons without new-style classes: use a dummy base class.
    class _object:
        pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    """Wrap a setter so only existing attributes (or 'this'/'thisown') can be set."""
    def set_attr(self, name, value):
        if (name == "thisown"):
            return self.this.own(value)
        if hasattr(self, name) or (name == "this"):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import itkImagePython
import itkSizePython
import pyBasePython
import itkRGBAPixelPython
import itkFixedArrayPython
import itkMatrixPython
import itkCovariantVectorPython
import vnl_vector_refPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import itkVectorPython
import vnl_matrix_fixedPython
import itkPointPython
import itkRGBPixelPython
import ITKCommonBasePython
import itkSymmetricSecondRankTensorPython
import itkOffsetPython
import itkIndexPython
import itkImageRegionPython
# Module-level convenience constructors emitted by SWIG: each one simply
# forwards to the New() factory of the corresponding proxy class defined
# below (one function per wrapped image-type instantiation).
def itkImageDuplicatorIUL3_New():
    return itkImageDuplicatorIUL3.New()

def itkImageDuplicatorIUL2_New():
    return itkImageDuplicatorIUL2.New()

def itkImageDuplicatorISSRTD33_New():
    return itkImageDuplicatorISSRTD33.New()

def itkImageDuplicatorID3_New():
    return itkImageDuplicatorID3.New()

def itkImageDuplicatorISSRTD22_New():
    return itkImageDuplicatorISSRTD22.New()

def itkImageDuplicatorID2_New():
    return itkImageDuplicatorID2.New()

def itkImageDuplicatorICVF43_New():
    return itkImageDuplicatorICVF43.New()

def itkImageDuplicatorIVF43_New():
    return itkImageDuplicatorIVF43.New()

def itkImageDuplicatorICVF33_New():
    return itkImageDuplicatorICVF33.New()

def itkImageDuplicatorIVF33_New():
    return itkImageDuplicatorIVF33.New()

def itkImageDuplicatorICVF23_New():
    return itkImageDuplicatorICVF23.New()

def itkImageDuplicatorIVF23_New():
    return itkImageDuplicatorIVF23.New()

def itkImageDuplicatorIF3_New():
    return itkImageDuplicatorIF3.New()

def itkImageDuplicatorICVF42_New():
    return itkImageDuplicatorICVF42.New()

def itkImageDuplicatorIVF42_New():
    return itkImageDuplicatorIVF42.New()

def itkImageDuplicatorICVF32_New():
    return itkImageDuplicatorICVF32.New()

def itkImageDuplicatorIVF32_New():
    return itkImageDuplicatorIVF32.New()

def itkImageDuplicatorICVF22_New():
    return itkImageDuplicatorICVF22.New()

def itkImageDuplicatorIVF22_New():
    return itkImageDuplicatorIVF22.New()

def itkImageDuplicatorIF2_New():
    return itkImageDuplicatorIF2.New()

def itkImageDuplicatorIUS3_New():
    return itkImageDuplicatorIUS3.New()

def itkImageDuplicatorIUS2_New():
    return itkImageDuplicatorIUS2.New()

def itkImageDuplicatorIRGBAUC3_New():
    return itkImageDuplicatorIRGBAUC3.New()

def itkImageDuplicatorIRGBUC3_New():
    return itkImageDuplicatorIRGBUC3.New()

def itkImageDuplicatorIUC3_New():
    return itkImageDuplicatorIUC3.New()

def itkImageDuplicatorIRGBAUC2_New():
    return itkImageDuplicatorIRGBAUC2.New()

def itkImageDuplicatorIRGBUC2_New():
    return itkImageDuplicatorIRGBUC2.New()

def itkImageDuplicatorIUC2_New():
    return itkImageDuplicatorIUC2.New()

def itkImageDuplicatorISS3_New():
    return itkImageDuplicatorISS3.New()

def itkImageDuplicatorISS2_New():
    return itkImageDuplicatorISS2.New()
# SWIG proxy for the C++ class itkImageDuplicatorICVF22: duplicates images of
# the wrapped type itkImageCVF22 (suffix presumably encodes CovariantVector
# <float> pixels -- confirm against ITK's wrapping naming convention).
class itkImageDuplicatorICVF22(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorICVF22 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Instances must be created through New(); direct construction is disabled.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorICVF22_Pointer":
        """__New_orig__() -> itkImageDuplicatorICVF22_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorICVF22_Pointer":
        """Clone(itkImageDuplicatorICVF22 self) -> itkImageDuplicatorICVF22_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_Clone(self)

    def SetInputImage(self, _arg: 'itkImageCVF22') -> "void":
        """SetInputImage(itkImageDuplicatorICVF22 self, itkImageCVF22 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageCVF22 *":
        """
        GetOutput(itkImageDuplicatorICVF22 self) -> itkImageCVF22
        GetOutput(itkImageDuplicatorICVF22 self) -> itkImageCVF22
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageCVF22 *":
        """GetModifiableOutput(itkImageDuplicatorICVF22 self) -> itkImageCVF22"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorICVF22 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF22

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF22 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorICVF22"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorICVF22

        Create a new object of the class itkImageDuplicatorICVF22 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorICVF22.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorICVF22.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorICVF22.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()

# Bind the C-level implementations as instance methods and register the proxy
# with the SWIG runtime.
itkImageDuplicatorICVF22.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_Clone, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_SetInputImage, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetOutput, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_GetModifiableOutput, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF22_Update, None, itkImageDuplicatorICVF22)
itkImageDuplicatorICVF22_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF22_swigregister
itkImageDuplicatorICVF22_swigregister(itkImageDuplicatorICVF22)

def itkImageDuplicatorICVF22___New_orig__() -> "itkImageDuplicatorICVF22_Pointer":
    """itkImageDuplicatorICVF22___New_orig__() -> itkImageDuplicatorICVF22_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF22___New_orig__()

def itkImageDuplicatorICVF22_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF22 *":
    """itkImageDuplicatorICVF22_cast(itkLightObject obj) -> itkImageDuplicatorICVF22"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF22_cast(obj)
# SWIG proxy for the C++ class itkImageDuplicatorICVF23: duplicates images of
# the wrapped type itkImageCVF23 (suffix presumably encodes CovariantVector
# <float> pixels -- confirm against ITK's wrapping naming convention).
class itkImageDuplicatorICVF23(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorICVF23 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Instances must be created through New(); direct construction is disabled.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorICVF23_Pointer":
        """__New_orig__() -> itkImageDuplicatorICVF23_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorICVF23_Pointer":
        """Clone(itkImageDuplicatorICVF23 self) -> itkImageDuplicatorICVF23_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_Clone(self)

    def SetInputImage(self, _arg: 'itkImageCVF23') -> "void":
        """SetInputImage(itkImageDuplicatorICVF23 self, itkImageCVF23 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageCVF23 *":
        """
        GetOutput(itkImageDuplicatorICVF23 self) -> itkImageCVF23
        GetOutput(itkImageDuplicatorICVF23 self) -> itkImageCVF23
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageCVF23 *":
        """GetModifiableOutput(itkImageDuplicatorICVF23 self) -> itkImageCVF23"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorICVF23 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF23

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF23 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorICVF23"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorICVF23

        Create a new object of the class itkImageDuplicatorICVF23 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorICVF23.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorICVF23.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorICVF23.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()

# Bind the C-level implementations as instance methods and register the proxy
# with the SWIG runtime.
itkImageDuplicatorICVF23.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_Clone, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_SetInputImage, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetOutput, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_GetModifiableOutput, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF23_Update, None, itkImageDuplicatorICVF23)
itkImageDuplicatorICVF23_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF23_swigregister
itkImageDuplicatorICVF23_swigregister(itkImageDuplicatorICVF23)

def itkImageDuplicatorICVF23___New_orig__() -> "itkImageDuplicatorICVF23_Pointer":
    """itkImageDuplicatorICVF23___New_orig__() -> itkImageDuplicatorICVF23_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF23___New_orig__()

def itkImageDuplicatorICVF23_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF23 *":
    """itkImageDuplicatorICVF23_cast(itkLightObject obj) -> itkImageDuplicatorICVF23"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF23_cast(obj)
# SWIG proxy for the C++ class itkImageDuplicatorICVF32: duplicates images of
# the wrapped type itkImageCVF32 (suffix presumably encodes CovariantVector
# <float> pixels -- confirm against ITK's wrapping naming convention).
class itkImageDuplicatorICVF32(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorICVF32 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Instances must be created through New(); direct construction is disabled.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorICVF32_Pointer":
        """__New_orig__() -> itkImageDuplicatorICVF32_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorICVF32_Pointer":
        """Clone(itkImageDuplicatorICVF32 self) -> itkImageDuplicatorICVF32_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_Clone(self)

    def SetInputImage(self, _arg: 'itkImageCVF32') -> "void":
        """SetInputImage(itkImageDuplicatorICVF32 self, itkImageCVF32 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageCVF32 *":
        """
        GetOutput(itkImageDuplicatorICVF32 self) -> itkImageCVF32
        GetOutput(itkImageDuplicatorICVF32 self) -> itkImageCVF32
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageCVF32 *":
        """GetModifiableOutput(itkImageDuplicatorICVF32 self) -> itkImageCVF32"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorICVF32 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF32

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF32 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorICVF32"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorICVF32

        Create a new object of the class itkImageDuplicatorICVF32 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorICVF32.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorICVF32.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorICVF32.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()

# Bind the C-level implementations as instance methods and register the proxy
# with the SWIG runtime.
itkImageDuplicatorICVF32.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_Clone, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_SetInputImage, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetOutput, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_GetModifiableOutput, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF32_Update, None, itkImageDuplicatorICVF32)
itkImageDuplicatorICVF32_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF32_swigregister
itkImageDuplicatorICVF32_swigregister(itkImageDuplicatorICVF32)

def itkImageDuplicatorICVF32___New_orig__() -> "itkImageDuplicatorICVF32_Pointer":
    """itkImageDuplicatorICVF32___New_orig__() -> itkImageDuplicatorICVF32_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF32___New_orig__()

def itkImageDuplicatorICVF32_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF32 *":
    """itkImageDuplicatorICVF32_cast(itkLightObject obj) -> itkImageDuplicatorICVF32"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF32_cast(obj)
# SWIG proxy for the C++ class itkImageDuplicatorICVF33: duplicates images of
# the wrapped type itkImageCVF33 (suffix presumably encodes CovariantVector
# <float> pixels -- confirm against ITK's wrapping naming convention).
class itkImageDuplicatorICVF33(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorICVF33 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Instances must be created through New(); direct construction is disabled.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorICVF33_Pointer":
        """__New_orig__() -> itkImageDuplicatorICVF33_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorICVF33_Pointer":
        """Clone(itkImageDuplicatorICVF33 self) -> itkImageDuplicatorICVF33_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_Clone(self)

    def SetInputImage(self, _arg: 'itkImageCVF33') -> "void":
        """SetInputImage(itkImageDuplicatorICVF33 self, itkImageCVF33 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageCVF33 *":
        """
        GetOutput(itkImageDuplicatorICVF33 self) -> itkImageCVF33
        GetOutput(itkImageDuplicatorICVF33 self) -> itkImageCVF33
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageCVF33 *":
        """GetModifiableOutput(itkImageDuplicatorICVF33 self) -> itkImageCVF33"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorICVF33 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF33

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF33 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorICVF33"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorICVF33

        Create a new object of the class itkImageDuplicatorICVF33 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorICVF33.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorICVF33.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorICVF33.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()

# Bind the C-level implementations as instance methods and register the proxy
# with the SWIG runtime.
itkImageDuplicatorICVF33.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_Clone, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_SetInputImage, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetOutput, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_GetModifiableOutput, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF33_Update, None, itkImageDuplicatorICVF33)
itkImageDuplicatorICVF33_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF33_swigregister
itkImageDuplicatorICVF33_swigregister(itkImageDuplicatorICVF33)

def itkImageDuplicatorICVF33___New_orig__() -> "itkImageDuplicatorICVF33_Pointer":
    """itkImageDuplicatorICVF33___New_orig__() -> itkImageDuplicatorICVF33_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF33___New_orig__()

def itkImageDuplicatorICVF33_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF33 *":
    """itkImageDuplicatorICVF33_cast(itkLightObject obj) -> itkImageDuplicatorICVF33"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF33_cast(obj)
# SWIG proxy for the C++ class itkImageDuplicatorICVF42: duplicates images of
# the wrapped type itkImageCVF42 (suffix presumably encodes CovariantVector
# <float> pixels -- confirm against ITK's wrapping naming convention).
class itkImageDuplicatorICVF42(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorICVF42 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Instances must be created through New(); direct construction is disabled.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorICVF42_Pointer":
        """__New_orig__() -> itkImageDuplicatorICVF42_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorICVF42_Pointer":
        """Clone(itkImageDuplicatorICVF42 self) -> itkImageDuplicatorICVF42_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_Clone(self)

    def SetInputImage(self, _arg: 'itkImageCVF42') -> "void":
        """SetInputImage(itkImageDuplicatorICVF42 self, itkImageCVF42 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageCVF42 *":
        """
        GetOutput(itkImageDuplicatorICVF42 self) -> itkImageCVF42
        GetOutput(itkImageDuplicatorICVF42 self) -> itkImageCVF42
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageCVF42 *":
        """GetModifiableOutput(itkImageDuplicatorICVF42 self) -> itkImageCVF42"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorICVF42 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF42

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF42 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorICVF42"""
        return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_cast(obj)
    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorICVF42

        Create a new object of the class itkImageDuplicatorICVF42 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorICVF42.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorICVF42.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorICVF42.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()

# Bind the C-level implementations as instance methods and register the proxy
# with the SWIG runtime.
itkImageDuplicatorICVF42.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_Clone, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_SetInputImage, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetOutput, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_GetModifiableOutput, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF42_Update, None, itkImageDuplicatorICVF42)
itkImageDuplicatorICVF42_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF42_swigregister
itkImageDuplicatorICVF42_swigregister(itkImageDuplicatorICVF42)

def itkImageDuplicatorICVF42___New_orig__() -> "itkImageDuplicatorICVF42_Pointer":
    """itkImageDuplicatorICVF42___New_orig__() -> itkImageDuplicatorICVF42_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF42___New_orig__()

def itkImageDuplicatorICVF42_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF42 *":
    """itkImageDuplicatorICVF42_cast(itkLightObject obj) -> itkImageDuplicatorICVF42"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF42_cast(obj)
class itkImageDuplicatorICVF43(ITKCommonBasePython.itkObject):
"""Proxy of C++ itkImageDuplicatorICVF43 class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkImageDuplicatorICVF43_Pointer":
"""__New_orig__() -> itkImageDuplicatorICVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkImageDuplicatorICVF43_Pointer":
"""Clone(itkImageDuplicatorICVF43 self) -> itkImageDuplicatorICVF43_Pointer"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_Clone(self)
def SetInputImage(self, _arg: 'itkImageCVF43') -> "void":
"""SetInputImage(itkImageDuplicatorICVF43 self, itkImageCVF43 _arg)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_SetInputImage(self, _arg)
def GetOutput(self, *args) -> "itkImageCVF43 *":
"""
GetOutput(itkImageDuplicatorICVF43 self) -> itkImageCVF43
GetOutput(itkImageDuplicatorICVF43 self) -> itkImageCVF43
"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetOutput(self, *args)
def GetModifiableOutput(self) -> "itkImageCVF43 *":
"""GetModifiableOutput(itkImageDuplicatorICVF43 self) -> itkImageCVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetModifiableOutput(self)
def Update(self) -> "void":
"""Update(itkImageDuplicatorICVF43 self)"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_Update(self)
__swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorICVF43
def cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF43 *":
"""cast(itkLightObject obj) -> itkImageDuplicatorICVF43"""
return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkImageDuplicatorICVF43
Create a new object of the class itkImageDuplicatorICVF43 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkImageDuplicatorICVF43.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkImageDuplicatorICVF43.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkImageDuplicatorICVF43.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
def __internal_call__(self):
    """Create an object, update with the inputs and
    attributes, and return the result.
    The syntax is the same as the one used in New().
    Update() is ran once the input are changed, and
    the current output.
    """
    # Run the pipeline, then hand back the duplicated image proxy.
    self.Update()
    return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorICVF43.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_Clone, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_SetInputImage, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetOutput, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_GetModifiableOutput, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorICVF43_Update, None, itkImageDuplicatorICVF43)
itkImageDuplicatorICVF43_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorICVF43_swigregister
itkImageDuplicatorICVF43_swigregister(itkImageDuplicatorICVF43)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorICVF43___New_orig__() -> "itkImageDuplicatorICVF43_Pointer":
    """itkImageDuplicatorICVF43___New_orig__() -> itkImageDuplicatorICVF43_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF43___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorICVF43_cast(obj: 'itkLightObject') -> "itkImageDuplicatorICVF43 *":
    """itkImageDuplicatorICVF43_cast(itkLightObject obj) -> itkImageDuplicatorICVF43"""
    return _itkImageDuplicatorPython.itkImageDuplicatorICVF43_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorID2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorID2 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorID2_Pointer":
        """__New_orig__() -> itkImageDuplicatorID2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID2___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorID2_Pointer":
        """Clone(itkImageDuplicatorID2 self) -> itkImageDuplicatorID2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageD2') -> "void":
        """SetInputImage(itkImageDuplicatorID2 self, itkImageD2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageD2 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorID2 self) -> itkImageD2
        GetOutput(itkImageDuplicatorID2 self) -> itkImageD2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorID2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageD2 *":
        """GetModifiableOutput(itkImageDuplicatorID2 self) -> itkImageD2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorID2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID2_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorID2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorID2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorID2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorID2
        Create a new object of the class itkImageDuplicatorID2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorID2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorID2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorID2.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorID2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_Clone, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_SetInputImage, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_GetOutput, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_GetModifiableOutput, None, itkImageDuplicatorID2)
itkImageDuplicatorID2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID2_Update, None, itkImageDuplicatorID2)
itkImageDuplicatorID2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorID2_swigregister
itkImageDuplicatorID2_swigregister(itkImageDuplicatorID2)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorID2___New_orig__() -> "itkImageDuplicatorID2_Pointer":
    """itkImageDuplicatorID2___New_orig__() -> itkImageDuplicatorID2_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorID2___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorID2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorID2 *":
    """itkImageDuplicatorID2_cast(itkLightObject obj) -> itkImageDuplicatorID2"""
    return _itkImageDuplicatorPython.itkImageDuplicatorID2_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorID3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorID3 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorID3_Pointer":
        """__New_orig__() -> itkImageDuplicatorID3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID3___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorID3_Pointer":
        """Clone(itkImageDuplicatorID3 self) -> itkImageDuplicatorID3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageD3') -> "void":
        """SetInputImage(itkImageDuplicatorID3 self, itkImageD3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageD3 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorID3 self) -> itkImageD3
        GetOutput(itkImageDuplicatorID3 self) -> itkImageD3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorID3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageD3 *":
        """GetModifiableOutput(itkImageDuplicatorID3 self) -> itkImageD3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorID3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID3_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorID3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorID3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorID3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorID3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorID3
        Create a new object of the class itkImageDuplicatorID3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorID3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorID3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorID3.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorID3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_Clone, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_SetInputImage, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_GetOutput, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_GetModifiableOutput, None, itkImageDuplicatorID3)
itkImageDuplicatorID3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorID3_Update, None, itkImageDuplicatorID3)
itkImageDuplicatorID3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorID3_swigregister
itkImageDuplicatorID3_swigregister(itkImageDuplicatorID3)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorID3___New_orig__() -> "itkImageDuplicatorID3_Pointer":
    """itkImageDuplicatorID3___New_orig__() -> itkImageDuplicatorID3_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorID3___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorID3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorID3 *":
    """itkImageDuplicatorID3_cast(itkLightObject obj) -> itkImageDuplicatorID3"""
    return _itkImageDuplicatorPython.itkImageDuplicatorID3_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorIF2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIF2 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIF2_Pointer":
        """__New_orig__() -> itkImageDuplicatorIF2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIF2_Pointer":
        """Clone(itkImageDuplicatorIF2 self) -> itkImageDuplicatorIF2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageF2') -> "void":
        """SetInputImage(itkImageDuplicatorIF2 self, itkImageF2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageF2 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorIF2 self) -> itkImageF2
        GetOutput(itkImageDuplicatorIF2 self) -> itkImageF2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageF2 *":
        """GetModifiableOutput(itkImageDuplicatorIF2 self) -> itkImageF2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIF2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIF2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIF2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIF2
        Create a new object of the class itkImageDuplicatorIF2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIF2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIF2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIF2.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorIF2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_Clone, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_SetInputImage, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_GetOutput, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_GetModifiableOutput, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF2_Update, None, itkImageDuplicatorIF2)
itkImageDuplicatorIF2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIF2_swigregister
itkImageDuplicatorIF2_swigregister(itkImageDuplicatorIF2)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorIF2___New_orig__() -> "itkImageDuplicatorIF2_Pointer":
    """itkImageDuplicatorIF2___New_orig__() -> itkImageDuplicatorIF2_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIF2___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorIF2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF2 *":
    """itkImageDuplicatorIF2_cast(itkLightObject obj) -> itkImageDuplicatorIF2"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIF2_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorIF3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIF3 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIF3_Pointer":
        """__New_orig__() -> itkImageDuplicatorIF3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIF3_Pointer":
        """Clone(itkImageDuplicatorIF3 self) -> itkImageDuplicatorIF3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageF3') -> "void":
        """SetInputImage(itkImageDuplicatorIF3 self, itkImageF3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageF3 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorIF3 self) -> itkImageF3
        GetOutput(itkImageDuplicatorIF3 self) -> itkImageF3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageF3 *":
        """GetModifiableOutput(itkImageDuplicatorIF3 self) -> itkImageF3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIF3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIF3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIF3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIF3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIF3
        Create a new object of the class itkImageDuplicatorIF3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIF3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIF3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIF3.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorIF3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_Clone, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_SetInputImage, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_GetOutput, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_GetModifiableOutput, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIF3_Update, None, itkImageDuplicatorIF3)
itkImageDuplicatorIF3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIF3_swigregister
itkImageDuplicatorIF3_swigregister(itkImageDuplicatorIF3)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorIF3___New_orig__() -> "itkImageDuplicatorIF3_Pointer":
    """itkImageDuplicatorIF3___New_orig__() -> itkImageDuplicatorIF3_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIF3___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorIF3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIF3 *":
    """itkImageDuplicatorIF3_cast(itkLightObject obj) -> itkImageDuplicatorIF3"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIF3_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorIRGBAUC2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIRGBAUC2 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIRGBAUC2_Pointer":
        """__New_orig__() -> itkImageDuplicatorIRGBAUC2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIRGBAUC2_Pointer":
        """Clone(itkImageDuplicatorIRGBAUC2 self) -> itkImageDuplicatorIRGBAUC2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageRGBAUC2') -> "void":
        """SetInputImage(itkImageDuplicatorIRGBAUC2 self, itkImageRGBAUC2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageRGBAUC2 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorIRGBAUC2 self) -> itkImageRGBAUC2
        GetOutput(itkImageDuplicatorIRGBAUC2 self) -> itkImageRGBAUC2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageRGBAUC2 *":
        """GetModifiableOutput(itkImageDuplicatorIRGBAUC2 self) -> itkImageRGBAUC2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIRGBAUC2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBAUC2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIRGBAUC2
        Create a new object of the class itkImageDuplicatorIRGBAUC2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIRGBAUC2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIRGBAUC2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIRGBAUC2.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorIRGBAUC2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Clone, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_SetInputImage, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetOutput, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_GetModifiableOutput, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_Update, None, itkImageDuplicatorIRGBAUC2)
itkImageDuplicatorIRGBAUC2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_swigregister
itkImageDuplicatorIRGBAUC2_swigregister(itkImageDuplicatorIRGBAUC2)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorIRGBAUC2___New_orig__() -> "itkImageDuplicatorIRGBAUC2_Pointer":
    """itkImageDuplicatorIRGBAUC2___New_orig__() -> itkImageDuplicatorIRGBAUC2_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorIRGBAUC2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC2 *":
    """itkImageDuplicatorIRGBAUC2_cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC2"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC2_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorIRGBAUC3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIRGBAUC3 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIRGBAUC3_Pointer":
        """__New_orig__() -> itkImageDuplicatorIRGBAUC3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIRGBAUC3_Pointer":
        """Clone(itkImageDuplicatorIRGBAUC3 self) -> itkImageDuplicatorIRGBAUC3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageRGBAUC3') -> "void":
        """SetInputImage(itkImageDuplicatorIRGBAUC3 self, itkImageRGBAUC3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageRGBAUC3 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorIRGBAUC3 self) -> itkImageRGBAUC3
        GetOutput(itkImageDuplicatorIRGBAUC3 self) -> itkImageRGBAUC3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageRGBAUC3 *":
        """GetModifiableOutput(itkImageDuplicatorIRGBAUC3 self) -> itkImageRGBAUC3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIRGBAUC3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBAUC3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIRGBAUC3
        Create a new object of the class itkImageDuplicatorIRGBAUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIRGBAUC3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIRGBAUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIRGBAUC3.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorIRGBAUC3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Clone, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_SetInputImage, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetOutput, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_GetModifiableOutput, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_Update, None, itkImageDuplicatorIRGBAUC3)
itkImageDuplicatorIRGBAUC3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_swigregister
itkImageDuplicatorIRGBAUC3_swigregister(itkImageDuplicatorIRGBAUC3)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorIRGBAUC3___New_orig__() -> "itkImageDuplicatorIRGBAUC3_Pointer":
    """itkImageDuplicatorIRGBAUC3___New_orig__() -> itkImageDuplicatorIRGBAUC3_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorIRGBAUC3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBAUC3 *":
    """itkImageDuplicatorIRGBAUC3_cast(itkLightObject obj) -> itkImageDuplicatorIRGBAUC3"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBAUC3_cast(obj)
# SWIG-generated proxy for one C++ itk::ImageDuplicator template instantiation;
# every method delegates to the compiled extension module _itkImageDuplicatorPython.
class itkImageDuplicatorIRGBUC2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIRGBUC2 class."""
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIRGBUC2_Pointer":
        """__New_orig__() -> itkImageDuplicatorIRGBUC2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2___New_orig__()
    # Exposed as a static factory returning a smart-pointer proxy.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIRGBUC2_Pointer":
        """Clone(itkImageDuplicatorIRGBUC2 self) -> itkImageDuplicatorIRGBUC2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageRGBUC2') -> "void":
        """SetInputImage(itkImageDuplicatorIRGBUC2 self, itkImageRGBUC2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageRGBUC2 *":
        # Two C++ overloads collapse into one Python method.
        """
        GetOutput(itkImageDuplicatorIRGBUC2 self) -> itkImageRGBUC2
        GetOutput(itkImageDuplicatorIRGBUC2 self) -> itkImageRGBUC2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageRGBUC2 *":
        """GetModifiableOutput(itkImageDuplicatorIRGBUC2 self) -> itkImageRGBUC2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIRGBUC2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Update(self)
    # SWIG destructor hook invoked when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBUC2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIRGBUC2
        Create a new object of the class itkImageDuplicatorIRGBUC2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIRGBUC2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIRGBUC2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIRGBUC2.__New_orig__()
        # Imported lazily (likely to avoid an import cycle at module load) — TODO confirm.
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind each proxy method via SWIG's new_instancemethod so calls dispatch
# straight to the C extension, then register the proxy type with the SWIG runtime.
itkImageDuplicatorIRGBUC2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Clone, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_SetInputImage, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetOutput, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_GetModifiableOutput, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_Update, None, itkImageDuplicatorIRGBUC2)
itkImageDuplicatorIRGBUC2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_swigregister
itkImageDuplicatorIRGBUC2_swigregister(itkImageDuplicatorIRGBUC2)
# Module-level alias of the static factory; returns a smart-pointer proxy.
def itkImageDuplicatorIRGBUC2___New_orig__() -> "itkImageDuplicatorIRGBUC2_Pointer":
    """itkImageDuplicatorIRGBUC2___New_orig__() -> itkImageDuplicatorIRGBUC2_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2___New_orig__()
# Module-level alias of the static cast from a generic itkLightObject.
def itkImageDuplicatorIRGBUC2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC2 *":
    """itkImageDuplicatorIRGBUC2_cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC2"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC2_cast(obj)
class itkImageDuplicatorIRGBUC3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIRGBUC3 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIRGBUC3_Pointer":
        """__New_orig__() -> itkImageDuplicatorIRGBUC3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIRGBUC3_Pointer":
        """Clone(itkImageDuplicatorIRGBUC3 self) -> itkImageDuplicatorIRGBUC3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageRGBUC3') -> "void":
        """SetInputImage(itkImageDuplicatorIRGBUC3 self, itkImageRGBUC3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageRGBUC3 *":
        """
        GetOutput(itkImageDuplicatorIRGBUC3 self) -> itkImageRGBUC3
        GetOutput(itkImageDuplicatorIRGBUC3 self) -> itkImageRGBUC3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageRGBUC3 *":
        """GetModifiableOutput(itkImageDuplicatorIRGBUC3 self) -> itkImageRGBUC3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIRGBUC3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIRGBUC3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIRGBUC3
        Create a new object of the class itkImageDuplicatorIRGBUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIRGBUC3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIRGBUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIRGBUC3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorIRGBUC3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Clone, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_SetInputImage, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetOutput, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_GetModifiableOutput, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_Update, None, itkImageDuplicatorIRGBUC3)
itkImageDuplicatorIRGBUC3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_swigregister
itkImageDuplicatorIRGBUC3_swigregister(itkImageDuplicatorIRGBUC3)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorIRGBUC3___New_orig__() -> "itkImageDuplicatorIRGBUC3_Pointer":
    """itkImageDuplicatorIRGBUC3___New_orig__() -> itkImageDuplicatorIRGBUC3_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3___New_orig__()
def itkImageDuplicatorIRGBUC3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIRGBUC3 *":
    """itkImageDuplicatorIRGBUC3_cast(itkLightObject obj) -> itkImageDuplicatorIRGBUC3"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIRGBUC3_cast(obj)
class itkImageDuplicatorISS2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorISS2 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorISS2_Pointer":
        """__New_orig__() -> itkImageDuplicatorISS2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorISS2_Pointer":
        """Clone(itkImageDuplicatorISS2 self) -> itkImageDuplicatorISS2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageSS2') -> "void":
        """SetInputImage(itkImageDuplicatorISS2 self, itkImageSS2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageSS2 *":
        """
        GetOutput(itkImageDuplicatorISS2 self) -> itkImageSS2
        GetOutput(itkImageDuplicatorISS2 self) -> itkImageSS2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageSS2 *":
        """GetModifiableOutput(itkImageDuplicatorISS2 self) -> itkImageSS2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorISS2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISS2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorISS2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorISS2
        Create a new object of the class itkImageDuplicatorISS2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorISS2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorISS2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorISS2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorISS2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_Clone, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_SetInputImage, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_GetOutput, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_GetModifiableOutput, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS2_Update, None, itkImageDuplicatorISS2)
itkImageDuplicatorISS2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISS2_swigregister
itkImageDuplicatorISS2_swigregister(itkImageDuplicatorISS2)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorISS2___New_orig__() -> "itkImageDuplicatorISS2_Pointer":
    """itkImageDuplicatorISS2___New_orig__() -> itkImageDuplicatorISS2_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISS2___New_orig__()
def itkImageDuplicatorISS2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS2 *":
    """itkImageDuplicatorISS2_cast(itkLightObject obj) -> itkImageDuplicatorISS2"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISS2_cast(obj)
class itkImageDuplicatorISS3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorISS3 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorISS3_Pointer":
        """__New_orig__() -> itkImageDuplicatorISS3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorISS3_Pointer":
        """Clone(itkImageDuplicatorISS3 self) -> itkImageDuplicatorISS3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageSS3') -> "void":
        """SetInputImage(itkImageDuplicatorISS3 self, itkImageSS3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageSS3 *":
        """
        GetOutput(itkImageDuplicatorISS3 self) -> itkImageSS3
        GetOutput(itkImageDuplicatorISS3 self) -> itkImageSS3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageSS3 *":
        """GetModifiableOutput(itkImageDuplicatorISS3 self) -> itkImageSS3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorISS3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISS3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorISS3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISS3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorISS3
        Create a new object of the class itkImageDuplicatorISS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorISS3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorISS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorISS3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorISS3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_Clone, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_SetInputImage, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_GetOutput, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_GetModifiableOutput, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISS3_Update, None, itkImageDuplicatorISS3)
itkImageDuplicatorISS3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISS3_swigregister
itkImageDuplicatorISS3_swigregister(itkImageDuplicatorISS3)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorISS3___New_orig__() -> "itkImageDuplicatorISS3_Pointer":
    """itkImageDuplicatorISS3___New_orig__() -> itkImageDuplicatorISS3_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISS3___New_orig__()
def itkImageDuplicatorISS3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISS3 *":
    """itkImageDuplicatorISS3_cast(itkLightObject obj) -> itkImageDuplicatorISS3"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISS3_cast(obj)
class itkImageDuplicatorISSRTD22(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorISSRTD22 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorISSRTD22_Pointer":
        """__New_orig__() -> itkImageDuplicatorISSRTD22_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorISSRTD22_Pointer":
        """Clone(itkImageDuplicatorISSRTD22 self) -> itkImageDuplicatorISSRTD22_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Clone(self)
    def SetInputImage(self, _arg: 'itkImageSSRTD22') -> "void":
        """SetInputImage(itkImageDuplicatorISSRTD22 self, itkImageSSRTD22 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageSSRTD22 *":
        """
        GetOutput(itkImageDuplicatorISSRTD22 self) -> itkImageSSRTD22
        GetOutput(itkImageDuplicatorISSRTD22 self) -> itkImageSSRTD22
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageSSRTD22 *":
        """GetModifiableOutput(itkImageDuplicatorISSRTD22 self) -> itkImageSSRTD22"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorISSRTD22 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISSRTD22
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD22 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorISSRTD22"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorISSRTD22
        Create a new object of the class itkImageDuplicatorISSRTD22 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorISSRTD22.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorISSRTD22.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorISSRTD22.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorISSRTD22.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Clone, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_SetInputImage, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetOutput, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_GetModifiableOutput, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_Update, None, itkImageDuplicatorISSRTD22)
itkImageDuplicatorISSRTD22_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_swigregister
itkImageDuplicatorISSRTD22_swigregister(itkImageDuplicatorISSRTD22)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorISSRTD22___New_orig__() -> "itkImageDuplicatorISSRTD22_Pointer":
    """itkImageDuplicatorISSRTD22___New_orig__() -> itkImageDuplicatorISSRTD22_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22___New_orig__()
def itkImageDuplicatorISSRTD22_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD22 *":
    """itkImageDuplicatorISSRTD22_cast(itkLightObject obj) -> itkImageDuplicatorISSRTD22"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD22_cast(obj)
class itkImageDuplicatorISSRTD33(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorISSRTD33 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorISSRTD33_Pointer":
        """__New_orig__() -> itkImageDuplicatorISSRTD33_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorISSRTD33_Pointer":
        """Clone(itkImageDuplicatorISSRTD33 self) -> itkImageDuplicatorISSRTD33_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Clone(self)
    def SetInputImage(self, _arg: 'itkImageSSRTD33') -> "void":
        """SetInputImage(itkImageDuplicatorISSRTD33 self, itkImageSSRTD33 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageSSRTD33 *":
        """
        GetOutput(itkImageDuplicatorISSRTD33 self) -> itkImageSSRTD33
        GetOutput(itkImageDuplicatorISSRTD33 self) -> itkImageSSRTD33
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageSSRTD33 *":
        """GetModifiableOutput(itkImageDuplicatorISSRTD33 self) -> itkImageSSRTD33"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorISSRTD33 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorISSRTD33
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD33 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorISSRTD33"""
        return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorISSRTD33
        Create a new object of the class itkImageDuplicatorISSRTD33 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorISSRTD33.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorISSRTD33.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorISSRTD33.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorISSRTD33.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Clone, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_SetInputImage, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetOutput, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_GetModifiableOutput, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_Update, None, itkImageDuplicatorISSRTD33)
itkImageDuplicatorISSRTD33_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_swigregister
itkImageDuplicatorISSRTD33_swigregister(itkImageDuplicatorISSRTD33)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorISSRTD33___New_orig__() -> "itkImageDuplicatorISSRTD33_Pointer":
    """itkImageDuplicatorISSRTD33___New_orig__() -> itkImageDuplicatorISSRTD33_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33___New_orig__()
def itkImageDuplicatorISSRTD33_cast(obj: 'itkLightObject') -> "itkImageDuplicatorISSRTD33 *":
    """itkImageDuplicatorISSRTD33_cast(itkLightObject obj) -> itkImageDuplicatorISSRTD33"""
    return _itkImageDuplicatorPython.itkImageDuplicatorISSRTD33_cast(obj)
class itkImageDuplicatorIUC2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIUC2 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIUC2_Pointer":
        """__New_orig__() -> itkImageDuplicatorIUC2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIUC2_Pointer":
        """Clone(itkImageDuplicatorIUC2 self) -> itkImageDuplicatorIUC2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageUC2') -> "void":
        """SetInputImage(itkImageDuplicatorIUC2 self, itkImageUC2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageUC2 *":
        """
        GetOutput(itkImageDuplicatorIUC2 self) -> itkImageUC2
        GetOutput(itkImageDuplicatorIUC2 self) -> itkImageUC2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageUC2 *":
        """GetModifiableOutput(itkImageDuplicatorIUC2 self) -> itkImageUC2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIUC2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUC2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIUC2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIUC2
        Create a new object of the class itkImageDuplicatorIUC2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIUC2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIUC2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIUC2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorIUC2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_Clone, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_SetInputImage, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetOutput, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_GetModifiableOutput, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC2_Update, None, itkImageDuplicatorIUC2)
itkImageDuplicatorIUC2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUC2_swigregister
itkImageDuplicatorIUC2_swigregister(itkImageDuplicatorIUC2)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorIUC2___New_orig__() -> "itkImageDuplicatorIUC2_Pointer":
    """itkImageDuplicatorIUC2___New_orig__() -> itkImageDuplicatorIUC2_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIUC2___New_orig__()
def itkImageDuplicatorIUC2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC2 *":
    """itkImageDuplicatorIUC2_cast(itkLightObject obj) -> itkImageDuplicatorIUC2"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIUC2_cast(obj)
class itkImageDuplicatorIUC3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIUC3 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIUC3_Pointer":
        """__New_orig__() -> itkImageDuplicatorIUC3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIUC3_Pointer":
        """Clone(itkImageDuplicatorIUC3 self) -> itkImageDuplicatorIUC3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageUC3') -> "void":
        """SetInputImage(itkImageDuplicatorIUC3 self, itkImageUC3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageUC3 *":
        """
        GetOutput(itkImageDuplicatorIUC3 self) -> itkImageUC3
        GetOutput(itkImageDuplicatorIUC3 self) -> itkImageUC3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageUC3 *":
        """GetModifiableOutput(itkImageDuplicatorIUC3 self) -> itkImageUC3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIUC3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUC3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIUC3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIUC3
        Create a new object of the class itkImageDuplicatorIUC3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIUC3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIUC3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIUC3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the placeholder class-body methods to the compiled extension
# functions and register the proxy type with the SWIG runtime.
itkImageDuplicatorIUC3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_Clone, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_SetInputImage, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetOutput, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_GetModifiableOutput, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUC3_Update, None, itkImageDuplicatorIUC3)
itkImageDuplicatorIUC3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUC3_swigregister
itkImageDuplicatorIUC3_swigregister(itkImageDuplicatorIUC3)
# Module-level equivalents of the corresponding static class methods.
def itkImageDuplicatorIUC3___New_orig__() -> "itkImageDuplicatorIUC3_Pointer":
    """itkImageDuplicatorIUC3___New_orig__() -> itkImageDuplicatorIUC3_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIUC3___New_orig__()
def itkImageDuplicatorIUC3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUC3 *":
    """itkImageDuplicatorIUC3_cast(itkLightObject obj) -> itkImageDuplicatorIUC3"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIUC3_cast(obj)
class itkImageDuplicatorIUL2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIUL2 class.

    SWIG-generated wrapper: every method below simply delegates to the
    compiled extension module ``_itkImageDuplicatorPython``.  The method
    definitions in this class body are placeholders; they are rebound to
    ``new_instancemethod`` wrappers immediately after the class statement.
    """
    # SWIG ownership flag for the underlying C++ object (x.this.own()).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Instances are created via the New()/__New_orig__() factories only.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIUL2_Pointer":
        """__New_orig__() -> itkImageDuplicatorIUL2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2___New_orig__()
    # Defined without ``self``; expose it as a static factory method.
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIUL2_Pointer":
        """Clone(itkImageDuplicatorIUL2 self) -> itkImageDuplicatorIUL2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageUL2') -> "void":
        """SetInputImage(itkImageDuplicatorIUL2 self, itkImageUL2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageUL2 *":
        """
        GetOutput(itkImageDuplicatorIUL2 self) -> itkImageUL2
        GetOutput(itkImageDuplicatorIUL2 self) -> itkImageUL2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageUL2 *":
        """GetModifiableOutput(itkImageDuplicatorIUL2 self) -> itkImageUL2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIUL2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_Update(self)
    # C++ destructor hook used by SWIG when the proxy is garbage-collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUL2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIUL2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIUL2
        Create a new object of the class itkImageDuplicatorIUL2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIUL2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIUL2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIUL2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is run once the inputs are changed, and
        the current output is returned.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIUL2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_Clone, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_SetInputImage, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetOutput, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_GetModifiableOutput, None, itkImageDuplicatorIUL2)
itkImageDuplicatorIUL2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL2_Update, None, itkImageDuplicatorIUL2)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIUL2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUL2_swigregister
itkImageDuplicatorIUL2_swigregister(itkImageDuplicatorIUL2)
def itkImageDuplicatorIUL2___New_orig__() -> "itkImageDuplicatorIUL2_Pointer":
    """itkImageDuplicatorIUL2___New_orig__() -> itkImageDuplicatorIUL2_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIUL2___New_orig__()
    return new_pointer
def itkImageDuplicatorIUL2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL2 *":
    """itkImageDuplicatorIUL2_cast(itkLightObject obj) -> itkImageDuplicatorIUL2"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIUL2_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IUL3' presumably encodes itk::Image<unsigned long, 3> -- confirm
# against the ITK wrapping configuration).  NOTE(review): generated code; the
# inline method defs below are placeholders that are rebound to C-extension
# wrappers via new_instancemethod() right after this class statement.
class itkImageDuplicatorIUL3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIUL3 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIUL3_Pointer":
        """__New_orig__() -> itkImageDuplicatorIUL3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIUL3_Pointer":
        """Clone(itkImageDuplicatorIUL3 self) -> itkImageDuplicatorIUL3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageUL3') -> "void":
        """SetInputImage(itkImageDuplicatorIUL3 self, itkImageUL3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageUL3 *":
        """
        GetOutput(itkImageDuplicatorIUL3 self) -> itkImageUL3
        GetOutput(itkImageDuplicatorIUL3 self) -> itkImageUL3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageUL3 *":
        """GetModifiableOutput(itkImageDuplicatorIUL3 self) -> itkImageUL3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIUL3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUL3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIUL3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUL3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIUL3
        Create a new object of the class itkImageDuplicatorIUL3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIUL3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIUL3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIUL3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIUL3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_Clone, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_SetInputImage, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetOutput, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_GetModifiableOutput, None, itkImageDuplicatorIUL3)
itkImageDuplicatorIUL3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUL3_Update, None, itkImageDuplicatorIUL3)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIUL3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUL3_swigregister
itkImageDuplicatorIUL3_swigregister(itkImageDuplicatorIUL3)
def itkImageDuplicatorIUL3___New_orig__() -> "itkImageDuplicatorIUL3_Pointer":
    """itkImageDuplicatorIUL3___New_orig__() -> itkImageDuplicatorIUL3_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIUL3___New_orig__()
    return new_pointer
def itkImageDuplicatorIUL3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUL3 *":
    """itkImageDuplicatorIUL3_cast(itkLightObject obj) -> itkImageDuplicatorIUL3"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIUL3_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IUS2' presumably encodes itk::Image<unsigned short, 2> -- confirm
# against the ITK wrapping configuration).  NOTE(review): generated code; the
# inline method defs below are placeholders that are rebound to C-extension
# wrappers via new_instancemethod() right after this class statement.
class itkImageDuplicatorIUS2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIUS2 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIUS2_Pointer":
        """__New_orig__() -> itkImageDuplicatorIUS2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIUS2_Pointer":
        """Clone(itkImageDuplicatorIUS2 self) -> itkImageDuplicatorIUS2_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_Clone(self)
    def SetInputImage(self, _arg: 'itkImageUS2') -> "void":
        """SetInputImage(itkImageDuplicatorIUS2 self, itkImageUS2 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageUS2 *":
        """
        GetOutput(itkImageDuplicatorIUS2 self) -> itkImageUS2
        GetOutput(itkImageDuplicatorIUS2 self) -> itkImageUS2
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageUS2 *":
        """GetModifiableOutput(itkImageDuplicatorIUS2 self) -> itkImageUS2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIUS2 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUS2
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS2 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIUS2"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS2_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIUS2
        Create a new object of the class itkImageDuplicatorIUS2 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIUS2.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIUS2.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIUS2.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIUS2.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_Clone, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_SetInputImage, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetOutput, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_GetModifiableOutput, None, itkImageDuplicatorIUS2)
itkImageDuplicatorIUS2.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS2_Update, None, itkImageDuplicatorIUS2)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIUS2_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUS2_swigregister
itkImageDuplicatorIUS2_swigregister(itkImageDuplicatorIUS2)
def itkImageDuplicatorIUS2___New_orig__() -> "itkImageDuplicatorIUS2_Pointer":
    """itkImageDuplicatorIUS2___New_orig__() -> itkImageDuplicatorIUS2_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIUS2___New_orig__()
    return new_pointer
def itkImageDuplicatorIUS2_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS2 *":
    """itkImageDuplicatorIUS2_cast(itkLightObject obj) -> itkImageDuplicatorIUS2"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIUS2_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IUS3' presumably encodes itk::Image<unsigned short, 3> -- confirm
# against the ITK wrapping configuration).  NOTE(review): generated code; the
# inline method defs below are placeholders that are rebound to C-extension
# wrappers via new_instancemethod() right after this class statement.
class itkImageDuplicatorIUS3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIUS3 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIUS3_Pointer":
        """__New_orig__() -> itkImageDuplicatorIUS3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIUS3_Pointer":
        """Clone(itkImageDuplicatorIUS3 self) -> itkImageDuplicatorIUS3_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_Clone(self)
    def SetInputImage(self, _arg: 'itkImageUS3') -> "void":
        """SetInputImage(itkImageDuplicatorIUS3 self, itkImageUS3 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageUS3 *":
        """
        GetOutput(itkImageDuplicatorIUS3 self) -> itkImageUS3
        GetOutput(itkImageDuplicatorIUS3 self) -> itkImageUS3
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageUS3 *":
        """GetModifiableOutput(itkImageDuplicatorIUS3 self) -> itkImageUS3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIUS3 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIUS3
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS3 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIUS3"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIUS3_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIUS3
        Create a new object of the class itkImageDuplicatorIUS3 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIUS3.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIUS3.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIUS3.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIUS3.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_Clone, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_SetInputImage, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetOutput, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_GetModifiableOutput, None, itkImageDuplicatorIUS3)
itkImageDuplicatorIUS3.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIUS3_Update, None, itkImageDuplicatorIUS3)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIUS3_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIUS3_swigregister
itkImageDuplicatorIUS3_swigregister(itkImageDuplicatorIUS3)
def itkImageDuplicatorIUS3___New_orig__() -> "itkImageDuplicatorIUS3_Pointer":
    """itkImageDuplicatorIUS3___New_orig__() -> itkImageDuplicatorIUS3_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIUS3___New_orig__()
    return new_pointer
def itkImageDuplicatorIUS3_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIUS3 *":
    """itkImageDuplicatorIUS3_cast(itkLightObject obj) -> itkImageDuplicatorIUS3"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIUS3_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IVF22' presumably encodes a 2-D image of itk::Vector<float, 2>
# pixels -- confirm against the ITK wrapping configuration).  NOTE(review):
# generated code; the inline method defs below are placeholders that are
# rebound to C-extension wrappers via new_instancemethod() right after this
# class statement.
class itkImageDuplicatorIVF22(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIVF22 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIVF22_Pointer":
        """__New_orig__() -> itkImageDuplicatorIVF22_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIVF22_Pointer":
        """Clone(itkImageDuplicatorIVF22 self) -> itkImageDuplicatorIVF22_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_Clone(self)
    def SetInputImage(self, _arg: 'itkImageVF22') -> "void":
        """SetInputImage(itkImageDuplicatorIVF22 self, itkImageVF22 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageVF22 *":
        """
        GetOutput(itkImageDuplicatorIVF22 self) -> itkImageVF22
        GetOutput(itkImageDuplicatorIVF22 self) -> itkImageVF22
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageVF22 *":
        """GetModifiableOutput(itkImageDuplicatorIVF22 self) -> itkImageVF22"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIVF22 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF22
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF22 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIVF22"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF22_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIVF22
        Create a new object of the class itkImageDuplicatorIVF22 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIVF22.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIVF22.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIVF22.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIVF22.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_Clone, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_SetInputImage, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetOutput, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_GetModifiableOutput, None, itkImageDuplicatorIVF22)
itkImageDuplicatorIVF22.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF22_Update, None, itkImageDuplicatorIVF22)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIVF22_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF22_swigregister
itkImageDuplicatorIVF22_swigregister(itkImageDuplicatorIVF22)
def itkImageDuplicatorIVF22___New_orig__() -> "itkImageDuplicatorIVF22_Pointer":
    """itkImageDuplicatorIVF22___New_orig__() -> itkImageDuplicatorIVF22_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIVF22___New_orig__()
    return new_pointer
def itkImageDuplicatorIVF22_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF22 *":
    """itkImageDuplicatorIVF22_cast(itkLightObject obj) -> itkImageDuplicatorIVF22"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIVF22_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IVF23' presumably encodes a 3-D image of itk::Vector<float, 2>
# pixels -- confirm against the ITK wrapping configuration).  NOTE(review):
# generated code; the inline method defs below are placeholders that are
# rebound to C-extension wrappers via new_instancemethod() right after this
# class statement.
class itkImageDuplicatorIVF23(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIVF23 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIVF23_Pointer":
        """__New_orig__() -> itkImageDuplicatorIVF23_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIVF23_Pointer":
        """Clone(itkImageDuplicatorIVF23 self) -> itkImageDuplicatorIVF23_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_Clone(self)
    def SetInputImage(self, _arg: 'itkImageVF23') -> "void":
        """SetInputImage(itkImageDuplicatorIVF23 self, itkImageVF23 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageVF23 *":
        """
        GetOutput(itkImageDuplicatorIVF23 self) -> itkImageVF23
        GetOutput(itkImageDuplicatorIVF23 self) -> itkImageVF23
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageVF23 *":
        """GetModifiableOutput(itkImageDuplicatorIVF23 self) -> itkImageVF23"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIVF23 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF23
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF23 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIVF23"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF23_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIVF23
        Create a new object of the class itkImageDuplicatorIVF23 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIVF23.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIVF23.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIVF23.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIVF23.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_Clone, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_SetInputImage, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetOutput, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_GetModifiableOutput, None, itkImageDuplicatorIVF23)
itkImageDuplicatorIVF23.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF23_Update, None, itkImageDuplicatorIVF23)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIVF23_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF23_swigregister
itkImageDuplicatorIVF23_swigregister(itkImageDuplicatorIVF23)
def itkImageDuplicatorIVF23___New_orig__() -> "itkImageDuplicatorIVF23_Pointer":
    """itkImageDuplicatorIVF23___New_orig__() -> itkImageDuplicatorIVF23_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIVF23___New_orig__()
    return new_pointer
def itkImageDuplicatorIVF23_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF23 *":
    """itkImageDuplicatorIVF23_cast(itkLightObject obj) -> itkImageDuplicatorIVF23"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIVF23_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IVF32' presumably encodes a 2-D image of itk::Vector<float, 3>
# pixels -- confirm against the ITK wrapping configuration).  NOTE(review):
# generated code; the inline method defs below are placeholders that are
# rebound to C-extension wrappers via new_instancemethod() right after this
# class statement.
class itkImageDuplicatorIVF32(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIVF32 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIVF32_Pointer":
        """__New_orig__() -> itkImageDuplicatorIVF32_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIVF32_Pointer":
        """Clone(itkImageDuplicatorIVF32 self) -> itkImageDuplicatorIVF32_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_Clone(self)
    def SetInputImage(self, _arg: 'itkImageVF32') -> "void":
        """SetInputImage(itkImageDuplicatorIVF32 self, itkImageVF32 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageVF32 *":
        """
        GetOutput(itkImageDuplicatorIVF32 self) -> itkImageVF32
        GetOutput(itkImageDuplicatorIVF32 self) -> itkImageVF32
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageVF32 *":
        """GetModifiableOutput(itkImageDuplicatorIVF32 self) -> itkImageVF32"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIVF32 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF32
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF32 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIVF32"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF32_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIVF32
        Create a new object of the class itkImageDuplicatorIVF32 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIVF32.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIVF32.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIVF32.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIVF32.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_Clone, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_SetInputImage, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetOutput, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_GetModifiableOutput, None, itkImageDuplicatorIVF32)
itkImageDuplicatorIVF32.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF32_Update, None, itkImageDuplicatorIVF32)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIVF32_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF32_swigregister
itkImageDuplicatorIVF32_swigregister(itkImageDuplicatorIVF32)
def itkImageDuplicatorIVF32___New_orig__() -> "itkImageDuplicatorIVF32_Pointer":
    """itkImageDuplicatorIVF32___New_orig__() -> itkImageDuplicatorIVF32_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIVF32___New_orig__()
    return new_pointer
def itkImageDuplicatorIVF32_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF32 *":
    """itkImageDuplicatorIVF32_cast(itkLightObject obj) -> itkImageDuplicatorIVF32"""
    # Free-function down-cast helper; delegates to the C-extension wrapper.
    casted = _itkImageDuplicatorPython.itkImageDuplicatorIVF32_cast(obj)
    return casted
# SWIG-generated proxy for a C++ itk::ImageDuplicator template instantiation
# (suffix 'IVF33' presumably encodes a 3-D image of itk::Vector<float, 3>
# pixels -- confirm against the ITK wrapping configuration).  NOTE(review):
# generated code; the inline method defs below are placeholders that are
# rebound to C-extension wrappers via new_instancemethod() right after this
# class statement.
class itkImageDuplicatorIVF33(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIVF33 class."""
    # Standard SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; instances come from the New() factory.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkImageDuplicatorIVF33_Pointer":
        """__New_orig__() -> itkImageDuplicatorIVF33_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)
    def Clone(self) -> "itkImageDuplicatorIVF33_Pointer":
        """Clone(itkImageDuplicatorIVF33 self) -> itkImageDuplicatorIVF33_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_Clone(self)
    def SetInputImage(self, _arg: 'itkImageVF33') -> "void":
        """SetInputImage(itkImageDuplicatorIVF33 self, itkImageVF33 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_SetInputImage(self, _arg)
    def GetOutput(self, *args) -> "itkImageVF33 *":
        """
        GetOutput(itkImageDuplicatorIVF33 self) -> itkImageVF33
        GetOutput(itkImageDuplicatorIVF33 self) -> itkImageVF33
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetOutput(self, *args)
    def GetModifiableOutput(self) -> "itkImageVF33 *":
        """GetModifiableOutput(itkImageDuplicatorIVF33 self) -> itkImageVF33"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetModifiableOutput(self)
    def Update(self) -> "void":
        """Update(itkImageDuplicatorIVF33 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_Update(self)
    # C-extension destructor invoked by SWIG when the proxy is garbage collected.
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF33
    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF33 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIVF33"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIVF33
        Create a new object of the class itkImageDuplicatorIVF33 and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkImageDuplicatorIVF33.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkImageDuplicatorIVF33.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIVF33.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.
        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the proxy's methods to the raw C-extension wrappers; these
# assignments replace the placeholder defs inside the class body above.
itkImageDuplicatorIVF33.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_Clone, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_SetInputImage, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetOutput, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_GetModifiableOutput, None, itkImageDuplicatorIVF33)
itkImageDuplicatorIVF33.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF33_Update, None, itkImageDuplicatorIVF33)
# Register the proxy class with the SWIG runtime type system.
itkImageDuplicatorIVF33_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF33_swigregister
itkImageDuplicatorIVF33_swigregister(itkImageDuplicatorIVF33)
def itkImageDuplicatorIVF33___New_orig__() -> "itkImageDuplicatorIVF33_Pointer":
    """itkImageDuplicatorIVF33___New_orig__() -> itkImageDuplicatorIVF33_Pointer"""
    # Module-level factory alias; delegates to the C-extension wrapper.
    new_pointer = _itkImageDuplicatorPython.itkImageDuplicatorIVF33___New_orig__()
    return new_pointer
# Module-level alias for the class's static downcast helper.
def itkImageDuplicatorIVF33_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF33 *":
    """itkImageDuplicatorIVF33_cast(itkLightObject obj) -> itkImageDuplicatorIVF33"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIVF33_cast(obj)
# SWIG/WrapITK auto-generated proxy for itk::ImageDuplicator instantiated on
# 2-D images of 4-component float vectors (IVF42).  Do not hand-edit the
# wrapper methods: regeneration will overwrite them.
class itkImageDuplicatorIVF42(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIVF42 class."""

    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorIVF42_Pointer":
        """__New_orig__() -> itkImageDuplicatorIVF42_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorIVF42_Pointer":
        """Clone(itkImageDuplicatorIVF42 self) -> itkImageDuplicatorIVF42_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_Clone(self)

    def SetInputImage(self, _arg: 'itkImageVF42') -> "void":
        """SetInputImage(itkImageDuplicatorIVF42 self, itkImageVF42 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageVF42 *":
        """
        GetOutput(itkImageDuplicatorIVF42 self) -> itkImageVF42
        GetOutput(itkImageDuplicatorIVF42 self) -> itkImageVF42
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageVF42 *":
        """GetModifiableOutput(itkImageDuplicatorIVF42 self) -> itkImageVF42"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorIVF42 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF42

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF42 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIVF42"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIVF42

        Create a new object of the class itkImageDuplicatorIVF42 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorIVF42.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorIVF42.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIVF42.__New_orig__()
        import itkTemplate
        # Delegate argument/keyword dispatching to the shared itkTemplate helper.
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the flat SWIG wrapper functions as bound instance methods on the
# itkImageDuplicatorIVF42 proxy class, then register the class with the SWIG
# runtime type system.
itkImageDuplicatorIVF42.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_Clone, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_SetInputImage, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetOutput, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_GetModifiableOutput, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF42_Update, None, itkImageDuplicatorIVF42)
itkImageDuplicatorIVF42_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF42_swigregister
itkImageDuplicatorIVF42_swigregister(itkImageDuplicatorIVF42)
# Module-level alias for the class's static __New_orig__ factory.
def itkImageDuplicatorIVF42___New_orig__() -> "itkImageDuplicatorIVF42_Pointer":
    """itkImageDuplicatorIVF42___New_orig__() -> itkImageDuplicatorIVF42_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIVF42___New_orig__()
# Module-level alias for the class's static downcast helper.
def itkImageDuplicatorIVF42_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF42 *":
    """itkImageDuplicatorIVF42_cast(itkLightObject obj) -> itkImageDuplicatorIVF42"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIVF42_cast(obj)
# SWIG/WrapITK auto-generated proxy for itk::ImageDuplicator instantiated on
# 3-D images of 4-component float vectors (IVF43).  Do not hand-edit the
# wrapper methods: regeneration will overwrite them.
class itkImageDuplicatorIVF43(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkImageDuplicatorIVF43 class."""

    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr

    def __New_orig__() -> "itkImageDuplicatorIVF43_Pointer":
        """__New_orig__() -> itkImageDuplicatorIVF43_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43___New_orig__()

    __New_orig__ = staticmethod(__New_orig__)

    def Clone(self) -> "itkImageDuplicatorIVF43_Pointer":
        """Clone(itkImageDuplicatorIVF43 self) -> itkImageDuplicatorIVF43_Pointer"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_Clone(self)

    def SetInputImage(self, _arg: 'itkImageVF43') -> "void":
        """SetInputImage(itkImageDuplicatorIVF43 self, itkImageVF43 _arg)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_SetInputImage(self, _arg)

    def GetOutput(self, *args) -> "itkImageVF43 *":
        """
        GetOutput(itkImageDuplicatorIVF43 self) -> itkImageVF43
        GetOutput(itkImageDuplicatorIVF43 self) -> itkImageVF43
        """
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetOutput(self, *args)

    def GetModifiableOutput(self) -> "itkImageVF43 *":
        """GetModifiableOutput(itkImageDuplicatorIVF43 self) -> itkImageVF43"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetModifiableOutput(self)

    def Update(self) -> "void":
        """Update(itkImageDuplicatorIVF43 self)"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_Update(self)
    __swig_destroy__ = _itkImageDuplicatorPython.delete_itkImageDuplicatorIVF43

    def cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF43 *":
        """cast(itkLightObject obj) -> itkImageDuplicatorIVF43"""
        return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_cast(obj)

    cast = staticmethod(cast)

    def New(*args, **kargs):
        """New() -> itkImageDuplicatorIVF43

        Create a new object of the class itkImageDuplicatorIVF43 and set the input and the parameters if some
        named or non-named arguments are passed to that method.

        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.

        The named parameters are used by calling the method with the same name prefixed by 'Set'.

        Ex:

          itkImageDuplicatorIVF43.New( reader, Threshold=10 )

        is (most of the time) equivalent to:

          obj = itkImageDuplicatorIVF43.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkImageDuplicatorIVF43.__New_orig__()
        import itkTemplate
        # Delegate argument/keyword dispatching to the shared itkTemplate helper.
        itkTemplate.New(obj, *args, **kargs)
        return obj

    New = staticmethod(New)

    def __internal_call__(self):
        """Create an object, update with the inputs and
        attributes, and return the result.

        The syntax is the same as the one used in New().
        Update() is ran once the input are changed, and
        the current output.
        """
        self.Update()
        return self.GetOutput()
# Rebind the flat SWIG wrapper functions as bound instance methods on the
# itkImageDuplicatorIVF43 proxy class, then register the class with the SWIG
# runtime type system.
itkImageDuplicatorIVF43.Clone = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_Clone, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.SetInputImage = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_SetInputImage, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.GetOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetOutput, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.GetModifiableOutput = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_GetModifiableOutput, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43.Update = new_instancemethod(_itkImageDuplicatorPython.itkImageDuplicatorIVF43_Update, None, itkImageDuplicatorIVF43)
itkImageDuplicatorIVF43_swigregister = _itkImageDuplicatorPython.itkImageDuplicatorIVF43_swigregister
itkImageDuplicatorIVF43_swigregister(itkImageDuplicatorIVF43)
# Module-level alias for the class's static __New_orig__ factory.
def itkImageDuplicatorIVF43___New_orig__() -> "itkImageDuplicatorIVF43_Pointer":
    """itkImageDuplicatorIVF43___New_orig__() -> itkImageDuplicatorIVF43_Pointer"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIVF43___New_orig__()
# Module-level alias for the class's static downcast helper.
def itkImageDuplicatorIVF43_cast(obj: 'itkLightObject') -> "itkImageDuplicatorIVF43 *":
    """itkImageDuplicatorIVF43_cast(itkLightObject obj) -> itkImageDuplicatorIVF43"""
    return _itkImageDuplicatorPython.itkImageDuplicatorIVF43_cast(obj)
def image_duplicator(*args, **kwargs):
    """Procedural interface for ImageDuplicator"""
    # Instantiate the templated filter via the functional-style New() and run
    # it immediately, returning the duplicated image.
    import itk
    instance = itk.ImageDuplicator.New(*args, **kwargs)
    return instance.__internal_call__()
def image_duplicator_init_docstring():
    """Copy the ImageDuplicator class docstring onto the procedural wrapper."""
    import itk
    import itkTemplate
    # ImageDuplicator may be an itkTemplate holding several instantiations or
    # a single class; in the former case borrow the first instantiation's doc.
    if isinstance(itk.ImageDuplicator, itkTemplate.itkTemplate):
        image_duplicator.__doc__ = itk.ImageDuplicator.values()[0].__doc__
    else:
        image_duplicator.__doc__ = itk.ImageDuplicator.__doc__
|
[
"44883043+ssalmaan@users.noreply.github.com"
] |
44883043+ssalmaan@users.noreply.github.com
|
a6ddd8d4f9f38b972820df093ba01dd54e608881
|
d701f0f5027c77fa41b0a9209b8989b9d0316aa8
|
/brain_games/logic/prime.py
|
fc01cab602a8fff9789a05a778f877bafe972ecd
|
[] |
no_license
|
DenisTrukhin/python-project-lvl1
|
8d4c548bbb2bffed117340b8b480fade01cd044c
|
a4c6f8286b3e0dc8f0fcbbd848f54a8293cc4a85
|
refs/heads/main
| 2023-01-31T20:39:58.337152
| 2020-12-16T14:21:12
| 2020-12-16T14:24:31
| 311,041,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
import random
def is_prime(n):
    """Return True if n is a prime number.

    Uses trial division: handles even numbers directly, then checks odd
    divisors up to sqrt(n).

    Args:
        n: integer to test.

    Returns:
        True when n is prime, False otherwise.
    """
    # Numbers below 2 (1, 0 and negatives) are not prime.  The original
    # implementation fell through to the odd-divisor loop for n == 1 and
    # wrongly reported it as prime.
    if n < 2:
        return False
    if n % 2 == 0:
        # 2 is the only even prime.
        return n == 2
    # Only odd divisors up to sqrt(n) need to be checked.
    d = 3
    while d * d <= n and n % d != 0:
        d += 2
    return d * d > n
def prime():
    """Build one round of the 'is this number prime?' game.

    Returns:
        A (question, correct_answer, answer_type) triple: the question is a
        random number in [1, 1000] rendered as a string, the expected answer
        is 'yes' or 'no', and answers are compared as str.
    """
    number = random.randint(1, 1000)
    if is_prime(number):
        answer = 'yes'
    else:
        answer = 'no'
    return str(number), answer, str
|
[
"denis.trukhin@balance-pl.ru"
] |
denis.trukhin@balance-pl.ru
|
d11816bf434a2bc2cc962cfc510a0957f1ccd0a4
|
04f210ca1ec260a9fd1fa24a55876098a63203ba
|
/6GetParameters2.py
|
8cb6b0427ba39fb2c47d6dc8418baca39921a09a
|
[] |
no_license
|
FocusYangQ/PythonSpider
|
bc757bf68fbea2c7d8ce8ff3a7500fbbd1a93c9f
|
e5d349efe4dd894bec688662a773e4756f466cd0
|
refs/heads/master
| 2023-08-24T22:27:46.400172
| 2021-10-15T13:08:09
| 2021-10-15T13:08:09
| 415,536,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
import requests

# Demonstrates passing query-string parameters with requests.get: the
# `params` mapping is URL-encoded onto the request URL
# (http://httpbin.org/get?name=hezhi&age=20) and httpbin echoes the received
# arguments back in the JSON response body.
if __name__ == '__main__' :
    data = {
        "name" : "hezhi" ,
        "age" : 20
    }
    response = requests.get ( "http://httpbin.org/get" , params = data )
    # 200 on success; the body is the JSON echo of our query parameters.
    print ( response.status_code )
    print ( response.text )
|
[
"35205142+FocusYangQ@users.noreply.github.com"
] |
35205142+FocusYangQ@users.noreply.github.com
|
ce7fe6a703bd4648ba39e7da18929b3abdecddae
|
1cf84b1c4e0bfc3145b758879e5c4648fab8024d
|
/7.5. Filter out 53 copies of the products to be tested in the table.py
|
c7504bd073b3b26981a001341db332d037c1d306
|
[
"Apache-2.0"
] |
permissive
|
zy-yao/Clothes-Matching-on-Taobao
|
8d8e9c73c8f429748ec89cffec9e1ea8d20e1810
|
1910c25bd764495021c3a4e2f6a11c5cdb56f15f
|
refs/heads/master
| 2023-01-30T15:19:46.948461
| 2020-12-03T15:07:46
| 2020-12-03T15:07:46
| 276,325,917
| 0
| 1
| null | 2020-07-16T09:35:53
| 2020-07-01T08:49:20
|
Python
|
UTF-8
|
Python
| false
| false
| 366
|
py
|
import pandas as pd

# For each of the 53 per-week result files, keep only the rows whose items
# also appear in testItems.csv (pd.merge defaults to an inner join on the
# shared columns) and write the filtered copy out with a 'last' suffix.
for i in range(0,53):
    d = pd.read_csv('/Users/YAO/.spyder-py3/ForPython/new method/rule1_weekresult3/result'+str(i+1)+'_3.csv')
    # NOTE(review): testItems.csv is re-read on every iteration; it could be
    # loaded once before the loop.
    testItems = pd.read_csv('testItems.csv')
    predict = pd.merge(testItems,d)
    predict.to_csv('/Users/YAO/.spyder-py3/ForPython/new method/rule1_weekresult_last/result'+str(i+1)+'last.csv',index = False)
|
[
"33860709+zy-yao@users.noreply.github.com"
] |
33860709+zy-yao@users.noreply.github.com
|
b8fbc89b00c608ef7d1a47c1ca35b6688318a5ea
|
2776f806297ae2f05d6c6bcbf2205ed8eb3b9db8
|
/ico/tests/contracts/test_require_customer_id.py
|
3dfa23e0d176551f9efd672c9a4c5a07982db7b1
|
[
"Apache-2.0"
] |
permissive
|
ZOLTbyZENUM/ico
|
138207db242053ded62ecc9a4f7d273209232a3f
|
26e4ae717e5f04a3f41f32f5f52f7dddedaac65d
|
refs/heads/master
| 2022-12-12T16:01:40.922647
| 2018-02-28T12:47:33
| 2018-02-28T12:47:33
| 123,442,497
| 0
| 0
|
NOASSERTION
| 2022-12-08T00:42:32
| 2018-03-01T14:02:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
"""Customer id tracking."""
import uuid
import pytest
from ethereum.tester import TransactionFailed
from eth_utils import to_wei
from ico.tests.utils import time_travel
from ico.state import CrowdsaleState
from sha3 import keccak_256
from rlp.utils import decode_hex
@pytest.fixture
def crowdsale(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig):
    """Set up a crowdsale with customer id require policy."""
    # The owner (team multisig) flips the flag so plain buy() without a
    # customer id will be rejected by the contract.
    uncapped_flatprice.transact({"from": team_multisig}).setRequireCustomerId(True)
    return uncapped_flatprice
@pytest.fixture
def token(uncapped_token):
    """Token contract we are buying."""
    return uncapped_token
@pytest.fixture
def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:
    """Generate UUID v4 customer id as a hex string."""
    # NOTE(review): despite the docstring, this returns an int (the UUID's
    # 128-bit value), matching the declared return annotation.
    customer_id = int(uuid.uuid4().hex, 16)  # Customer ids are 128-bit UUID v4
    return customer_id
def test_only_owner_change_change_policy(crowdsale, customer):
    """Only owner change change customerId required policy."""
    # A non-owner account attempting to flip the flag must revert.
    with pytest.raises(TransactionFailed):
        crowdsale.transact({"from": customer}).setRequireCustomerId(False)
def test_participate_with_customer_id(chain, crowdsale, customer, customer_id, token):
    """Buy tokens with a proper customer id."""
    # Move past the sale start so the crowdsale is in its Funding state.
    time_travel(chain, crowdsale.call().startsAt() + 1)
    wei_value = to_wei(1, "ether")
    assert crowdsale.call().getState() == CrowdsaleState.Funding
    # First byte of keccak-256 over the 16 customer-id bytes (the 128-bit id
    # zero-padded to 32 hex chars); the contract verifies this checksum.
    checksumbyte = keccak_256(decode_hex(format(customer_id, 'x').zfill(32))).digest()[:1]
    crowdsale.transact({"from": customer, "value": wei_value}).buyWithCustomerIdWithChecksum(customer_id, checksumbyte)

    # We got credited
    assert token.call().balanceOf(customer) > 0

    # We have tracked the investor id
    events = crowdsale.pastEvents("Invested").get()
    assert len(events) == 1
    e = events[0]
    assert e["args"]["investor"] == customer
    assert e["args"]["weiAmount"] == wei_value
    assert e["args"]["customerId"] == customer_id
def test_participate_missing_customer_id(chain, crowdsale, customer, customer_id, token):
    """Cannot bypass customer id process."""
    time_travel(chain, crowdsale.call().startsAt() + 1)
    wei_value = to_wei(1, "ether")
    assert crowdsale.call().getState() == CrowdsaleState.Funding
    # Plain buy() without a customer id must revert while the policy is on.
    with pytest.raises(TransactionFailed):
        crowdsale.transact({"from": customer, "value": wei_value}).buy()
|
[
"mikko@opensourcehacker.com"
] |
mikko@opensourcehacker.com
|
01bed1080979fef67de4c80a741868bc5950a4d9
|
8ba01e6790ce6d0a3e25b3e05281dd1aa64d2f2f
|
/classification/lightgbm.py
|
77fc6f9f85991a596e3440ead795b2810d5ce5af
|
[] |
no_license
|
weineng-zhou/MachineLearning
|
4a358026627a693f3297d2b770e699d351bfc15c
|
30e327154619096fa9dc6aba3142f7d8cc50c4d6
|
refs/heads/master
| 2022-06-14T11:33:13.014329
| 2020-05-04T07:58:07
| 2020-05-04T07:58:07
| 261,112,109
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,130
|
py
|
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
import itertools
from sklearn.metrics import confusion_matrix
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve
from scipy import interp
from sklearn.model_selection import StratifiedKFold
import os
try:
os.mkdir("output")
except FileExistsError:
pass
# global variable
date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
author = 'Weineng Zhou'
ML_method = 'Light Gredient Boosting Machine'
abbreviate = 'LGBM'
dev_data = './data/drug_molecule.xlsx'
val_data = './data/validation.xlsx'
class_names = ['非有效药物', '有效药物']
t1 = datetime.datetime.now()
print('开始时间:', t1.strftime('%Y-%m-%d %H:%M:%S'))
###############################################################################
# load the data
a = pd.read_excel('{}'.format(dev_data))
a = np.array(a)
#get independent variable
X = a[:,:-1]
#get the dependent variable
y = a[:,-1]
#split the data at a ratio of 4:1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, \
random_state=0)
# y_train = np.ravel(y_train)
# default model
model = LGBMClassifier()
n_estimators = [100, 200, 300]
learning_rate = [0.01, 0.1, 1]
# Set the parameters by 10-fold cross-validation
tuned_parameters = {'n_estimators': n_estimators,
'learning_rate': learning_rate}
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
classifier = GridSearchCV(model, tuned_parameters, cv=10,
scoring='%s_macro' % score)
classifier.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(classifier.best_params_)
print()
print("Grid scores on development set:")
print()
means = classifier.cv_results_['mean_test_score']
stds = classifier.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, classifier.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, classifier.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# set up the best parameters
classifier = LGBMClassifier(n_estimators=300, learning_rate=0.1, num_leaves=31)
# fit the model
classifier.fit(X_train, y_train)
# predic by X_train
y_train_pred = classifier.predict(X_train)
# predict by X_test
y_test_pred = classifier.predict(X_test)
#the label of confusion matrix
class_names = np.array(class_names)
# plot the confusion matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as an annotated heat map.

    Args:
        cm: square confusion matrix, e.g. from sklearn.metrics.confusion_matrix.
        classes: sequence of class labels used for the axis ticks.
        normalize: when True, convert counts to per-true-label fractions.
        title: figure title.
        cmap: matplotlib colormap for the heat map.
    """
    # Normalize BEFORE drawing so the heat map, the printed matrix and the
    # cell annotations all show the same values.  (Previously normalization
    # happened after plt.imshow, so the image colors reflected raw counts
    # even when normalize=True.)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate each cell, switching text color for contrast on dark cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.show()
# calculate the training set confusion matrix
cnf_matrix = confusion_matrix(y_train, y_train_pred)
np.set_printoptions(precision=2)
# without normalization confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,title='训练集')
plt.savefig('./output/train_matrix_{}.tiff'.format(abbreviate), dpi=500)
plt.show()
# calculate the test set confusion matrix
cnf_matrix = confusion_matrix(y_test, y_test_pred)
np.set_printoptions(precision=2)
# without normalization confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,title='测试集')
plt.savefig('./output/test_matrix_{}.tiff'.format(abbreviate), dpi=500)
plt.show()
###############################################################################
# 10-fold cross validation for accuracy
from sklearn.model_selection import cross_val_score
import seaborn as sns
scores = cross_val_score(model, X, y, cv=10)
scores
scores.mean()
print(scores)
print(scores.mean())
scores_df = pd.DataFrame(scores)
name = ['LGBM']*10
name_df = pd.DataFrame(name)
M = pd.concat([name_df, scores_df], axis=1) #横向拼接数据框
M.columns=['Model', 'Accuracy']
M.to_excel('./output/{}_Accuracy.xlsx'.format(abbreviate), index=False)
sns.boxplot(data=M, x = 'Model', y = 'Accuracy', color='#00b8e5')
plt.savefig("./output/boxplot.tiff", dpi=600)
###############################################################################
# the predicted label of validation
b = pd.read_excel('{}'.format(val_data))
#get independent variable
b = np.array(b)
#get independent variable
X_val = b[:,:-1]
#get the dependent variable
y_val = b[:,-1]
#predict the X_val
y_val_pred = classifier.predict(X_val)
#output the label of validation
y_val_pred_df = pd.DataFrame(y_val_pred)
y_val_pred_df.to_excel('./output/label_{}.xlsx'.format(abbreviate))
###############################################################################
# plot the confusion matrix
#calculate the validation set confusion matrix
cnf_matrix = confusion_matrix(y_val, y_val_pred)
np.set_printoptions(precision=2)
#without normalization confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, title='验证集')
plt.savefig('./output/val_matrix_{}.tiff'.format(abbreviate), dpi=500)
###############################################################################
ax = plt.gca()
lgbc_disp = plot_roc_curve(classifier, X_train, y_train, ax=ax, color='#00bc57', lw=2, alpha=0.8)
#ROC plot of training set
ax.plot([0, 1], [0, 1], color='#00bc57', lw=2, linestyle='--')
# ax.xlim([0.0, 1.0])
# ax.ylim([0.0, 1.05])
ax.set(xlim=[-0.05, 1.05],
ylim=[-0.05, 1.05],
xlabel='False Positive Rate (1-Specificity)',
ylabel='Ture Positive Rate (Sensitivity)',
title="ROC curve for training set")
ax.legend(loc="lower right")
ax.legend(loc="lower right")
plt.savefig('./output/roc_train_{}.tiff'.format(abbreviate), dpi=500)
plt.show()
ax = plt.gca()
lgbc_disp = plot_roc_curve(classifier, X_test, y_test, ax=ax, color='#00b8e5', lw=2, alpha=0.8)
#ROC plot of training set
ax.plot([0, 1], [0, 1], color='#00bc57', lw=2, linestyle='--')
# ax.xlim([0.0, 1.0])
# ax.ylim([0.0, 1.05])
ax.set(xlim=[-0.05, 1.05],
ylim=[-0.05, 1.05],
xlabel='False Positive Rate (1-Specificity)',
ylabel='Ture Positive Rate (Sensitivity)',
title="ROC curve for test set")
ax.legend(loc="lower right")
plt.savefig('./output/roc_test_{}.tiff'.format(abbreviate), dpi=500)
plt.show()
ax = plt.gca()
lgbc_disp = plot_roc_curve(classifier, X_val, y_val, ax=ax, color='#ff7f0e', lw=2, alpha=0.8)
#ROC plot of training set
ax.plot([0, 1], [0, 1], color='#00bc57', lw=2, linestyle='--')
# ax.xlim([0.0, 1.0])
# ax.ylim([0.0, 1.05])
ax.set(xlim=[-0.05, 1.05],
ylim=[-0.05, 1.05],
xlabel='False Positive Rate (1-Specificity)',
ylabel='Ture Positive Rate (Sensitivity)',
title="ROC curve for validation set")
ax.legend(loc="lower right")
plt.savefig('./output/roc_val_{}.tiff'.format(abbreviate), dpi=500)
plt.show()
###############################################################################
# 10-fold cross validation for ROC
#load the data
a = pd.read_excel('{}'.format(dev_data))
a = np.array(a)
#get independent variable
X = a[:,:-1]
#get the dependent variable
y = a[:,-1]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# #############################################################################
# Classification and ROC analysis
cv = StratifiedKFold(n_splits=10)
classifier = LGBMClassifier(n_estimators=300, learning_rate=0.1, random_state=0)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots()
for i, (train, test) in enumerate(cv.split(X, y)):
classifier.fit(X[train], y[train])
viz = plot_roc_curve(classifier, X[test], y[test],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax)
interp_tpr = interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05],
ylim=[-0.05, 1.05],
xlabel='False Positive Rate (1-Specificity)',
ylabel='Ture Positive Rate (Sensitivity)',
title=''.format(ML_method))
ax.legend(loc="lower right")
plt.savefig('./output/roc_crossval_{}.tiff'.format(abbreviate), dpi=500)
plt.show()
###############################################################################
# Echo the recorded start time for the log.
print('开始时间:', t1.strftime('%Y-%m-%d %H:%M:%S'))
# Record and report the end time.
t2 = datetime.datetime.now()
print('结束时间:', t2.strftime('%Y-%m-%d %H:%M:%S'))

delta = t2 - t1
# Report elapsed wall-clock time.  divmod on the elapsed seconds replaces the
# original string-comparison branches, which compared the formatted ':SS'
# fields of t1/t2 and mis-adjusted minutes on second roll-over.
# NOTE: like the original, this uses delta.seconds and so ignores whole days.
total_seconds = delta.seconds
hours, remainder = divmod(total_seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if total_seconds > 3600:
    print('总共耗时:' + str(hours) + '时' + str(minutes) + '分' + str(seconds) + '秒')
elif total_seconds > 60:
    print('总共耗时:' + str(minutes) + '分' + str(seconds) + '秒')
else:
    print('总共耗时:' + str(total_seconds) + '秒')
|
[
"zwn911208@163.com"
] |
zwn911208@163.com
|
a51f49b8ad549be6578aed13784e031c214b6f48
|
03f23bb43a8161d4479486db4f0cf4df5ead360c
|
/search/services.py
|
74a44bf460b9d45c4c2920333046ff63c6a5bf2a
|
[] |
no_license
|
nalimhahs/shopnani-backend
|
6f070440b64d389090da05307f18bec82e32b537
|
d6ebf193fa1f57b18a5aa7a94f166e18a0fa162f
|
refs/heads/master
| 2022-12-11T16:17:24.029075
| 2019-08-24T07:52:01
| 2019-08-24T07:52:01
| 195,423,400
| 0
| 0
| null | 2022-04-22T22:13:08
| 2019-07-05T14:31:09
|
Python
|
UTF-8
|
Python
| false
| false
| 719
|
py
|
import os
import requests
# Query the Flipkart affiliate search API for a search term and return the
# parsed JSON results.  The affiliate ID and token come from environment
# variables so the credentials are never committed to source.
def getResultsService(query):
    """Search the Flipkart affiliate API and return the response as JSON."""
    headers = {
        "Fk-Affiliate-Id": os.environ.get('DJANGO_FK_AFFILIATE_ID'),
        "Fk-Affiliate-Token": os.environ.get('DJANGO_FK_AFFILIATE_TOKEN'),
    }
    response = requests.get(
        url='https://affiliate-api.flipkart.net/affiliate/1.0/search.json',
        headers=headers,
        params={'query': query, 'resultCount': 10},
    )
    return response.json()
|
[
"milanshah1@gmail.com"
] |
milanshah1@gmail.com
|
24a0d4647a4f5c7bfff17bb405ead7562f3e47a3
|
48caf249eaa70e9f71f9b94696316af62284d036
|
/how bad did you fail your test.py
|
32c190787aa77a4d25b72cbad2ebc52fcae8a799
|
[] |
no_license
|
wannabruh/Python2020-08-17-minecraft-coding-
|
e4d6194fd241277c06ff8ee2a3003c3e7ad45e40
|
5252bace5b642758d32f8df4dc2160c36c586d8a
|
refs/heads/master
| 2022-12-03T10:52:54.774328
| 2020-08-17T08:52:36
| 2020-08-17T08:52:36
| 288,129,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
# Read two exam scores and print a tiered reaction for each band reached.
math = int(input("math"))
eng = int(input("eng"))
# Only score pairs inside the 0-100 range are graded at all; out-of-range
# input prints nothing.
if math >=0 and math <=100 and eng >=0 and eng <=100:
    # NOTE(review): these are independent `if`s, not `elif`s, so e.g. two
    # 95s print every message below.  The tiers were likely meant to be
    # mutually exclusive.
    if math >=90 and eng >=90:
        print("eeeeeeeee")
    if math >=80 and eng >=80:
        print("teeeeeeeee")
    if math >=70 and eng >=70:
        print("reeeeeeeee")
    if math >=60 and eng >=60:
        print("REEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE")
    else:
        # NOTE(review): this `else` pairs with the >=60 check just above, not
        # with the range check, so it fires whenever either score is below 60.
        print("bruh REEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE")
|
[
"noreply@github.com"
] |
wannabruh.noreply@github.com
|
633fdf3de990a8d52a022b237ae5b520e5aea688
|
ee968d0cb10c6fc57cfc53e45ac9d758e82f7749
|
/TSA/ABSAPyTorch/temp.py
|
e228ef4d514b86865b2cadb926f6fab0799c5a9f
|
[
"MIT"
] |
permissive
|
jakeyap/Stance-Detection
|
12f86bdfb2078ae32eec9d28201d04e3e421c806
|
2cb98bf84f7303bc7f436b38084a2b5c32a0ea8e
|
refs/heads/master
| 2022-11-19T09:47:19.441536
| 2020-06-02T17:34:05
| 2020-06-02T17:34:05
| 281,282,637
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 14:00:39 2020
@author: lweiren
"""
from twitter import *
import os
import tweepy
import datetime
import sys
import time
# SECURITY(review): a live consumer key/secret pair is hardcoded in source.
# These credentials should be revoked and loaded from the environment or a
# config file outside version control.
CONSUMER_KEY="A2FjDYLSKc8tfvGLcexZywK35"
CONSUMER_SECRET='kDR9HRGR54otDoAqp7XWFFbRIadUmG9g9li7aaY12qOnKQt2oK'
MY_TWITTER_CREDS = "/home/lweiren/TSA/ABSAPyTorch/.my_app_credentials"
if not os.path.exists(MY_TWITTER_CREDS):
oauth_dance("Semeval sentiment analysis", CONSUMER_KEY, CONSUMER_SECRET, MY_TWITTER_CREDS)
oauth_token, oauth_secret = read_token_file(MY_TWITTER_CREDS)
tobj = Twitter(auth=OAuth(oauth_token, oauth_secret, CONSUMER_KEY, CONSUMER_SECRET))
# =============================================================================
# auth = tweepy.AppAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
# api = tweepy.API(auth)
# # test authentication
# for tweet in tweepy.Cursor(api.search, q = "tweepy").items(10):
# print(tweet.text)
# =============================================================================
# Collect tweet ids matching the query, paging backwards through results
# with max_id (the Search API returns at most 100 tweets per page).
l = []
i = 90000
pages = i / 100
max_id = -1
since_id = 0
for n in range(1):
    try:
        if(n == 0):
            # First page: no max_id anchor yet.
            searchresult = tobj.search.tweets(q = "Donald Trump lang:en -filter:retweets", count = 100, until = "2019-03-10")
            #since_id = searchresult["search_metadata"]["since_id"]
        else:
            # Subsequent pages: resume from the oldest id seen so far.
            searchresult = tobj.search.tweets(q = "Donald Trump lang:en -filter:retweets", count = 100 , max_id = max_id, until= "2019-03-10")
        lis = [tweet["id"] for tweet in searchresult["statuses"]]
        # Results arrive newest-first, so the last id anchors the next page.
        max_id = lis[-1]
        #max_id = searchresult["search_metadata"]["max_id"]
        #print(searchresult["search_metadata"])
        #print(max_id)
        l.extend(lis)
    except:
        # NOTE(review): bare except treats ANY failure as a rate limit and
        # sleeps until the reported reset time; other errors are swallowed.
        rate = tobj.application.rate_limit_status()
        reset = rate['resources']['statuses']['/statuses/show/:id']['reset']
        now = datetime.datetime.today()
        future = datetime.datetime.fromtimestamp(reset)
        seconds = (future-now).seconds+1
        if seconds < 10000:
            sys.stderr.write("Rate limit exceeded, sleeping for %s seconds until %s\n" % (seconds, future))
            time.sleep(seconds)
# Number of distinct tweet ids collected.
length= len(set(l))
|
[
"lweiren@instance-1.asia-southeast1-b.c.model-augury-245215.internal"
] |
lweiren@instance-1.asia-southeast1-b.c.model-augury-245215.internal
|
ce69a986c534d70a5aa60a0025175768ba380815
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/clouds_20200703155257.py
|
611b20c61a4e392632a8177a3ecf3c4d6ae86dde
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
def jumpingClouds(c):
    """Return (and print) the minimum number of jumps needed to cross the clouds.

    c is a list of 0/1 flags: 0 is a safe cloud, 1 must be avoided.  From
    cloud i you may jump to i+1 or i+2; start on cloud 0, finish on the last
    cloud (HackerRank "Jumping on the Clouds").
    """
    i = 0
    jumps = 0
    # Greedy: always take the 2-jump when it lands on a safe cloud.
    # BUG FIX: the original incremented i by 1 regardless and counted a jump
    # for every window where a move was possible, overcounting the total.
    while i < len(c) - 1:
        if i + 2 < len(c) and c[i + 2] == 0:
            i += 2
        else:
            i += 1
        jumps += 1
    print(jumps)
    return jumps
jumpingClouds([0,0,1,0,0,1,0])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
e41960c52ed9f1d8f4899297c7aa4df4e18f5413
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/6/og-.py
|
e881559c8f292114595ae1314a66e46d1d5952e6
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
    # Python 2 file (print statements) — left byte-identical.
    # Prints the quoted payload of a command line: expects the tokens after
    # the command word; when the first and last token are a lone double-quote,
    # prints the inner tokens space-joined, or a blank line if there are none.
    # NOTE(review): lineRemaining is a list of whitespace-split tokens (see
    # main), so only a quote standing alone as a token matches — confirm
    # that is the intended input format.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Python 2 file — left byte-identical.
    # Interpreter loop: reads fileName line by line; a line whose first token
    # is 'oG-' has its remaining tokens dispatched to printFunction; any other
    # first token prints 'ERROR' and stops processing.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'oG-':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
# Script entry point: the file to interpret is the first CLI argument.
if __name__ == '__main__':
    main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
3ea0bb442577424dd93a06877b4cb480971dc827
|
d7f4e330f5d803c8cd495729fd86da61b89565f3
|
/torch/_meta_registrations.py
|
0511b5188fbea63e9c0427f06428dc9859aa3885
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
awf/pytorch
|
55ff84549c17579a1f62910ef2ac7b1dcd6fa897
|
0dceaf07cd1236859953b6f85a61dc4411d10f87
|
refs/heads/master
| 2023-02-08T13:19:22.073279
| 2023-01-29T10:36:40
| 2023-01-29T10:36:43
| 239,372,903
| 0
| 0
|
NOASSERTION
| 2020-02-09T20:55:23
| 2020-02-09T20:55:22
| null |
UTF-8
|
Python
| false
| false
| 82,649
|
py
|
import math
from typing import List, Optional, Union
import torch
import torch._prims_common as utils
from torch import Tensor
from torch._decomp import _add_op_to_registry, global_decomposition_table, meta_table
from torch._ops import OpOverload
from torch._prims import _elementwise_meta, ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND
from torch._prims_common import (
check,
corresponding_complex_dtype,
corresponding_real_dtype,
elementwise_dtypes,
ELEMENTWISE_TYPE_PROMOTION_KIND,
FloatLike,
IntLike,
make_contiguous_strides_for,
)
from torch._prims_common.wrappers import out_wrapper
from torch._refs import _broadcast_shapes
from torch._subclasses.fake_tensor import check_no_bool_index_tensors
from torch.utils._pytree import tree_map
aten = torch.ops.aten
_meta_lib_dont_use_me_use_register_meta = torch.library.Library("aten", "IMPL", "Meta")
def register_meta(op):
    """Decorator factory: register the decorated fn as the meta kernel for
    every OpOverload in *op* (a single overload or a pytree of them), by
    adding each to meta_table."""
    def wrapper(fn):
        def register(op):
            _add_op_to_registry(meta_table, op, fn)
        tree_map(register, op)
        return fn
    return wrapper
def toRealValueType(dtype):
    """Map a complex dtype to its real-valued counterpart; pass others through."""
    if dtype is torch.complex32:
        return torch.half
    if dtype is torch.cfloat:
        return torch.float
    if dtype is torch.cdouble:
        return torch.double
    return dtype
@register_meta([aten._fft_c2c.default, aten._fft_c2c.out])
@out_wrapper()
def meta_fft_c2c(self, dim, normalization, forward):
    """Meta for complex-to-complex FFT: output has the input's shape and dtype."""
    assert self.dtype.is_complex
    return self.new_empty(self.size())
@register_meta([aten._fft_r2c.default, aten._fft_r2c.out])
@out_wrapper()
def meta_fft_r2c(self, dim, normalization, onesided):
    """Meta for real-to-complex FFT: input shape, except the last transformed
    dim shrinks to n//2 + 1 when onesided; dtype is the matching complex one."""
    assert self.dtype.is_floating_point
    output_sizes = list(self.size())
    if onesided:
        last_dim = dim[-1]
        last_dim_halfsize = (output_sizes[last_dim] // 2) + 1
        output_sizes[last_dim] = last_dim_halfsize
    return self.new_empty(
        output_sizes, dtype=utils.corresponding_complex_dtype(self.dtype)
    )
@register_meta(aten.randperm.generator_out)
def meta_randperm(n, *, generator=None, out):
    """Meta for randperm.out: validates that `out` is 1-D with n elements."""
    assert out.ndim == 1 and out.size(0) == n
    return out
@register_meta(aten.randint.default)
def meta_randint(
    high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
    """Meta for randint(high, size): only shape/dtype matter; `high` is unused."""
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
@register_meta(aten.randint.low)
def meta_randint_low(
    low, high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
    """Meta for randint(low, high, size): bounds are ignored, only shape/dtype."""
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
@register_meta(aten.rand.default)
def meta_rand_default(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    """Meta for rand: an uninitialized tensor with the requested shape/options."""
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
@register_meta([aten._fft_c2r.default, aten._fft_c2r.out])
@out_wrapper()
def meta_fft_c2r(self, dim, normalization, lastdim):
    """Meta for complex-to-real FFT: the last transformed dim becomes `lastdim`
    and the dtype becomes the real counterpart of the input's complex dtype."""
    assert self.dtype.is_complex
    output_sizes = list(self.size())
    output_sizes[dim[-1]] = lastdim
    return self.new_empty(output_sizes, dtype=toRealValueType(self.dtype))
@register_meta(aten.copy_.default)
def meta_copy_(self, src, non_blocking=False):
    """Meta for copy_: no data to move, so the destination is returned as-is."""
    return self
def inferUnsqueezeGeometry(tensor, dim):
    """Compute (sizes, strides) of *tensor* after inserting a length-1 axis at *dim*."""
    sizes = list(tensor.size())
    strides = list(tensor.stride())
    # Stride for the new axis: past the last dim it is 1; otherwise it must
    # step over the same span as one step along the axis it displaces.
    if dim >= tensor.dim():
        inserted_stride = 1
    else:
        inserted_stride = sizes[dim] * strides[dim]
    sizes.insert(dim, 1)
    strides.insert(dim, inserted_stride)
    return sizes, strides
@register_meta(aten.unsqueeze_.default)
def meta_unsqueeze_(self, dim):
    """Meta for in-place unsqueeze: restrides `self` with a length-1 dim at `dim`.
    NOTE(review): `maybe_wrap_dim` is not in the visible import header —
    confirm it is brought into scope elsewhere in this file.
    """
    dim = maybe_wrap_dim(dim, self.dim() + 1)
    g_sizes, g_strides = inferUnsqueezeGeometry(self, dim)
    self.as_strided_(g_sizes, g_strides)
    return self
# Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py
@register_meta(aten.index_select.default)
def meta_index_select(self, dim, index):
    """Meta for index_select: result matches self except size(dim) == index.numel()."""
    result_size = list(self.size())
    if self.dim() > 0:
        result_size[dim] = index.numel()
    return self.new_empty(result_size)
@register_meta(aten.index_select.out)
def meta_index_select_out(self, dim, index, out):
    """Meta for index_select.out: resizes `out` then copies the functional result in.
    NOTE(review): `out` is resized to self.size(), not the index_select result
    size — the subsequent copy_ would shape-check at runtime; confirm intended.
    """
    torch._resize_output_(out, self.size(), self.device)
    return out.copy_(torch.index_select(self, dim, index))
@register_meta([aten.max.default, aten.max.unary_out])
@out_wrapper()
def meta_max(self):
    """Meta for full-reduction max: a 0-dim result of the input's dtype."""
    return self.new_empty(())
@register_meta(aten.max.dim)
def meta_max_dim(self, dim, keepdim=False):
    """Meta for max.dim: (values, long indices), both with the reduced shape."""
    dim = utils.reduction_dims(self.shape, (dim,))
    output_shape = _compute_reduction_shape(self, dim, keepdim)
    return (
        self.new_empty(output_shape),
        self.new_empty(output_shape, dtype=torch.long),
    )
@register_meta([aten.min.default])
def meta_min(self):
    """Meta for full-reduction min: a 0-dim result of the input's dtype."""
    return self.new_empty(())
@register_meta(aten.angle.default)
def meta_angle(self):
    """Meta for angle: complex input maps to its real dtype; other inputs go
    through int-to-float elementwise type promotion."""
    if self.is_complex():
        result_dtype = corresponding_real_dtype(self.dtype)
    else:
        _, result_dtype = elementwise_dtypes(
            self, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
        )
    return torch.empty_like(self, dtype=result_dtype)
@register_meta(aten.angle.out)
def meta_angle_out(self, out):
    """Meta for angle.out: resize `out` to self's shape, then copy angle(self) in."""
    torch._resize_output_(out, self.size(), self.device)
    return out.copy_(torch.angle(self))
# From aten/src/ATen/native/LinearAlgebraUtils.h
def squareCheckInputs(self: Tensor, f_name: str):
    """Assert that *self* is (a batch of) square matrices: ndim >= 2 and
    size(-1) == size(-2)."""
    assert (
        self.dim() >= 2
    ), f"{f_name}: The input tensor must have at least 2 dimensions."
    rows = self.size(-2)
    cols = self.size(-1)
    assert (
        cols == rows
    ), f"{f_name}: A must be batches of square matrices, but they are {rows} by {cols} matrices"
# From aten/src/ATen/native/LinearAlgebraUtils.h
def checkFloatingOrComplex(
    t: Tensor, f_name: str, allow_low_precision_dtypes: bool = True
):
    """Raise unless *t* is floating point or complex; when low-precision dtypes
    are NOT allowed, additionally require full precision (float/double or
    cfloat/cdouble).

    BUG FIX: the precision check was gated on `if allow_low_precision_dtypes:`,
    i.e. inverted — half/bfloat16 inputs were rejected exactly when they were
    supposed to be allowed (upstream gates on `if not allow_low_precision_dtypes:`).
    The first error message also had stray commas; tidied to match upstream.
    """
    dtype = t.dtype
    check(
        t.is_floating_point() or t.is_complex(),
        lambda: f"{f_name}: Expected a floating point or complex tensor as input. Got {dtype}",
    )
    if not allow_low_precision_dtypes:
        check(
            dtype in (torch.float, torch.double, torch.cfloat, torch.cdouble),
            lambda: f"{f_name} : Low precision dtypes not supported. Got {dtype}",
        )
# From aten/src/ATen/native/LinearAlgebraUtils.h
def checkIsMatrix(A: Tensor, f_name: str, arg_name: str = "A"):
    """Raise unless *A* has at least 2 dims (a matrix or a batch of matrices)."""
    is_matrix = A.dim() >= 2
    check(
        is_matrix,
        lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
    )
def checkUplo(uplo: str):
    """Validate a LAPACK-style UPLO argument: a single 'U' or 'L', case-insensitive.

    BUG FIX: the original condition `len(uplo) == 1 and x == "U" or x == "L"`
    parsed as `(len == 1 and x == "U") or (x == "L")` due to operator
    precedence, dropping the length guard from the "L" branch.  Harmless today
    (only a length-1 string can upper-case to "L") but fragile; parenthesized
    explicitly.
    """
    uplo_uppercase = uplo.upper()
    assert len(uplo) == 1 and (
        uplo_uppercase == "U" or uplo_uppercase == "L"
    ), f"Expected UPLO argument to be 'L' or 'U', but got {uplo}"
# @register_meta(aten.linalg_eigh.default)
def meta_linalg_eigh(self, uplo="L"):
    """Meta for linalg.eigh (registration commented out above): returns
    (values, vectors) shells after validating a square input and the UPLO flag.
    NOTE(review): `values` here gets the full matrix shape (transposed) and
    `vectors` drops the last dim — the opposite of what eigenvalues/eigenvectors
    shapes would suggest.  Confirm before re-enabling the registration.
    """
    squareCheckInputs(self, "linalg_eigh")
    checkUplo(uplo)
    real_dtype = toRealValueType(self.dtype)
    assert self.dim() >= 2
    values = self.new_empty(self.shape, dtype=real_dtype)
    values.transpose_(-2, -1)
    vectors = self.new_empty(self.shape[:-1])
    return (values, vectors)
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_cholesky_ex.default)
def linalg_cholesky_ex(A: Tensor, upper: bool = False, check_errors: bool = False):
    """Meta for linalg.cholesky_ex: returns (L, infos).  L has A's shape with
    non-row-major (row_major=False) strides for the matrix dims; infos is an
    int32 tensor over the batch dims."""
    squareCheckInputs(A, "linalg.cholesky")
    checkFloatingOrComplex(A, "linalg.cholesky")
    A_shape = A.shape
    ndim = len(A_shape)
    # L
    L_strides = make_contiguous_strides_for(A_shape, False)
    L = A.new_empty(A_shape)
    L.as_strided_(A_shape, L_strides)
    # infos
    infos = A.new_empty(A_shape[0 : ndim - 2], dtype=torch.int32)
    return L, infos
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_inv_ex.default)
def linalg_inv_ex_meta(A: Tensor, check_errors: bool = False):
    """Meta for linalg.inv_ex: (inverse shell with row_major=False strides,
    int32 infos over the batch dims).  Low-precision dtypes are rejected."""
    squareCheckInputs(A, "linalg.inv_ex")
    checkFloatingOrComplex(A, "linalg.inv_ex", allow_low_precision_dtypes=False)
    L = A.new_empty(A.shape)
    L.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))
    infos = A.new_empty(A.shape[:-2], dtype=torch.int32)
    return L, infos
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
# NOTE: matching defaults in aten/src/ATen/native/native_functions.yaml
@register_meta(aten._linalg_svd.default)
def _linalg_svd_meta(
    A: Tensor, full_matrices: bool = False, compute_uv: bool = True, driver: str = None
):
    """Meta for _linalg_svd: returns (U, S, V) shells.  With compute_uv=False,
    U and V are 1-element-free placeholders; S is always the real dtype."""
    checkIsMatrix(A, "linalg.svd")
    checkFloatingOrComplex(A, "linalg.svd")
    batch_dims = list(A.shape[:-2])
    m = A.shape[-2]
    n = A.shape[-1]
    k = min(m, n)
    if compute_uv:
        U_shape = batch_dims + [m, m if full_matrices else k]
        U = A.new_empty(U_shape)
        U.as_strided_(U_shape, make_contiguous_strides_for(U_shape, row_major=False))
        V_shape = batch_dims + [n if full_matrices else k, n]
        V = A.new_empty(V_shape)
        # TODO: need to distinguish cuSOLVER case? (see original code)
        V.as_strided_(V_shape, make_contiguous_strides_for(V_shape, row_major=False))
    else:
        # doesn't matter
        U = A.new_empty([0])
        V = A.new_empty([0])
    # S is always real, even when A is complex.
    S = A.new_empty(batch_dims + [k], dtype=toRealValueType(A.dtype))
    return U, S, V
# From aten/src/ATen/native/LinearAlgebra.cpp
@register_meta(aten._linalg_det.default)
def _linalg_det_meta(A):
    """Meta for _linalg_det: (det over batch dims, LU factor shell with
    row_major=False strides, int32 pivots)."""
    squareCheckInputs(A, "linalg.det")
    checkFloatingOrComplex(A, "linalg.det")
    det = A.new_empty(A.shape[:-2])
    LU = A.new_empty(A.shape)
    LU.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))
    pivots = A.new_empty(A.shape[:-1], dtype=torch.int32)
    return det, LU, pivots
# From aten/src/ATen/native/ReflectionPad.cpp
@register_meta(
    [aten.reflection_pad2d_backward.default, aten.replication_pad2d_backward.default]
)
def meta_pad2d_backward(grad_output, self, padding):
    """Meta for reflection/replication pad2d backward: checks grad_output's
    spatial dims match the padded forward output, then returns a shell shaped
    like the (unpadded) input."""
    dim_w = 2
    dim_h = 1
    dim_plane = 0
    nbatch = 1
    self_shape = self.shape
    if self.dim() == 4:
        nbatch = self_shape[0]
        dim_w += 1
        dim_h += 1
        dim_plane += 1
    pad_l = padding[0]
    pad_r = padding[1]
    pad_t = padding[2]
    pad_b = padding[3]
    nplane = self_shape[dim_plane]
    input_h = self_shape[dim_h]
    input_w = self_shape[dim_w]
    output_h = input_h + pad_t + pad_b
    output_w = input_w + pad_l + pad_r
    check(
        output_w == grad_output.shape[dim_w],
        lambda: f"gradOutput width unexpected. Expected: {output_w}, Got: {grad_output.shape[dim_w]}",
    )
    check(
        output_h == grad_output.shape[dim_h],
        lambda: f"gradOutput height unexpected. Expected: {output_h}, Got: {grad_output.shape[dim_h]}",
    )
    return self.new_empty(self.shape)
@register_meta(aten.reflection_pad2d.default)
def meta_pad2d(self, padding):
    """Meta for reflection_pad2d: validates a non-empty 3D/4D input and grows
    the two spatial dims by the (l, r, t, b) padding amounts."""
    valid_dims = self.size(1) != 0 and self.size(2) != 0
    check(
        (self.ndim == 3 and valid_dims)
        or (self.ndim == 4 and valid_dims and self.size(3) != 0),
        lambda: f"3D or 4D (batch mode) tensor expected for input, but got: {self}",
    )
    if self.ndim == 4:
        nbatch, nplane, input_h, input_w = self.shape
    else:
        nbatch = 1
        nplane, input_h, input_w = self.shape
    pad_l, pad_r, pad_t, pad_b = padding
    output_h = input_h + pad_t + pad_b
    output_w = input_w + pad_l + pad_r
    if self.ndim == 3:
        return self.new_empty((nplane, output_h, output_w))
    else:
        return self.new_empty((nbatch, nplane, output_h, output_w))
@register_meta([aten.bernoulli.default, aten.bernoulli.out])
@out_wrapper()
def meta_bernoulli(self, *, generator=None):
    """Meta for bernoulli: contiguous shell shaped/typed like the input."""
    # https://github.com/pytorch/pytorch/issues/88612
    return torch.empty_like(self).contiguous()
@register_meta(aten.bernoulli_.float)
def meta_bernoulli_(self, p=0.5, generator=None):
    """Meta for in-place bernoulli_: returns `self` unchanged."""
    return self
@register_meta(aten.bernoulli.p)
def meta_bernoulli_p(self, p=0.5, generator=None):
    """Meta for bernoulli.p: contiguous shell shaped/typed like the input."""
    # https://github.com/pytorch/pytorch/issues/88612
    return torch.empty_like(self).contiguous()
@register_meta(aten._fused_moving_avg_obs_fq_helper.default)
def meta__fused_moving_avg_obs_fq_helper(
    self,
    observer_on,
    fake_quant_on,
    running_min,
    running_max,
    scale,
    zero_point,
    averaging_const,
    quant_min,
    quant_max,
    ch_axis,
    per_row_fake_quant=False,
    symmetric_quant=False,
):
    """Meta for the fused moving-average observer + fake-quant helper: after
    validating ch_axis, returns (output shell like self, bool mask like self)."""
    check(
        ch_axis < self.dim(),
        lambda: "Error in fused_moving_avg_obs_fake_quant_cpu: ch_axis must be < self.dim()",
    )
    mask = torch.empty_like(self, dtype=torch.bool)
    return (torch.empty_like(self), mask)
def dot_check(self, other):
    """Raise unless both arguments are 1-D tensors (precondition for dot/vdot)."""
    both_1d = self.dim() == 1 and other.dim() == 1
    check(
        both_1d,
        lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors",
    )
@register_meta(aten.dot.default)
def meta_dot(self, tensor):
    """Meta for dot: validates both operands are 1-D, returns a 0-dim shell."""
    dot_check(self, tensor)
    return self.new_empty(())
@register_meta([aten.mm.default])
def meta_mm(a, b):
    """Meta for mm: (N, M) @ (M, P) -> (N, P), with shape validation."""
    check(a.dim() == 2, lambda: "a must be 2D")
    check(b.dim() == 2, lambda: "b must be 2D")
    N, M1 = a.shape
    M2, P = b.shape
    check(M1 == M2, lambda: "a and b must have same reduction dim")
    return a.new_empty(N, P)
def _compute_reduction_shape(self, dims, keepdim):
    """Output shape after reducing `dims`: kept as size-1 when keepdim, else removed."""
    if keepdim:
        return tuple(self.shape[i] if i not in dims else 1 for i in range(self.ndim))
    return utils.compute_reduction_output_shape(self.shape, dims)
# FakeTensors (meta tensors with a device) will report device as meta
# when running meta kernels. Here, access the "fake device" of FakeTensor if it
# exists so meta kernels which have diverge per device will be more
# accurate when run with FakeTensors
def device_hint(tensor) -> "str":
    """Best-effort device type: the FakeTensor's fake device, else "cuda"."""
    if not isinstance(tensor, torch._subclasses.FakeTensor):
        return "cuda"  # default to cuda
    return tensor.fake_device.type
def calc_conv_nd_return_shape(
    input_tensor: torch.Tensor,
    weight: torch.Tensor,
    stride: Union[List[int], int],
    padding: Union[List[int], int],
    dilation: Union[List[int], int],
    is_transposed: bool,
    groups: int,
    output_padding: Optional[Union[List[int], int]] = None,
):
    """Compute the output shape [N, C_out, *spatial] of an N-D (transposed)
    convolution from the input/weight shapes and the conv hyperparameters.
    Scalar or length-1 stride/padding/dilation/output_padding are broadcast
    to all spatial dims; output_padding is only meaningful when transposed."""
    def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:
        """
        Formula to apply to calculate the length of some dimension of the output
        See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
        Args:
            ln: length of the dimension
            p: padding in that dim
            d: dilation in that dim
            k: kernel size in that dim
            s: stride in that dim
        Returns:
            The output length
        """
        return (ln + 2 * p - d * (k - 1) - 1) // s + 1
    def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
        """
        Formula to apply to calculate the length of some dimension of the output
        if transposed convolution is used.
        See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
        Args:
            ln: length of the dimension
            p: padding in that dim
            d: dilation in that dim
            k: kernel size in that dim
            s: stride in that dim
            op: output padding in that dim
        Returns:
            The output length
        """
        return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1
    kernel_size = weight.shape[2:]
    dims = input_tensor.shape[2:]
    if is_transposed:
        out_channels = groups * weight.shape[1]
    else:
        out_channels = weight.shape[0]
        if weight.shape[1] * groups != input_tensor.shape[1]:
            raise RuntimeError("Invalid channel dimensions")
    ret_shape = [input_tensor.shape[0], out_channels]
    if isinstance(stride, IntLike):
        stride = [stride] * len(dims)
    elif len(stride) == 1:
        stride = [stride[0]] * len(dims)
    if isinstance(padding, IntLike):
        padding = [padding] * len(dims)
    elif len(padding) == 1:
        padding = [padding[0]] * len(dims)
    if isinstance(dilation, IntLike):
        dilation = [dilation] * len(dims)
    elif len(dilation) == 1:
        dilation = [dilation[0]] * len(dims)
    output_padding_list: Optional[List[int]] = None
    if output_padding:
        if isinstance(output_padding, IntLike):
            output_padding_list = [output_padding] * len(dims)
        elif len(output_padding) == 1:
            output_padding_list = [output_padding[0]] * len(dims)
        else:
            output_padding_list = output_padding
    for i in range(len(dims)):
        # If output_padding is present, we are dealing with a transposed convolution
        if output_padding_list:
            ret_shape.append(
                _formula_transposed(
                    dims[i],
                    padding[i],
                    dilation[i],
                    kernel_size[i],
                    stride[i],
                    output_padding_list[i],
                )
            )
        else:
            ret_shape.append(
                _formula(dims[i], padding[i], dilation[i], kernel_size[i], stride[i])
            )
    return ret_shape
def is_channels_last(ten):
    """True when *ten*'s suggested memory format is channels-last (NHWC)."""
    suggested = torch._prims_common.suggest_memory_format(ten)
    return suggested == torch.channels_last
@register_meta(aten.convolution.default)
def meta_conv(
    input_tensor: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    stride: List[int],
    padding: List[int],
    dilation: List[int],
    is_transposed: bool,
    output_padding: List[int],
    groups: int,
):
    """Meta for aten::convolution: shape via calc_conv_nd_return_shape, with a
    memory format chosen to mimic the eager backends (channels-last on cuda
    when either operand suggests it)."""
    def pick_memory_format():
        # cuda keeps channels-last if either input or weight is channels-last;
        # other devices only consider the input.
        if device_hint(input_tensor) == "cuda":
            if is_channels_last(input_tensor) or is_channels_last(weight):
                return torch.channels_last
        else:
            if is_channels_last(input_tensor):
                return torch.channels_last
        if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format
    shape_out = calc_conv_nd_return_shape(
        input_tensor,
        weight,
        stride,
        padding,
        dilation,
        is_transposed,
        groups,
        output_padding if is_transposed else None,
    )
    out = input_tensor.new_empty(shape_out)
    out = out.to(memory_format=pick_memory_format())  # type: ignore[call-overload]
    return out
# Meta kernels for the fused mkldnn ops; only registered when this torch build
# has mkldnn support.
if torch._C.has_mkldnn:
    _meta_lib_dont_use_me_use_register_meta_for_mkldnn = torch.library.Library(
        "mkldnn", "IMPL", "Meta"
    )
    # Memory-format choice mirroring meta_conv, but mkldnn (opaque) weights
    # force channels-last.
    def pick_mkldnn_conv_memory_format(input_tensor, weight):
        if weight.is_mkldnn:
            return torch.channels_last
        if is_channels_last(input_tensor) or is_channels_last(weight):
            return torch.channels_last
        if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format
    # Conv + pointwise-unary fusion: conv output shape, always channels-last here.
    @register_meta(torch.ops.mkldnn._convolution_pointwise.default)
    def meta_mkldnn_convolution_default(
        input_tensor,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        attr,
        scalars,
        algorithm,
    ):
        shape_out = calc_conv_nd_return_shape(
            input_tensor, weight, stride, padding, dilation, False, groups, []
        )
        out = input_tensor.new_empty(shape_out)
        out_memory_format = torch.channels_last
        out = out.to(memory_format=out_memory_format)  # type: ignore[call-overload]
        return out
    # Conv + binary fusion: output matches `other`'s shape, channels-last.
    @register_meta(torch.ops.mkldnn._convolution_pointwise.binary)
    def meta_mkldnn_convolution_binary(
        input_tensor,
        other,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        binary_attr,
        alpha,
        unary_attr,
        unary_scalars,
        unary_algorithm,
    ):
        out = input_tensor.new_empty(other.size())
        out = out.to(memory_format=torch.channels_last)  # type: ignore[call-overload]
        return out
    # In-place conv + binary fusion: result is written into `other`.
    @register_meta(torch.ops.mkldnn._convolution_pointwise_.binary)
    def meta_mkldnn_convolution_binary_inplace(
        input_tensor,
        other,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        binary_attr,
        alpha,
        unary_attr,
        unary_scalars,
        unary_algorithm,
    ):
        return other
    # Linear + pointwise-unary fusion: last dim becomes weight's out_features.
    @register_meta(torch.ops.mkldnn._linear_pointwise.default)
    def meta_linear_pointwise_default(
        input_tensor, weight, bias, attr, scalars, algorithm
    ):
        return input_tensor.new_empty((*input_tensor.shape[:-1], weight.shape[0]))
    # Linear + binary fusion: output matches `other`'s shape.
    @register_meta(torch.ops.mkldnn._linear_pointwise.binary)
    def meta_linear_pointwise_binary(input_tensor, other, weight, bias, attr):
        out = input_tensor.new_empty(other.size())
        return out
# Meta kernel for the packed-weight MKL linear op; only when MKL is built in.
if torch._C.has_mkl:
    _meta_lib_dont_use_me_use_register_meta_for_mkl = torch.library.Library(
        "mkl", "IMPL", "Meta"
    )
    # Shape comes from the original (unpacked) weight's out_features.
    @register_meta(torch.ops.mkl._mkl_linear)
    def meta_mkl_linear(
        input_tensor,
        packed_weight,
        orig_weight,
        bias,
        batch_size,
    ):
        return input_tensor.new_empty(
            (*input_tensor.shape[:-1], orig_weight.shape[0])
        )
# from check_dim_size() in aten/src/ATen/TensorUtils.cpp.
def check_dim_size(tensor, dim, dim_size, size):
    """Raise unless *tensor* has exactly *dim* dims and shape[dim_size] == size."""
    ok = tensor.dim() == dim and tensor.shape[dim_size] == size
    check(
        ok,
        lambda: f"Expected a tensor of dimension {dim} and tensor.size[{dim_size}] == {size}, "
        + f"but got : dimension {tensor.dim()} and tensor.size[{dim_size}] = {tensor.shape[dim_size]}",
    )
@register_meta(aten.avg_pool2d.default)
def meta_avg_pool2d(
    input,
    kernel_size,
    stride=(),
    padding=(0,),
    ceil_mode=False,
    count_include_pad=True,
    divisor_override=None,
):
    """Meta for avg_pool2d: validates kernel/stride/padding, computes the
    pooled spatial size, and returns a shell in the input's suggested memory
    format.
    NOTE(review): `pooling_output_shape` and `pool2d_shape_check` are not
    defined or imported in the visible part of this file — confirm they are
    in scope elsewhere.
    """
    def unpack(name, val):
        # Accept a single int (applied to both dims) or a (H, W) pair.
        check(
            len(val) in [1, 2],
            lambda: f"avg_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        H = val[0]
        W = H if len(val) == 1 else val[1]
        return H, W
    kH, kW = unpack("kernel_size", kernel_size)
    check(
        len(stride) in [0, 1, 2],
        lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    if len(stride) == 0:
        dH, dW = kH, kW
    elif len(stride) == 1:
        dH, dW = stride[0], stride[0]
    else:
        dH, dW = unpack("stride", stride)
    padH, padW = unpack("padding", padding)
    check(
        divisor_override is None or divisor_override != 0,
        lambda: "divisor must be not zero",
    )
    nbatch = input.size(-4) if input.dim() == 4 else 1
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)
    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)
    memory_format = utils.suggest_memory_format(input)
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        1,
        1,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )
    if input.dim() == 3:
        size = [nInputPlane, outputHeight, outputWidth]
    else:
        size = [nbatch, nInputPlane, outputHeight, outputWidth]
    return torch.empty(
        size, dtype=input.dtype, device=input.device, memory_format=memory_format
    )
# from avg_pool2d_backward_shape_check() in aten/src/ATen/native/Pool.h.
def avg_pool2d_backward_shape_check(
    input,
    gradOutput,
    nbatch,
    kH,
    kW,
    dH,
    dW,
    padH,
    padW,
    nInputPlane,
    inputHeight,
    inputWidth,
    outputHeight,
    outputWidth,
    mem_format,
):
    """Validate avg_pool2d backward shapes: reruns the forward pool2d shape
    check, then requires gradOutput's plane/height/width dims to match the
    expected forward output."""
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        1,
        1,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        mem_format,
    )
    ndim = input.dim()
    nOutputPlane = nInputPlane
    check_dim_size(gradOutput, ndim, ndim - 3, nOutputPlane)
    check_dim_size(gradOutput, ndim, ndim - 2, outputHeight)
    check_dim_size(gradOutput, ndim, ndim - 1, outputWidth)
# Don't override the C++ registration.
@register_meta(aten.avg_pool2d_backward.default)
def meta_avg_pool2d_backward(
    gradOutput_,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override,
):
    """Meta for avg_pool2d_backward: validates hyperparameters and gradOutput's
    shape against the recomputed forward output, then returns a shell shaped
    like the input (in its suggested memory format)."""
    # From aten/src/ATen/native/AveragePool2d.cpp structured kernel meta func.
    check(
        len(kernel_size) == 1 or len(kernel_size) == 2,
        lambda: "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints",
    )
    kH = kernel_size[0]
    kW = kH if len(kernel_size) == 1 else kernel_size[1]
    check(
        len(stride) == 0 or len(stride) == 1 or len(stride) == 2,
        lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    dH = kH if len(stride) == 0 else stride[0]
    dW = kW if len(stride) == 0 else dH if len(stride) == 1 else stride[1]
    check(
        len(padding) == 1 or len(padding) == 2,
        lambda: "avg_pool2d: padding must either be a single int, or a tuple of two ints",
    )
    padH = padding[0]
    padW = padH if len(padding) == 1 else padding[1]
    check(
        divisor_override is None or divisor_override != 0,
        lambda: "divisor must be not zero",
    )
    input_size = input.shape
    nbatch = input_size[-4] if input.dim() == 4 else 1
    nInputPlane = input_size[-3]
    inputHeight = input_size[-2]
    inputWidth = input_size[-1]
    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)
    mem_format = utils.suggest_memory_format(input)
    avg_pool2d_backward_shape_check(
        input,
        gradOutput_,
        nbatch,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        mem_format,
    )
    return torch.empty(
        input_size, dtype=input.dtype, device=input.device, memory_format=mem_format
    )
@register_meta(aten._adaptive_avg_pool2d.default)
def meta_adaptive_avg_pool2d(self, output_size):
    """Meta for adaptive_avg_pool2d: last two dims become `output_size`,
    preserving the input's suggested memory format."""
    check(
        self.ndim == 3 or self.ndim == 4,
        lambda: f"Expected 3D or 4D tensor, but got {self.shape}",
    )
    output_shape = self.shape[:-2] + tuple(output_size)
    memory_format = utils.suggest_memory_format(self)
    # need to set memory_format to preserve the memory format of the input
    # channel last input should have channel last output
    return torch.empty(
        output_shape, dtype=self.dtype, device=self.device, memory_format=memory_format
    )
@register_meta(aten._adaptive_avg_pool3d.default)
def meta_adaptive_avg_pool3d(self, output_size):
    """Meta for adaptive_avg_pool3d: last three dims become `output_size`."""
    check(
        self.ndim == 4 or self.ndim == 5,
        lambda: f"Expected 4D or 5D tensor, but got {self.shape}",
    )
    return self.new_empty(self.shape[:-3] + tuple(output_size))
@register_meta(aten._adaptive_avg_pool2d_backward.default)
def meta__adaptive_avg_pool2d_backward(grad_out, self):
    """Meta for adaptive_avg_pool2d backward: validates grad_out (non-empty
    non-batch dims, 3D/4D, dtype matching self) and returns a shell like self."""
    ndim = grad_out.ndim
    for i in range(1, ndim):
        check(
            grad_out.size(i) > 0,
            lambda: f"adaptive_avg_pool2d_backward(): Expected grad_output to have non-zero \
size for non-batch dimensions, {grad_out.shape} with dimension {i} being empty",
        )
    check(
        ndim == 3 or ndim == 4,
        lambda: f"adaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got {self.shape}",
    )
    check(
        self.dtype == grad_out.dtype,
        lambda: f"expected dtype {self.dtype} for `grad_output` but got dtype {grad_out.dtype}",
    )
    return self.new_empty(self.shape)
@register_meta(aten.repeat_interleave.Tensor)
def meta_repeat_interleave_Tensor(repeats, output_size=None):
    """Meta for repeat_interleave: needs an explicit output_size, since the
    data-dependent sum of `repeats` cannot be computed on meta tensors."""
    if output_size is None:
        raise RuntimeError("cannot repeat_interleave a meta tensor without output_size")
    return repeats.new_empty(output_size)
@register_meta([aten.complex.default, aten.complex.out])
@out_wrapper()
def meta_complex(real, imag):
    """Meta for complex(real, imag): broadcast shape, complex dtype matching
    real's floating dtype."""
    assert real.dtype.is_floating_point
    assert imag.dtype.is_floating_point
    out_shape = _broadcast_shapes(real.shape, imag.shape)
    return real.new_empty(out_shape, dtype=corresponding_complex_dtype(real.dtype))
@register_meta(aten.vdot.default)
def vdot(self, other):
    """Meta for aten::vdot: validates 1-D operands and returns a 0-dim shell.

    Real inputs reduce to dot; for complex inputs, conjugation is rearranged so
    the computation is expressed via dot/vdot on materialized (non-conj) views.
    """
    # BUG FIX: was `if not self.is_complex:` — `is_complex` is a bound method
    # and always truthy, so the real-input fast path was unreachable.  It must
    # be called.
    if not self.is_complex():
        return torch.dot(self, other)
    if self.is_conj():
        if other.is_conj():
            return torch.vdot(other.conj(), self.conj())
        else:
            return torch.dot(self.conj(), other)
    elif other.is_conj():
        return torch.dot(self, other.conj()).conj()
    dot_check(self, other)
    return self.new_empty(())
# Leaving this function around because a python implementation
# of indexing shape inference is useful,
# but not registering it to the dispatcher because we already
# get shape inference through structured kernels
@register_meta(aten.index.Tensor)
def meta_index_Tensor(self, indices):
    """Meta shape inference for advanced indexing (aten::index): expands
    byte/bool masks via nonzero, broadcasts the index tensors, moves the
    advanced dims to the front when they are non-contiguous, and returns a
    shell with the resulting shape."""
    check_no_bool_index_tensors(aten.index.Tensor, self, indices)
    check(indices, lambda: "at least one index must be provided")
    # aten::index is the internal advanced indexing implementation
    # checkIndexTensorTypes and expandTensors
    result: List[Optional[Tensor]] = []
    for i, index in enumerate(indices):
        if index is not None:
            check(
                index.dtype in [torch.long, torch.int, torch.int8, torch.bool],
                lambda: "tensors used as indices must be long, int, byte or bool tensors",
            )
            if index.dtype in [torch.int8, torch.bool]:
                nonzero = index.nonzero()
                k = len(result)
                check(
                    k + index.ndim <= self.ndim,
                    lambda: f"too many indices for tensor of dimension {self.ndim}",
                    IndexError,
                )
                for j in range(index.ndim):
                    check(
                        index.shape[j] == self.shape[k + j],
                        lambda: f"The shape of the mask {index.shape} at index {i} "
                        f"does not match the shape of the indexed tensor {self.shape} at index {k + j}",
                        IndexError,
                    )
                    result.append(nonzero.select(1, j))
            else:
                result.append(index)
        else:
            result.append(index)
    indices = result
    check(
        len(indices) <= self.ndim,
        lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})",
    )
    # expand_outplace
    import torch._refs as refs  # avoid import cycle in mypy
    indices = list(refs._maybe_broadcast(*indices))
    # add missing null tensors
    while len(indices) < self.ndim:
        indices.append(None)
    # hasContiguousSubspace
    # true if all non-null tensors are adjacent
    # See:
    # https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing
    # https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency
    state = 0
    has_contiguous_subspace = False
    for index in indices:
        if state == 0:
            if index is not None:
                state = 1
        elif state == 1:
            if index is None:
                state = 2
        else:
            if index is not None:
                break
    else:
        has_contiguous_subspace = True
    # transposeToFront
    # This is the logic that causes the newly inserted dimensions to show up
    # at the beginning of the tensor, if they're not contiguous
    if not has_contiguous_subspace:
        dims = []
        transposed_indices = []
        for i, index in enumerate(indices):
            if index is not None:
                dims.append(i)
                transposed_indices.append(index)
        for i, index in enumerate(indices):
            if index is None:
                dims.append(i)
                transposed_indices.append(index)
        self = self.permute(dims)
        indices = transposed_indices
    # AdvancedIndex::AdvancedIndex
    # Now we can assume the indices have contiguous subspace
    # This is simplified from AdvancedIndex which goes to more effort
    # to put the input and indices in a form so that TensorIterator can
    # take them. If we write a ref for this, probably that logic should
    # get implemented
    before_shape: List[int] = []
    after_shape: List[int] = []
    replacement_shape: List[int] = []
    for dim, index in enumerate(indices):
        if index is None:
            if replacement_shape:
                after_shape.append(self.shape[dim])
            else:
                before_shape.append(self.shape[dim])
        else:
            replacement_shape = list(index.shape)
    return self.new_empty(before_shape + replacement_shape + after_shape)
@register_meta([aten.convolution_backward.default])
def meta_convolution_backward(
    grad_output_,
    input_,
    weight_,
    bias_sizes_opt,
    stride,
    padding,
    dilation,
    transposed,
    output_padding,
    groups,
    output_mask,
):
    """Meta for convolution_backward: per output_mask flag, returns shells
    shaped like the input, weight, and bias (or None where masked off)."""
    # High level logic taken from slow_conv3d_backward_cpu which should
    # be representative of all convolution_backward impls
    backend_grad_input = None
    backend_grad_weight = None
    backend_grad_bias = None
    if output_mask[0]:
        backend_grad_input = grad_output_.new_empty(input_.size())
    if output_mask[1]:
        backend_grad_weight = grad_output_.new_empty(weight_.size())
    if output_mask[2]:
        backend_grad_bias = grad_output_.new_empty(bias_sizes_opt)
    return (backend_grad_input, backend_grad_weight, backend_grad_bias)
@register_meta([aten.addbmm.default, aten.addbmm.out])
@out_wrapper()
def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1):
    """Meta for addbmm: validates batch1 (b, n, m) @ batch2 (b, m, p), expands
    self to (n, p), and returns a shell of that shape."""
    dim1 = batch1.size(1)
    dim2 = batch2.size(2)
    self = self.expand((dim1, dim2))
    check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
    check(
        batch1.size(0) == batch2.size(0),
        lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}",
    )
    check(
        batch1.size(2) == batch2.size(1),
        lambda: (
            f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} "
            f"and {batch2.size(1)}x{batch2.size(2)})"
        ),
    )
    check(
        self.size(0) == dim1 and self.size(1) == dim2,
        lambda: "self tensor does not match matmul output shape",
    )
    return self.new_empty(self.size())
@register_meta(aten._cdist_forward.default)
def meta_cdist_forward(x1, x2, p, compute_mode):
    """Meta for _cdist_forward: validates the inputs and returns a shell of
    shape broadcast(batch1, batch2) + [r1, r2].

    BUG FIX: the two dtype error messages below were plain strings containing
    `{x1.dtype}` / `{x2.dtype}` — missing the `f` prefix, so the placeholder
    printed literally instead of the dtype.
    """
    check(
        x1.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
    )
    check(
        x2.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
    )
    check(
        x1.size(-1) == x2.size(-1),
        lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
    )
    check(
        utils.is_float_dtype(x1.dtype),
        lambda: f"cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
    )
    check(
        utils.is_float_dtype(x2.dtype),
        lambda: f"cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
    )
    check(p >= 0, lambda: "cdist only supports non-negative p values")
    check(
        compute_mode in (None, 1, 2),
        lambda: f"possible modes: None, 1, 2, but was: {compute_mode}",
    )
    r1 = x1.size(-2)
    r2 = x2.size(-2)
    batch_tensor1 = x1.shape[:-2]
    batch_tensor2 = x2.shape[:-2]
    output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
    output_shape.extend([r1, r2])
    return x1.new_empty(output_shape)
@register_meta(aten._embedding_bag.default)
def meta_embedding_bag(
    weight,
    indices,
    offsets,
    scale_grad_by_freq=False,
    mode=0,
    sparse=False,
    per_sample_weights=None,
    include_last_offset=False,
    padding_idx=-1,
):
    """Meta kernel for _embedding_bag.

    Returns (output, offset2bag, bag_size, max_indices).  The shapes of the
    three auxiliary tensors depend on the device hint (CPU vs non-CPU),
    the reduction mode, and whether the CPU "fast path" applies, mirroring
    the eager kernels in EmbeddingBag.cpp.
    """
    check(
        indices.dtype in (torch.long, torch.int),
        lambda: f"expected indices to be long or int, got {indices.dtype}",
    )
    check(
        offsets.dtype in (torch.long, torch.int),
        lambda: f"expected offsets to be long or int, got {offsets.dtype}",
    )
    check(
        utils.is_float_dtype(weight.dtype),
        lambda: f"expected weight to be floating point type, got {weight.dtype}",
    )
    num_bags = offsets.size(0)
    if include_last_offset:
        # With include_last_offset, the final offset marks the end of the last
        # bag rather than starting a new one.
        check(
            num_bags >= 1, lambda: "include_last_offset: numBags should be at least 1"
        )
        num_bags -= 1
    output = weight.new_empty(num_bags, weight.size(1))
    MODE_SUM, MODE_MEAN, MODE_MAX = range(3)
    if per_sample_weights is not None:
        # per_sample_weights is only defined for sum mode and must align
        # one-to-one with indices.
        check(
            mode == MODE_SUM,
            lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
        )
        check(
            per_sample_weights.dtype == weight.dtype,
            lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
        )
        check(
            per_sample_weights.ndim == 1,
            lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
        )
        check(
            per_sample_weights.numel() == indices.numel(),
            lambda: (
                f"expected per_sample_weights.numel() ({per_sample_weights.numel()} "
                f"to be the same as indices.numel() ({indices.numel()})"
            ),
        )
    # The fast-path predicates mirror EmbeddingBag.cpp: contiguous rows,
    # float/half weights, and no padding index.
    def is_fast_path_index_select_scale(src, scale, output, padding_idx):
        return (
            is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
        )
    def is_fast_path_index_select(src, output, padding_idx):
        return (
            (src.dtype == torch.float or src.dtype == torch.half)
            and src.stride(1) == 1
            and output.stride(1) == 1
            and padding_idx < 0
        )
    def is_fast_path(src, scale, output, padding_idx):
        if scale is not None:
            return is_fast_path_index_select_scale(src, scale, output, padding_idx)
        else:
            return is_fast_path_index_select(src, output, padding_idx)
    if device_hint(offsets) != "cpu":
        # Non-CPU layout: per-index offset2bag, per-offset bag_size.
        offset2bag = indices.new_empty(indices.size(0))
        bag_size = indices.new_empty(offsets.size())
        if mode == MODE_MAX:
            max_indices = indices.new_empty(num_bags, weight.size(1))
        else:
            max_indices = indices.new_empty(0)
    else:
        # CPU layout: offset2bag is only materialized off the sum fast path.
        fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
        if mode == MODE_MEAN or mode == MODE_MAX or not fast_path_sum:
            offset2bag = offsets.new_empty(indices.size(0))
        else:
            offset2bag = offsets.new_empty(0)
        bag_size = offsets.new_empty(num_bags)
        # This part of the logic comes from make_max_indices_out in EmbeddingBag.cpp
        numBags = offsets.shape[0]
        if mode == MODE_MAX:
            if include_last_offset:
                check(
                    numBags >= 1,
                    lambda: "include_last_offset: numBags should be at least 1",
                )
                numBags -= 1
            max_indices = offsets.new_empty(numBags, weight.shape[1])
        else:
            max_indices = offsets.new_empty(bag_size.size())
    return output, offset2bag, bag_size, max_indices
@register_meta(aten._embedding_bag_forward_only.default)
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
    """Meta kernel for _embedding_bag_forward_only.

    Delegates to meta_embedding_bag; on CPU the forward-only variant reports
    a bag_size sized like ``offsets``, so that slot is re-allocated.
    """
    result = list(meta_embedding_bag(weight, indices, offsets, *args))
    if device_hint(offsets) == "cpu":
        result[2] = offsets.new_empty(offsets.size())
    output, offset2bag, bag_size, max_indices = result
    return output, offset2bag, bag_size, max_indices
def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
# if specified, dtype takes precedence
if dtype:
return dtype
if input.dtype.is_floating_point or input.dtype.is_complex:
return input.dtype
elif promote_int_to_long:
return torch.long
return input.dtype
@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
    """Meta kernel for nansum: reduces ``dims`` into a fresh empty tensor."""
    reduce_dims = utils.reduction_dims(input.shape, dims)
    out_shape = _compute_reduction_shape(input, reduce_dims, keepdim)
    out_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
    return input.new_empty(out_shape, dtype=out_dtype)
@register_meta(aten.nanmedian.default)
def meta_nanmedian(input):
    """Meta kernel for full-reduction nanmedian: reduces over every dim."""
    all_dims = tuple(range(input.dim()))
    out_shape = utils.compute_reduction_output_shape(input.shape, all_dims)
    return input.new_empty(out_shape)
@register_meta([aten.nanmedian.dim, aten.nanmedian.dim_values])
@out_wrapper("values", "indices")
def meta_nanmedian_dim(input, dim=-1, keepdim=False):
    """Meta kernel for nanmedian.dim: allocates the (values, indices) pair."""
    reduce_dims = utils.reduction_dims(input.shape, (dim,))
    out_shape = _compute_reduction_shape(input, reduce_dims, keepdim)
    values = input.new_empty(out_shape)
    indices = input.new_empty(out_shape, dtype=torch.long)
    return values, indices
@register_meta(aten.logical_not_.default)
def meta_logical_not_(self):
    """Meta kernel for in-place logical_not_: the input aliases the result."""
    return self
@register_meta(aten.repeat.default)
def meta_repeat(self, repeats):
    """Meta kernel for Tensor.repeat: computes the tiled output shape."""
    check(
        len(repeats) >= self.dim(),
        lambda: "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor",
    )
    # Left-pad the source shape with 1s so it lines up with ``repeats``.
    padded = [1] * (len(repeats) - self.dim()) + list(self.shape)
    target = [size * rep for size, rep in zip(padded, repeats)]
    return self.new_empty(target)
@register_meta(aten.zero_.default)
def meta_zero_(self):
    """Meta kernel for in-place zero_: returns the input tensor itself."""
    return self
@register_meta(
    [
        aten.mul_.Scalar,
        aten.div_.Scalar,
        aten.mul_.Tensor,
        aten.div_.Tensor,
        aten.logical_and_.default,
        aten.logical_or_.default,
        aten.logical_xor_.default,
    ],
)
def meta_binop_inplace(self, other):
    """Shared meta kernel for in-place binary ops: the result aliases self."""
    return self
@register_meta(
    [
        aten.add_.Scalar,
        aten.sub_.Scalar,
        aten.add_.Tensor,
        aten.sub_.Tensor,
    ],
)
def meta_binop_inplace_alpha(self, other, alpha=1):
    """Shared meta kernel for in-place add/sub (with alpha): result aliases self."""
    return self
@register_meta([aten.round.default, aten.round.decimals])
def meta_round(self, **kwargs):
    """Meta kernel for round: defers to elementwise meta with default promotion."""
    return _elementwise_meta(
        self, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
    )
@register_meta(aten.zero.default)
def meta_zero(self):
    """Meta kernel for out-of-place zero: allocates a same-shaped tensor."""
    return self.new_empty(self.shape)
@register_meta([aten.fill_.Tensor, aten.fill_.Scalar])
def meta_fill_(self, val):
    """Meta kernel for in-place fill_: the result aliases self."""
    return self
@register_meta([aten.fill.Tensor, aten.fill.Scalar])
def meta_fill(self, val):
    """Meta kernel for out-of-place fill: output mirrors self's layout."""
    return torch.empty_like(self)
@register_meta(aten.relu_.default)
def meta_relu_(self):
    """Meta kernel for in-place relu_: the result aliases self."""
    return self
@register_meta(aten.index_put.default)
def meta_index_put(self, indices, values, accumulate=False):
    """Meta kernel for out-of-place index_put: output mirrors self's layout."""
    return torch.empty_like(self)
@register_meta(aten.masked_fill_.Scalar)
def meta_masked_fill_(self, mask, value):
    """Meta kernel for in-place masked_fill_: the result aliases self."""
    return self
@register_meta(aten.index_put_.default)
def meta_index_put_(self, indices, values, accumulate=False):
    """Meta kernel for in-place index_put_: the result aliases self."""
    return self
@register_meta(aten.alias.default)
def meta_alias(self):
    """Meta kernel for alias: a same-shaped view of self."""
    return self.view(self.shape)
def common_meta_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
    """Shared shape logic for the bmm and baddbmm meta kernels.

    Validates the batched-matmul shapes and allocates the
    (batch, rows, cols) output.  For baddbmm, ``self_baddbmm`` must already
    have exactly the output shape.
    """
    check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
    batch1_sizes = batch1.size()
    batch2_sizes = batch2.size()
    bs = batch1_sizes[0]
    contraction_size = batch1_sizes[2]
    res_rows = batch1_sizes[1]
    res_cols = batch2_sizes[2]
    output_size = (bs, res_rows, res_cols)
    check(
        batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
        lambda: f"Expected size for first two dimensions of batch2 tensor to be: [{bs}"
        f", {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}].",
    )
    # TODO: handle out
    output = batch2.new_empty(output_size)
    if not is_bmm and self_baddbmm is not None:
        check(self_baddbmm.dim() == 3, lambda: "self must be a 3D tensor")
        check(
            self_baddbmm.size() == output_size,
            # Fixed: the message was a plain string (no f-prefix) and referenced
            # the undefined name ``self``; interpolate the real sizes instead.
            lambda: f"Expected an input tensor shape with shape {output_size} but got shape: {self_baddbmm.size()}",
        )
    return output
@register_meta(aten.bmm.default)
def meta_bmm(self, mat2):
    """Meta kernel for bmm: delegates to the shared batched-matmul logic."""
    return common_meta_baddbmm_bmm(self, mat2, True)
def div_rtn(x, y):
    """Integer division rounding toward negative infinity (ported from C).

    Floor-divide, then step the quotient down when the remainder is nonzero
    and has the opposite sign from ``y``.  (With Python's floor-division
    semantics the correction never fires, but it mirrors the C source.)
    """
    quot, rem = divmod(x, y)
    # WARNING: explicit bool conversion here is necessary;
    # would be fixed by SymBool
    if rem != 0 and (bool(rem < 0) != bool(y < 0)):
        quot -= 1
    return quot
def pooling_output_shape_pad_lr(
    inputSize, kernelSize, pad_l, pad_r, stride, dilation, ceil_mode
):
    """Compute a pooled output length with asymmetric left/right padding.

    In ceil mode the division is biased upward by (stride - 1), and the last
    window is dropped if it would start entirely inside the right padding.
    """
    effective_kernel = dilation * (kernelSize - 1) + 1
    numerator = inputSize + pad_l + pad_r - effective_kernel
    if ceil_mode:
        numerator += stride - 1
    outputSize = div_rtn(numerator, stride) + 1
    # Ensure the last pooling window starts inside the input or left padding.
    if ceil_mode and (outputSize - 1) * stride >= inputSize + pad_l:
        outputSize -= 1
    return outputSize
def pooling_output_shape(inputSize, kernelSize, pad, stride, dilation, ceil_mode):
    """Compute a pooled output length with symmetric padding.

    Validates stride/pad, then delegates to the asymmetric-padding helper
    with pad_l == pad_r == pad.
    """
    check(stride != 0, lambda: "stride should not be zero")
    check(pad >= 0, lambda: f"pad must be non-negative, but got pad: {pad}")
    check(
        pad <= kernelSize // 2,
        lambda: f"pad should be at most half of kernel size, but got pad={pad} and kernel_size={kernelSize}",
    )
    return pooling_output_shape_pad_lr(
        inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode
    )
def pool2d_shape_check(
    input,
    kH,
    kW,
    dH,
    dW,
    padH,
    padW,
    dilationH,
    dilationW,
    nInputPlane,
    inputHeight,
    inputWidth,
    outputHeight,
    outputWidth,
    memory_format,
):
    """Validate the geometry of a 2-D pooling call.

    Checks kernel/stride/dilation positivity, input rank for the given
    memory format, padding limits, and that the computed output is nonempty.
    """
    ndim = input.dim()
    nOutputPlane = nInputPlane
    # Fixed: the next three messages were missing the f-prefix, so the
    # placeholders were printed literally instead of the actual values.
    check(
        kW > 0 and kH > 0,
        lambda: f"kernel size should be greater than zero, but got kH: {kH}, kW: {kW}",
    )
    check(
        dW > 0 and dH > 0,
        lambda: f"stride should be greater than zero, but got dH: {dH}, dW: {dW}",
    )
    check(
        dilationH > 0 and dilationW > 0,
        lambda: f"dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}",
    )
    valid_dims = input.size(1) != 0 and input.size(2) != 0
    if memory_format == torch.channels_last:
        check(
            ndim == 4 and valid_dims and input.size(3) != 0,
            # Fixed: second literal was missing the f-prefix.
            lambda: "Expected 4D (batch mode) tensor expected for input with channels_last layout"
            f" with optional 0 dim batch size for input, but got: {input.size()}",
        )
    else:
        check(
            (ndim == 3 and input.size(0) != 0 and valid_dims)
            or (ndim == 4 and valid_dims and input.size(3) != 0),
            lambda: f"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: {input.size()}",
        )
    check(
        kW // 2 >= padW and kH // 2 >= padH,
        lambda: "pad should be smaller than or equal to half of kernel size, but got "
        f"padW = {padW}, padH = {padH}, kW = {kW}, kH = {kH}",
    )
    check(
        outputWidth >= 1 and outputHeight >= 1,
        lambda: f"Given input size: ({nInputPlane}x{inputHeight}x{inputWidth}). "
        f"Calculated output size: ({nOutputPlane}x{outputHeight}x{outputWidth}). "
        "Output size is too small",
    )
def max_pool2d_checks_and_compute_shape(
    input, kernel_size, stride, padding, dilation, ceil_mode
):
    """Validate max_pool2d arguments and compute (planes, outH, outW).

    Accepts int-or-pair hyperparameters (stride may also be empty, meaning
    "same as kernel"), checks the memory-format/rank combination, and runs
    the full pool2d geometry check before returning the output extents.
    """
    # Reference: aten/src/ATen/native/DilatedMaxPool2d.cpp
    def unpack(name, val):
        # Broadcast a 1-element tuple to (H, W); 2-element is taken as-is.
        check(
            len(val) in [1, 2],
            lambda: f"max_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        H = val[0]
        W = H if len(val) == 1 else val[1]
        return H, W
    kH, kW = unpack("kernel_size", kernel_size)
    check(
        len(stride) in [0, 1, 2],
        lambda: "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    if len(stride) == 0:
        # Omitted stride defaults to the kernel size.
        dH, dW = kH, kW
    else:
        dH, dW = unpack("stride", stride)
    padH, padW = unpack("padding", padding)
    dilationH, dilationW = unpack("dilation", dilation)
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)
    memory_format = utils.suggest_memory_format(input)
    if memory_format == torch.channels_last:
        check(
            input.dim() == 4,
            lambda: "non-empty 4D (batch mode) tensor expected for input with channels_last layout",
        )
    elif memory_format == torch.contiguous_format:
        check(
            input.dim() in [3, 4],
            lambda: "non-empty 3D or 4D (batch mode) tensor expected for input",
        )
    else:
        check(
            False,
            lambda: "Unsupport memory format. Supports only ChannelsLast, Contiguous",
        )
    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        dilationH,
        dilationW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )
    return nInputPlane, outputHeight, outputWidth
@register_meta(aten.max_pool2d_with_indices_backward.default)
def meta_max_pool2d_with_indices_backward(
    grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices
):
    """Meta kernel for max_pool2d_with_indices_backward.

    Validates that grad_output/indices match the forward output geometry and
    allocates a gradient shaped like the forward input.
    """
    nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
        self, kernel_size, stride, padding, dilation, ceil_mode
    )
    check(
        self.dtype == grad_output.dtype,
        # Fixed: the message was missing the f-prefix, so the dtype
        # placeholders were printed literally.
        lambda: f"expected dtype {self.dtype} for `gradOutput` but got dtype {grad_output.dtype}",
    )
    nOutputPlane = nInputPlane
    ndim = self.ndim
    def _check_dim_size(t):
        # Both tensors must match the forward output's trailing three dims.
        check_dim_size(t, ndim, ndim - 3, nOutputPlane)
        check_dim_size(t, ndim, ndim - 2, outputHeight)
        check_dim_size(t, ndim, ndim - 1, outputWidth)
    _check_dim_size(grad_output)
    _check_dim_size(indices)
    memory_format = utils.suggest_memory_format(self)
    return torch.empty(
        self.shape, dtype=self.dtype, device=self.device, memory_format=memory_format
    )
@register_meta(aten.max_pool2d_with_indices.default)
def meta_max_pool2d_with_indices(
    input, kernel_size, stride=(), padding=(0,), dilation=(1,), ceil_mode=False
):
    """Meta kernel for max_pool2d_with_indices: allocates (values, indices)."""
    nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
        input, kernel_size, stride, padding, dilation, ceil_mode
    )
    memory_format = utils.suggest_memory_format(input)
    if input.dim() == 3:
        out_size = [nInputPlane, outputHeight, outputWidth]
    else:
        out_size = [input.size(-4), nInputPlane, outputHeight, outputWidth]
    values = torch.empty(
        out_size, dtype=input.dtype, device=input.device, memory_format=memory_format
    )
    indices = torch.empty(
        out_size, dtype=torch.int64, device=input.device, memory_format=memory_format
    )
    return (values, indices)
@register_meta(aten.grid_sampler_2d_backward.default)
def grid_sampler_2d_backward_meta(
    grad_output,
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
    output_mask,
):
    """Meta kernel for grid_sampler_2d_backward.

    grad_input is materialized (contiguous, zero-filled) only when
    output_mask[0] is set; grad_grid is always allocated.
    """
    grad_input = (
        torch.zeros_like(input, memory_format=torch.contiguous_format)
        if output_mask[0]
        else None
    )
    grad_grid = torch.empty_like(grid, memory_format=torch.contiguous_format)
    return (grad_input, grad_grid)
@register_meta([aten.full.default])
def full(size, fill_value, *args, **kwargs):
    """Meta kernel for full: only the shape matters, fill_value is ignored."""
    return torch.empty(size, *args, **kwargs)
@register_meta(
    [
        aten.randint_like.default,
        aten.randint_like.low_dtype,
        aten.randn_like.default,
        aten.rand_like.default,
        aten.full_like.default,
        aten.ones_like.default,
    ]
)
def meta_like(self, *args, **kwargs):
    """Shared meta kernel for *_like ops: shape/layout come from empty_like.

    NOTE: positional args beyond ``self`` (e.g. randint_like's bounds or
    full_like's fill value) are ignored here — only kwargs are forwarded.
    """
    return aten.empty_like.default(self, **kwargs)
# zeros_like is special cased to work for sparse
@register_meta(aten.zeros_like.default)
def zeros_like(
    self, dtype=None, layout=None, device=None, pin_memory=None, memory_format=None
):
    """Meta kernel for zeros_like, special-cased for sparse COO layout.

    For sparse outputs, an empty sparse tensor is resized to self's
    sparse/dense dimension split; otherwise defers to empty_like.
    """
    if layout == torch.sparse_coo:
        check(
            memory_format is None,
            lambda: "memory format option is only supported by strided tensors",
        )
        res = torch.empty(
            0,
            dtype=self.dtype if dtype is None else dtype,
            layout=layout,
            device=self.device if device is None else device,
            pin_memory=pin_memory,
        )
        if self.is_sparse:
            # Preserve the input's sparse/dense dim split.
            res.sparse_resize_and_clear_(
                self.size(), self.sparse_dim(), self.dense_dim()
            )
        else:
            # Dense input: all dims become sparse dims, no dense dims.
            res.sparse_resize_and_clear_(self.size(), self.dim(), 0)
        res._coalesced_(True)
        return res
    return aten.empty_like.default(
        self,
        dtype=dtype,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        memory_format=memory_format,
    )
# hacky: Please remove after math.ceil works with arange
@register_meta(aten.arange.default)
def arange(end, **kwargs):
    """Meta kernel for arange(end): allocates ceil(end) elements.

    Without an explicit dtype, an integral ``end`` yields int64, matching
    arange's eager type inference.
    """
    if isinstance(end, FloatLike):
        end = math.ceil(end)  # type: ignore[arg-type]
    def is_integral(x):
        # bool is checked explicitly since it is also an int subclass.
        return isinstance(x, IntLike) or isinstance(x, bool)
    set_to_integral_dtype = kwargs.get("dtype", None) is None and is_integral(end)
    if set_to_integral_dtype:
        kwargs["dtype"] = torch.int64
    return aten.empty([end], **kwargs)
@register_meta(aten.arange.start)
def arange_start(start, end, **kwargs):
    """Meta kernel for arange(start, end): same as arange over the length."""
    return aten.arange(end - start, **kwargs)
@register_meta(aten.select.int)
def meta_select(self, dim, index):
    """Meta kernel for select: returns a view with ``dim`` removed.

    The result shares storage via as_strided: the selected index is folded
    into the storage offset and the chosen dim's size/stride are dropped.
    """
    ndim = self.dim()
    check(
        ndim != 0, lambda: "select() cannot be applied to a 0-dim tensor.", IndexError
    )
    # Wrap a negative dim before reading the size along it.
    dim = dim if dim >= 0 else dim + ndim
    size = self.size(dim)
    check(
        not (-index > size or index >= size),
        lambda: f"select(): index {index} out of range for tensor of size "
        f"{self.size()} at dimension {dim}",
        IndexError,
    )
    # Wrap a negative index, then bake it into the storage offset.
    index = index if index >= 0 else index + size
    new_size = list(self.size())
    new_stride = list(self.stride())
    new_storage_offset = self.storage_offset() + index * new_stride[dim]
    del new_size[dim]
    del new_stride[dim]
    return self.as_strided(new_size, new_stride, new_storage_offset)
@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
    """Meta kernel for select_scatter: output mirrors self, strides included."""
    return utils.clone_preserve_strides(self)
@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
    """Meta kernel for slice_scatter: output mirrors self, strides included."""
    return utils.clone_preserve_strides(self)
# TODO: Deduplicate this with canonicalize_dim
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
    """Normalize a possibly-negative dim index into [0, dim_post_expr).

    Scalars (dim_post_expr <= 0) are treated as 1-dimensional when
    ``wrap_scalar`` is set; out-of-range dims trip an assertion.
    """
    if dim_post_expr <= 0:
        assert wrap_scalar
        dim_post_expr = 1
    lo = -dim_post_expr
    hi = dim_post_expr - 1
    assert lo <= dim <= hi, f"dim {dim} out of bounds ({lo}, {hi})"
    return dim + dim_post_expr if dim < 0 else dim
def ensure_nonempty_size(t, dim):
    """Size of ``t`` along ``dim``, treating a 0-dim tensor as size 1."""
    if t.dim() == 0:
        return 1
    return t.shape[dim]
# From aten/src/ATen/native/ScatterGatherChecks.h
def gather_shape_check(self, dim, index):
    """Check gather's shape contract.

    ``index`` must have the same (nonempty-adjusted) rank as ``self`` and
    be no larger than ``self`` in every dimension except ``dim``.
    """
    self_dims = max(self.dim(), 1)
    index_dims = max(index.dim(), 1)
    check(
        self_dims == index_dims,
        lambda: "Index tensor must have the same number of dimensions as input tensor",
    )
    for i in range(self_dims):
        if i != dim:
            check(
                ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
                lambda: f"Size does not match at dimension {i} expected index {index.shape}"
                + f" to be smaller than self {self.shape} apart from dimension {dim}",
            )
@register_meta(aten.gather.default)
def meta_gather(self, dim, index, sparse_grad=False):
    """Meta kernel for gather: output takes index's shape and self's dtype."""
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    if index.numel() != 0:
        check(
            index.dtype == torch.long,
            lambda: f"gather(): Expected dtype int64 for index, but got {index.dtype}",
        )
        gather_shape_check(self, wrapped_dim, index)
    return self.new_empty(index.shape)
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def get_operator_enum(reduce_, use_new_options=False):
    """Map a user-facing reduce string to the internal REDUCE_* enum name.

    The new-options path (scatter_reduce) accepts sum/prod/mean/amax/amin;
    the legacy path (scatter with reduce=) accepts add/multiply.  Unknown
    strings fail through check() and yield None.
    """
    if use_new_options:
        table = {
            "sum": "REDUCE_ADD",
            "prod": "REDUCE_MULTIPLY",
            "mean": "REDUCE_MEAN",
            "amax": "REDUCE_MAXIMUM",
            "amin": "REDUCE_MINIMUM",
        }
        err_msg = "reduce argument must be either sum, prod, mean, amax or amin."
    else:
        table = {
            "add": "REDUCE_ADD",
            "multiply": "REDUCE_MULTIPLY",
        }
        err_msg = "reduce argument must be either add or multiply."
    result = table.get(reduce_)
    if result is not None:
        return result
    check(False, lambda: err_msg)
    return
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_gather_dtype_check(method_name, self, index, src_opt=None):
    """Validate scatter/gather dtypes: int64 index, src matching self.

    An empty index skips the index dtype check; a missing src skips the
    dtype-equality check.
    """
    if index.numel() != 0:
        check(
            index.dtype == torch.long,
            lambda: f"{method_name}(): Expected dtype int64 for index",
        )
    if src_opt is not None:
        check(
            self.dtype == src_opt.dtype,
            lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype",
        )
def ensure_nonempty_dim(dim):
    """Clamp a dimension count up to at least 1 (scalars act as 1-D)."""
    return dim if dim > 1 else 1
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_shape_check(self, dim, index, src_opt=None):
    """Check scatter's shape contract.

    ``index`` must match self's (nonempty-adjusted) rank and be no larger
    than self — and, when given, src — in every dimension except ``dim``.
    An empty index passes vacuously.
    """
    if index.numel() == 0:
        return
    check(
        ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
        lambda: "Index tensor must have the same number of dimensions as self tensor",
    )
    is_wrong_shape = False
    self_dims = ensure_nonempty_dim(self.dim())
    # Check: index.size(d) <= self.size(d) for all d != dim
    for d in range(self_dims):
        index_d_size = ensure_nonempty_size(index, d)
        if d == dim:
            continue
        if index_d_size > ensure_nonempty_size(self, d):
            is_wrong_shape = True
            break
    # Check: index.size(d) <= src.size(d) for all d if src is Tensor
    if not is_wrong_shape and src_opt is not None:
        for d in range(self_dims):
            index_d_size = ensure_nonempty_size(index, d)
            if index_d_size > ensure_nonempty_size(src_opt, d):
                is_wrong_shape = True
                break
    if src_opt is not None:
        check(
            ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
            lambda: "Index tensor must have the same number of dimensions as self tensor",
        )
        check(
            not is_wrong_shape,
            lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
            + f" apart from dimension {dim} and to be smaller than src {src_opt.shape}",
        )
    else:
        check(
            not is_wrong_shape,
            lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
            + f" apart from dimension {dim}",
        )
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def scatter_meta_impl(self, dim, index, src=None, reduce_=None, use_new_options=False):
    """Shared validation for the scatter family of meta kernels."""
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    scatter_gather_dtype_check("scatter", self, index, src)
    scatter_shape_check(self, wrapped_dim, index, src)
    if reduce_ is None:
        return
    # Validates the reduce string; errors through check() on bad input.
    get_operator_enum(reduce_, use_new_options)
@register_meta(aten.scatter_add.default)
def meta_scatter_add(self, dim, index, src):
    """Meta kernel for out-of-place scatter_add: output shaped like self."""
    scatter_meta_impl(self, dim, index, src, "add")
    return self.new_empty(self.shape)
@register_meta(aten.scatter_add_)
def meta_scatter_add_(self, dim, index, src):
    """Meta kernel for in-place scatter_add_: result aliases self."""
    scatter_meta_impl(self, dim, index, src, "add")
    return self
@register_meta(
    [
        aten.scatter.src,
        aten.scatter.value,
        aten.scatter.reduce,
        aten.scatter.value_reduce,
    ]
)
@out_wrapper()
def meta_scatter(self, dim, index, src_or_value, reduce=None):
    """Meta kernel for out-of-place scatter: validates and allocates like self."""
    # A Tensor src participates in dtype/shape checks; a scalar value does not.
    src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
    scatter_meta_impl(self, dim, index, src, reduce)
    return self.new_empty(self.shape)
@register_meta(
    [
        aten.scatter_.src,
        aten.scatter_.value,
        aten.scatter_.reduce,
        aten.scatter_.value_reduce,
    ]
)
def meta_scatter_(self, dim, index, src_or_value, reduce=None):
    """Meta kernel for in-place scatter_: result aliases self."""
    # A Tensor src participates in dtype/shape checks; a scalar value does not.
    src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
    scatter_meta_impl(self, dim, index, src, reduce)
    return self
@register_meta(
    [
        aten._scaled_dot_product_flash_attention,
    ]
)
def meta__scaled_dot_product_flash(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    dropout_p: float = 0.0,
    is_causal: bool = False,
):
    """Meta kernel for the flash-attention SDPA variant.

    Returns (output, logsumexp) laid out as the CUDA kernel produces them;
    the rounding below mirrors the flash kernel's block bookkeeping.
    """
    batch_size = query.size(0)
    num_heads = query.size(1)
    max_seqlen_batch_q = query.size(2)
    head_dim = query.size(3)
    max_seqlen_batch_k = key.size(2)
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)
    # Flash attention packs (batch * seq_q) rows into a 3-D buffer, then
    # views it back to (B, H, seq_q, head_dim).
    Nnz_q = batch_size * max_seqlen_batch_q
    packed = torch.empty(
        (Nnz_q, num_heads, head_dim), dtype=query.dtype, device=query.device
    )
    attention = packed.view(
        batch_size, max_seqlen_batch_q, num_heads, head_dim
    ).transpose(1, 2)
    # logsumexp rows are padded to a multiple of 16.
    max_seqlen_q = math.ceil(max_seqlen_batch_q / 16) * 16
    logsumexp = torch.empty(
        (batch_size, num_heads, max_seqlen_q),
        dtype=torch.float,
        device=query.device,
    )
    is_sm80 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)
    is_sm75 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5)
    head_size_rounded = 64 if head_dim <= 64 else 128
    blocksize_c = (
        128
        if (head_size_rounded == 128 and (dropout_p != 0.0 or not is_sm80))
        or (is_sm75 and head_size_rounded == 64 and dropout_p != 0.0)
        else 256
    )
    # NOTE(review): max_seqlen_k is computed but unused in the return; kept
    # to match the kernel's bookkeeping — confirm before removing.
    max_seqlen_k = math.ceil(max_seqlen_batch_k / blocksize_c) * blocksize_c
    if max_seqlen_k <= 128:
        max_seqlen_k = 128
    elif max_seqlen_k <= 256:
        max_seqlen_k = 256
    return attention, logsumexp
@register_meta(
    [
        aten._scaled_dot_product_efficient_attention,
    ]
)
def meta__scaled_dot_product_efficient(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    compute_log_sumexp: bool,
    is_causal: bool = False,
):
    """Meta kernel for the memory-efficient SDPA variant.

    Returns (output, logsumexp); logsumexp is only sized (padded to a
    multiple of 32) when compute_log_sumexp is requested.
    """
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)
    B = query.size(0)
    M = query.size(1)
    N = key.size(1)
    num_heads = query.size(-2)
    K = query.size(-1)
    Kv = value.size(-1)
    res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device)
    logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
    logsum_exp = torch.empty(
        (B, num_heads, logsumexp_dim),
        dtype=torch.float,
        device=query.device,
    )
    return res.transpose(1, 2), logsum_exp
@register_meta(
    [
        aten._scaled_dot_product_efficient_attention_backward,
    ]
)
def meta__scaled_dot_product_efficient_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    out: Tensor,
    logsumexp: Tensor,
    is_causal: bool = False,
    chunk_grad_outputs=False,
):
    """Meta kernel for efficient-attention backward.

    Returns (grad_q, grad_k, grad_v), each transposed back to the caller's
    (B, H, seq, dim) layout.  When chunk_grad_outputs is set, all three
    gradients are views into one contiguous (B, M, 3, nH, K) chunk.
    """
    grad_out = grad_out.transpose(1, 2)
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)
    B = query.size(0)
    M = query.size(1)
    N = key.size(1)
    nH = query.size(2)
    K = query.size(3)
    # Causal masking with more keys than queries leaves unmasked-key rows
    # untouched, so grad_k/grad_v must start zeroed.
    grad_kv_needs_init = is_causal and N > M
    if chunk_grad_outputs:
        chunk = torch.empty((B, M, 3, nH, K), dtype=query.dtype, device=query.device)
        grad_q = chunk.select(2, 0)
        grad_k = chunk.select(2, 1)
        grad_v = chunk.select(2, 2)
    else:
        grad_q = torch.empty(query.shape, dtype=query.dtype, device=query.device)
        grad_k = (
            torch.zeros(key.shape, dtype=key.dtype, device=key.device)
            if grad_kv_needs_init
            else torch.empty(key.shape, dtype=key.dtype, device=key.device)
        )
        grad_v = (
            torch.zeros(value.shape, dtype=value.dtype, device=value.device)
            if grad_kv_needs_init
            else torch.empty(value.shape, dtype=value.dtype, device=value.device)
        )
    return grad_q.transpose(1, 2), grad_k.transpose(1, 2), grad_v.transpose(1, 2)
@register_meta([aten.scatter_reduce.two, aten.scatter_reduce.two_out])
@out_wrapper()
def meta_scatter_reduce_two(self, dim, index, src, reduce, include_self=True):
    """Meta kernel for out-of-place scatter_reduce: allocates like self."""
    scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
    return self.new_empty(self.shape)
@register_meta(aten.scatter_reduce_.two)
def meta_scatter_reduce__two(self, dim, index, src, reduce, include_self=True):
    """Meta kernel for in-place scatter_reduce_: result aliases self."""
    scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
    return self
def multiply_integers(vs):
    """Product of an iterable of integers (1 for empty input)."""
    return math.prod(vs)
def upsample_common_check(input_size, output_size, num_spatial_dims):
    """Validate an upsample call and build the full output shape.

    ``input_size`` is (N, C, *spatial); ``output_size`` lists only the
    spatial extents.  Returns (N, C, *output_size).
    """
    check(
        len(output_size) == num_spatial_dims,
        lambda: f"It is expected output_size equals to {num_spatial_dims}, but got size {len(output_size)}",
    )
    expected_input_dims = num_spatial_dims + 2  # N, C, ...
    check(
        len(input_size) == expected_input_dims,
        lambda: f"It is expected input_size equals to {expected_input_dims}, but got size {len(input_size)}",
    )
    check(
        all([s > 0 for s in input_size[2:]]) and all([s > 0 for s in output_size]),
        lambda: f"Input and output sizes should be greater than 0, but got "
        f"input size {input_size} and output size {output_size}",
    )
    nbatch, channels = input_size[:2]
    return (nbatch, channels, *output_size)
@register_meta(aten.upsample_nearest1d.default)
def upsample_nearest1d(input, output_size, scales=None):
    """Meta kernel for upsample_nearest1d: (N, C, L_out) in input's memory format."""
    check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        # Fixed: the message was missing the f-prefix, so the size never
        # interpolated.
        lambda: f"Non-empty 3D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=1
    )
    return input.new_empty(full_output_size).to(
        memory_format=utils.suggest_memory_format(input)
    )
@register_meta(aten.upsample_nearest2d.default)
def upsample_nearest2d(input, output_size, scales_h=None, scales_w=None):
    """Meta kernel for upsample_nearest2d: (N, C, H_out, W_out).

    Mirrors the CUDA heuristic that falls back to contiguous layout for
    few-channel inputs.
    """
    check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        # Fixed: the message was missing the f-prefix, so the size never
        # interpolated.
        lambda: f"Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=2
    )
    output = input.new_empty(full_output_size)
    # convert output to correct memory format, if necessary
    memory_format = utils.suggest_memory_format(input)
    # following "heuristic: only use channels_last path when it's faster than the contiguous path"
    _, n_channels, _, _ = input.shape
    if input.device.type == "cuda" and n_channels < 4:
        memory_format = torch.contiguous_format
    output = output.contiguous(memory_format=memory_format)
    return output
@register_meta(aten.upsample_nearest3d.default)
def upsample_nearest3d(input, output_size, scales_d=None, scales_h=None, scales_w=None):
    """Meta kernel for upsample_nearest3d: (N, C, D_out, H_out, W_out)."""
    check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        # Fixed: the message was missing the f-prefix, so the size never
        # interpolated.
        lambda: f"Non-empty 5D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=3
    )
    return input.new_empty(full_output_size).to(
        memory_format=utils.suggest_memory_format(input)
    )
@register_meta([aten.sort.default, aten.sort.stable])
def meta_sort(self, stable=None, dim=-1, descending=False):
    """Meta kernel for sort: allocates (values, indices) shaped like self."""
    values = torch.empty_like(self)
    indices = torch.empty_like(self, dtype=torch.int64)
    return values, indices
def rnn_cell_checkSizes(
    input_gates, hidden_gates, input_bias, hidden_bias, factor, prev_hidden
):
    """Validate argument shapes for the fused RNN-cell meta kernels.

    ``factor`` is the number of gates per hidden unit (e.g. 4 for LSTM), so
    prev_hidden must hold input_gates.size(0) * gates_size // factor elements.
    Biases may be None (the default at the call sites) and are skipped where
    absent.
    """
    check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2")
    check(
        input_gates.shape == hidden_gates.shape,
        lambda: f"{input_gates.shape} != {hidden_gates.shape}",
    )
    gates_size = input_gates.size(1)
    if input_bias is not None:
        check(input_bias.ndim == 1, lambda: f"{input_bias.ndim} != 1")
        check(
            input_bias.numel() == gates_size,
            lambda: f"{input_bias.numel()} != {gates_size}",
        )
        # NOTE(review): assumes hidden_bias is provided whenever input_bias
        # is — confirm the callers always pass both or neither.
        check(
            input_bias.shape == hidden_bias.shape,
            lambda: f"{input_bias.shape} != {hidden_bias.shape}",
        )
    check(prev_hidden.ndim == 2, lambda: f"{prev_hidden.ndim} != 2")
    expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor
    check(
        prev_hidden.numel() == expected_prev_hidden_numel,
        lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected_prev_hidden_numel})",
    )
    # Fixed: the original iterated over possibly-None biases and crashed on
    # ``None.device`` when biases were omitted; skip absent biases instead.
    check(
        all(
            x.device == input_gates.device
            for x in [hidden_gates, input_bias, hidden_bias, prev_hidden]
            if x is not None
        ),
        lambda: "expected all inputs to be same device",
    )
@register_meta(aten._thnn_fused_lstm_cell.default)
def _thnn_fused_lstm_cell_meta(
    input_gates, hidden_gates, cx, input_bias=None, hidden_bias=None
):
    """Meta kernel for the fused LSTM cell: allocates (hy, cy, workspace)."""
    # 4 gates per hidden unit for an LSTM (input/forget/cell/output).
    rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx)
    workspace = torch.empty_like(input_gates, memory_format=torch.contiguous_format)
    hy = torch.empty_like(cx, memory_format=torch.contiguous_format)
    cy = torch.empty_like(cx, memory_format=torch.contiguous_format)
    return (hy, cy, workspace)
@register_meta(aten._cudnn_rnn.default)
def _cudnn_rnn(
    input,
    weight,
    weight_stride0,
    weight_buf,
    hx,
    cx,
    mode,
    hidden_size,
    proj_size,
    num_layers,
    batch_first,
    dropout,
    train,
    bidirectional,
    batch_sizes,
    dropout_state,
):
    """Meta kernel for the cuDNN RNN: allocates output/hidden/cell/reserve.

    Handles both packed (nonempty batch_sizes) and padded input layouts;
    with a projection (proj_size != 0) the per-direction output width is
    proj_size rather than hidden_size.
    """
    is_input_packed = len(batch_sizes) != 0
    if is_input_packed:
        seq_length = len(batch_sizes)
        mini_batch = batch_sizes[0]
        batch_sizes_sum = input.shape[0]
    else:
        seq_length = input.shape[1] if batch_first else input.shape[0]
        mini_batch = input.shape[0] if batch_first else input.shape[1]
        batch_sizes_sum = -1
    num_directions = 2 if bidirectional else 1
    out_size = proj_size if proj_size != 0 else hidden_size
    if is_input_packed:
        out_shape = [batch_sizes_sum, out_size * num_directions]
    else:
        out_shape = (
            [mini_batch, seq_length, out_size * num_directions]
            if batch_first
            else [seq_length, mini_batch, out_size * num_directions]
        )
    output = input.new_empty(out_shape)
    cell_shape = [num_layers * num_directions, mini_batch, hidden_size]
    if cx is None:
        cy = torch.empty(0, device=input.device)
    else:
        cy = cx.new_empty(cell_shape)
    hy = hx.new_empty([num_layers * num_directions, mini_batch, out_size])
    # TODO: Query cudnnGetRNNTrainingReserveSize (expose to python)
    # NOTE(review): both branches yield 0, so the reserve is always empty
    # until the TODO above is addressed.
    reserve_shape = 0 if train else 0
    reserve = input.new_empty(reserve_shape, dtype=torch.uint8)
    return output, hy, cy, reserve, weight_buf
@register_meta(aten.mkldnn_rnn_layer.default)
def mkldnn_rnn_layer(
    input,
    w0,
    w1,
    w2,
    w3,
    hx_,
    cx_,
    reverse,
    batch_sizes,
    mode,
    hidden_size,
    num_layers,
    has_biases,
    bidirectional,
    batch_first,
    train,
):
    """Meta kernel for one MKLDNN RNN layer: (output, hy, cy, workspace)."""
    if batch_first:
        mini_batch, seq_length = input.shape[0], input.shape[1]
        out_shape = [mini_batch, seq_length, hidden_size]
    else:
        seq_length, mini_batch = input.shape[0], input.shape[1]
        out_shape = [seq_length, mini_batch, hidden_size]
    output = input.new_empty(out_shape)
    # Hidden/cell states mirror their inputs; absent states become empty.
    hy = (
        torch.empty(0, device=input.device)
        if hx_ is None
        else hx_.new_empty(hx_.shape)
    )
    cy = (
        torch.empty(0, device=input.device)
        if cx_ is None
        else cx_.new_empty(cx_.shape)
    )
    workspace = torch.empty(0, device=input.device, dtype=torch.uint8)
    return output, hy, cy, workspace
def zero_numel_check_dims(self, dim, fn_name):
    """Reject reductions over an empty slice, mirroring ATen's error behaviour."""
    if self.ndim == 0:
        # 0-d tensors only admit dim 0 or -1.
        check(
            dim == 0 or dim == -1,
            lambda: f"{fn_name}: Expected reduction dim -1 or 0 for scalar but got {dim}",
            IndexError,
        )
        return
    check(
        self.size(dim) != 0,
        lambda: f"{fn_name}: Expected reduction dim {dim} to have non-zero size.",
        IndexError,
    )
# From aten/src/ATen/native/ReduceOps.cpp
def check_argmax_argmin(name, self, dim):
    """Validate `dim` for argmax/argmin, or require a non-empty input when dim is None."""
    if dim is None:
        check(
            self.numel() != 0,
            lambda: f"{name}: Expected reduction dim to be specified for input.numel() == 0.",
        )
        return
    wrapped = maybe_wrap_dim(dim, self.dim())
    zero_numel_check_dims(self, wrapped, name)
@register_meta([aten.argmax.default, aten.argmin.default])
def argmax_argmin_meta(self, dim=None, keepdim=False):
    """Meta kernel for argmax/argmin: int64 output with the reduced shape."""
    # NOTE: the "argmax" label is used in the error message for both ops.
    check_argmax_argmin("argmax", self, dim)
    reduce_dims = utils.reduction_dims(self.shape, None if dim is None else (dim,))
    out_shape = _compute_reduction_shape(self, reduce_dims, keepdim)
    return self.new_empty(out_shape, dtype=torch.int64)
@register_meta(aten.scalar_tensor.default)
def scalar_tensor(s, dtype=None, layout=None, device=None, pin_memory=None):
    """Meta kernel for aten.scalar_tensor: a 0-dim tensor with the given options.

    The value `s` is irrelevant at the meta level; only shape/dtype matter.
    """
    result = torch.empty(
        (), dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
    return result
@register_meta(aten.topk.default)
def topk_meta(self, k, dim=-1, largest=True, sorted=True):
    """Meta kernel for topk: returns (values, int64 indices) buffers."""
    # From aten/src/ATen/native/Sorting.cpp
    wrapped_dim = maybe_wrap_dim(dim, self.dim(), wrap_scalar=True)
    slice_size = self.size(wrapped_dim) if self.dim() > 0 else 1
    check(
        0 <= k <= slice_size,
        lambda: "selected index k out of range",
    )
    check(0 <= k <= slice_size, lambda: "k not in range for dimension")
    out_shape = list(self.shape)
    if out_shape:
        out_shape[wrapped_dim] = k
    return self.new_empty(out_shape), self.new_empty(out_shape, dtype=torch.int64)
# Alias matching C++'s LEGACY_CONTIGUOUS_MEMORY_FORMAT (plain contiguous).
legacy_contiguous_memory_format = torch.contiguous_format
# From aten/src/ATen/native/cuda/RNN.cu
def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace):
    """Validate that LSTM backward inputs all share one (batch, hidden) shape."""
    reference = grad_cy if grad_hy is None else grad_hy
    check(reference.dim() == 2, lambda: "")
    expected = reference.size()
    for maybe_grad in (grad_hy, grad_cy):
        if maybe_grad is not None:
            check(maybe_grad.size() == expected, lambda: "")
    check(cx.size() == expected, lambda: "")
    check(cy.size() == expected, lambda: "")
    # The workspace holds the 4 gate activations per cell element.
    check(workspace.dim() == 2, lambda: "")
    check(workspace.numel() == expected[0] * expected[1] * 4, lambda: "")
# From aten/src/ATen/native/cuda/RNN.cu
@register_meta(aten._thnn_fused_lstm_cell_backward_impl.default)
def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias):
    """Meta kernel for the fused LSTM cell backward pass."""
    # Nothing to differentiate when neither output gradient is provided.
    if grad_hy is None and grad_cy is None:
        return None, None, None
    checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)
    grad_gates = torch.empty_like(
        workspace, memory_format=legacy_contiguous_memory_format
    )
    grad_cx = torch.empty_like(cx, memory_format=legacy_contiguous_memory_format)
    if has_bias:
        grad_bias = grad_gates.sum(0, keepdim=False)
    else:
        grad_bias = None
    return grad_gates, grad_cx, grad_bias
@register_meta(aten.pixel_shuffle.default)
def meta_pixel_shuffle(self, upscale_factor):
    """Meta kernel for aten.pixel_shuffle: computes only the output shape.

    Rearranges a (..., C*r*r, H, W) input into (..., C, H*r, W*r) with
    r == upscale_factor; no data is moved at the meta level.
    """
    assert (
        len(self.shape) > 2 and self.shape[-3] % (upscale_factor * upscale_factor) == 0
    ), f"Invalid input shape for pixel_shuffle: {self.shape} with upscale_factor = {upscale_factor}"
    def is_channels_last(ten):
        # True when the suggested layout for `ten` is channels-last (NHWC).
        return torch._prims_common.suggest_memory_format(ten) == torch.channels_last
    def pick_memory_format():
        # Choose the output layout from the input layout and device.
        # NOTE(review): falls through (returns None) when no branch matches —
        # presumably unreachable for valid inputs; confirm against the eager kernel.
        if is_channels_last(self):
            if device_hint(self) == "cuda":
                return torch.contiguous_format
            else:
                return torch.channels_last
        elif self.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif self.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format
    # Output channel count shrinks by r*r while each spatial dim grows by r.
    C = self.shape[-3] // (upscale_factor * upscale_factor)
    Hr = self.shape[-2] * upscale_factor
    Wr = self.shape[-1] * upscale_factor
    out_shape = (*self.shape[:-3], C, Hr, Wr)
    out = self.new_empty(out_shape)
    out = out.to(memory_format=pick_memory_format())  # type: ignore[call-overload]
    return out
@register_meta(aten.mkldnn_rnn_layer_backward.default)
def mkldnn_rnn_layer_backward(
    input,
    weight0,
    weight1,
    weight2,
    weight3,
    hx_,
    cx_tmp,
    output,
    hy_,
    cy_,
    grad_output_r_opt,
    grad_hy_r_opt,
    grad_cy_r_opt,
    reverse,
    mode,
    hidden_size,
    num_layers,
    has_biases,
    train,
    bidirectional,
    batch_sizes,
    batch_first,
    workspace,
):
    """Meta kernel for mkldnn_rnn_layer backward: gradients shaped like their inputs."""
    grad_input = input.new_empty(input.shape)
    grad_hx = hx_.new_empty(hx_.shape)
    grad_cx = cx_tmp.new_empty(cx_tmp.shape)
    grad_w_ih = weight0.new_empty(weight0.shape)
    grad_w_hh = weight1.new_empty(weight1.shape)
    grad_bias = weight2.new_empty(weight2.shape)
    # The same bias-gradient buffer is returned for both bias slots.
    return grad_input, grad_w_ih, grad_w_hh, grad_bias, grad_bias, grad_hx, grad_cx
# We must also trigger meta registrations from PrimTorch ref
# decompositions
import torch._refs
import torch._refs.nn.functional
import torch._refs.special
def activate_meta():
    """Install every collected meta/decomposition kernel on its op's Meta key."""
    activate_meta_table = {}
    # For a given op, we pick the most specific decomp function from
    # global_decomp_table in the precedence order of meta > post_autograd > pre_autograd
    for type in ["meta", "post_autograd", "pre_autograd"]:
        registry = global_decomposition_table[type]
        for opo in registry:
            if opo not in activate_meta_table:
                activate_meta_table[opo] = registry[opo]
    for op_overload, fn in activate_meta_table.items():
        assert isinstance(op_overload, OpOverload)
        # Always make the kernel reachable via the Python dispatcher.
        op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)
        if torch._C._dispatch_has_kernel_for_dispatch_key(
            op_overload.name(), "CompositeImplicitAutograd"
        ):
            # Internally, we shouldn't be registering meta kernels for any operators that
            # have CompositeImplicitAutograd kernels.
            # Instead, we should be letting those decompositions run, and writing meta kernels
            # only for the base operators.
            if op_overload in global_decomposition_table["meta"]:
                raise RuntimeError(
                    f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
                    "register meta function for it. Instead, we should let the decomposition run and write "
                    "meta kernels for the base operators."
                )
            pass
        elif op_overload.is_view:
            # Attempting to register a python meta kernel for a view operator.
            # We shouldn't do this, because the output will report as not having aliased storages.
            # All view ops have meta kernels in C++ today, so we should use those instead.
            pass
        elif op_overload.name() in {
            "aten::empty_strided",  # causing infinite recursion, test_meta.py
            "aten::clone",  # causing infinite recursion
            "aten::_to_copy",  # causing infinite recursion, test_serialization.py -k test_tensor_subclass_getstate_overwrite  # noqa: B950
            "aten::copy_",  # Exception not raised, test_torch.py -k test_storage_meta_errors_cpu_int64  # noqa: B950
            "aten::constant_pad_nd",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_amp_istft_cuda_float32  # noqa: B950
            "aten::rot90",  # requires_grad mismatch! test_ops.py -k test_fake_crossref_backward_amp_rot90_cuda_float32  # noqa: B950
            "aten::as_strided_scatter",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_no_amp_as_strided_scatter_cuda_float32  # noqa: B950
        }:
            # Known-bad registrations: skip them (see the per-entry test notes above).
            pass
        else:
            # Route mkldnn::/mkl:: namespaced ops to their dedicated libraries.
            if "mkldnn::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
            elif "mkl::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
            else:
                _meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)
activate_meta()
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
3d81722193663c5a56f5fcd5a04e2b8555f01b4e
|
c8471c09e9bf146a5dcec34a381e5ea4fcf7d1c2
|
/day1_star2.py
|
3440901f68795b5c27e1fb7b59a4eb553e45816b
|
[] |
no_license
|
cmueh/AoC19
|
dea8456bb814b99efc53ce16fab6f488d7e4f896
|
2c219e221bddb33616ea5dcaa4824820fdb65598
|
refs/heads/master
| 2020-09-23T02:35:22.233081
| 2019-12-07T16:36:43
| 2019-12-07T16:36:43
| 225,380,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
def calc(n):
    """Return the total fuel required for a module of mass ``n``.

    Fuel itself needs fuel: repeatedly apply mass // 3 - 2 until the result
    is no longer positive, summing the positive contributions
    (Advent of Code 2019, day 1, part 2).
    """
    total = 0
    while True:
        n = n // 3 - 2
        if n <= 0:
            break
        total += n
    return total


# Sum the fuel requirement over every module mass in the input file.
# `with` guarantees the handle is closed (the original leaked it).
with open('input.txt') as f:
    print(sum(calc(int(line)) for line in f))
|
[
"noreply@github.com"
] |
cmueh.noreply@github.com
|
eafd957ce79a41486e0e5515ec49bc2b2ecf71e1
|
7ef991ef5d595ef987eb9ef74d5ed0469bdbb39e
|
/examples/ensemble/plot_monotonic_constraints.py
|
8b3f69f1d542e9a891b51bb11f879321501d143a
|
[
"BSD-3-Clause"
] |
permissive
|
ogrisel/scikit-learn-github-actions
|
1762eb7961dabfd60c574c54b6b4ed1044ca986f
|
d88deccde974c70d8d89b9c880a809d53c2e32eb
|
refs/heads/master
| 2023-03-05T10:32:07.363536
| 2020-12-01T12:20:55
| 2020-12-01T12:20:55
| 317,551,992
| 3
| 3
|
BSD-3-Clause
| 2021-02-08T19:28:13
| 2020-12-01T13:36:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
"""
=====================
Monotonic Constraints
=====================
This example illustrates the effect of monotonic constraints on a gradient
boosting estimator.
We build an artificial dataset where the target value is in general
positively correlated with the first feature (with some random and
non-random variations), and in general negatively correlated with the second
feature.
By imposing a positive (increasing) or negative (decreasing) constraint on
the features during the learning process, the estimator is able to properly
follow the general trend instead of being subject to the variations.
This example was inspired by the `XGBoost documentation
<https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html>`_.
"""
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.inspection import plot_partial_dependence
import numpy as np
import matplotlib.pyplot as plt
print(__doc__)
rng = np.random.RandomState(0)
n_samples = 5000
f_0 = rng.rand(n_samples) # positive correlation with y
f_1 = rng.rand(n_samples) # negative correlation with y
X = np.c_[f_0, f_1]
noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
y = (5 * f_0 + np.sin(10 * np.pi * f_0) -
5 * f_1 - np.cos(10 * np.pi * f_1) +
noise)
fig, ax = plt.subplots()
# Without any constraint
gbdt = HistGradientBoostingRegressor()
gbdt.fit(X, y)
disp = plot_partial_dependence(
gbdt,
X,
features=[0, 1],
line_kw={"linewidth": 4, "label": "unconstrained", "color": "tab:blue"},
ax=ax,
)
# With positive and negative constraints
gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, -1])
gbdt.fit(X, y)
plot_partial_dependence(
gbdt,
X,
features=[0, 1],
feature_names=(
"First feature\nPositive constraint",
"Second feature\nNegtive constraint",
),
line_kw={"linewidth": 4, "label": "constrained", "color": "tab:orange"},
ax=disp.axes_,
)
for f_idx in (0, 1):
disp.axes_[0, f_idx].plot(
X[:, f_idx], y, "o", alpha=0.3, zorder=-1, color="tab:green"
)
disp.axes_[0, f_idx].set_ylim(-6, 6)
plt.legend()
fig.suptitle("Monotonic constraints illustration")
plt.show()
|
[
"noreply@github.com"
] |
ogrisel.noreply@github.com
|
c96341db1a252c067b17e8947a829457d1b9a95c
|
81632cda811a3ca43497457dc25c382e53685c01
|
/week2_algorithmic_warmup/1_fibonacci_number/fibonacci.py
|
bbf8f4211aae3a7847f0fae6b8d1522ef93ff585
|
[] |
no_license
|
fernandoyto/algorithmic-toolbox
|
63511da14801ae3bfabe8da008f6d51a3e2b5572
|
059a897ad7edadd1d9278f0f87c7f5adb42e49e5
|
refs/heads/master
| 2022-09-22T09:45:55.736963
| 2020-06-01T01:59:58
| 2020-06-01T01:59:58
| 262,603,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
# Uses python3
def calc_fib(n):
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 1).

    Uses two rolling values instead of materialising the whole sequence,
    so it runs in O(n) time and O(1) extra space (the original kept the
    entire sequence in a list).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
# Read the index from stdin and print the corresponding Fibonacci number.
n = int(input())
print(calc_fib(n))
|
[
"fernando.toshioyto@gmail.com"
] |
fernando.toshioyto@gmail.com
|
d935698ed1490a86579c7639a9248a6761ca3fde
|
0125bbe0ce453e94604ff5834fbc280fe44f3220
|
/transquest/algo/sentence_level/siamesetransquest/readers/__init__.py
|
ae4c6526b6069a91c156cfd8a0f55c7f847bb325
|
[
"Apache-2.0"
] |
permissive
|
mfomicheva/TransQuest
|
fc51bcb90e386534845841fd75a3860054e76dd7
|
4225f7195a703414ed13ce597854cc1a59703229
|
refs/heads/master
| 2023-06-12T14:52:49.066705
| 2021-05-07T10:35:21
| 2021-05-07T10:35:21
| 263,876,762
| 6
| 1
|
Apache-2.0
| 2020-05-14T09:52:07
| 2020-05-14T09:52:06
| null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
# from .input_example import InputExample
# from .label_sentence_reader import LabelSentenceReader
# from .nli_data_reader import NLIDataReader
# from .qe_data_reader import QEDataReader
# from .triplet_reader import TripletReader
|
[
"rhtdranasinghe@gmail.com"
] |
rhtdranasinghe@gmail.com
|
86fcd24ccd2ff7f53cde4be30ecfe987c53c067a
|
0c823c3975deff4d12edeb2628b0380e9282fb4b
|
/venv/bin/pew
|
047ec0aafcc0b94fbada6a1f164fb179627743fe
|
[] |
no_license
|
shocktrop89/helloworld
|
a8a69927e35fd1198c30f458a426f5c060bf7bbb
|
eae76cb2e585e403edaa9e3775e820a646c80574
|
refs/heads/master
| 2022-12-14T19:56:46.990044
| 2018-02-22T15:07:20
| 2018-02-22T15:07:20
| 122,350,951
| 0
| 0
| null | 2022-09-16T17:46:27
| 2018-02-21T15:03:54
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
#!/Users/mbp13/Documents/GitHub/helloworld/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pew.pew import pew
if __name__ == '__main__':
    # Strip a setuptools "-script.py"/".exe" suffix from argv[0] before
    # handing control to pew, then exit with pew's return code.
    cleaned_name = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = cleaned_name
    sys.exit(pew())
|
[
"neo_x2k@hotmail.com"
] |
neo_x2k@hotmail.com
|
|
fc1654e347aab42f2c8356ba6bf4279a456c1caf
|
afd7a171d1d75b1f202745ddd355c05b7013aa77
|
/tensorflow_fix/flags.py
|
ce5bf191a99ad2f55815d47ebd3d683667429856
|
[] |
no_license
|
erdnase1902/web-demo-vm
|
06f9464253bf3110d542e1ef7989325f59fe7693
|
8d7c16ec4402659e8c6fcbcd1cfbcf1cc292b6de
|
refs/heads/master
| 2023-02-27T02:40:43.300761
| 2021-02-02T00:24:03
| 2021-02-02T00:24:03
| 335,121,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,756
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import router for absl.flags. See https://github.com/abseil/abseil-py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import sys as _sys
"""
Joseph fix for
Traceback (most recent call last):
File "../model/coreFunc.py", line 7, in <module>
from utils import *
File "../model/utils.py", line 190, in <module>
def writeIndex(filename, index, embLen = FLAGS.hash_code_len):
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/platform/flags.py", line 84, in __getattr__
wrapped(_sys.argv)
AttributeError: module 'sys' has no attribute 'argv'
"""
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
# go/tf-wildcard-import
from absl.flags import * # pylint: disable=wildcard-import
import six as _six
from tensorflow.python.util import tf_decorator
# Since we wrap absl.flags DEFINE functions, we need to declare this module
# does not affect key flags.
disclaim_key_flags()  # pylint: disable=undefined-variable
# Legacy TF1 keyword argument names mapped to the names absl.flags expects.
_RENAMED_ARGUMENTS = {
    'flag_name': 'name',
    'default_value': 'default',
    'docstring': 'help',
}
def _wrap_define_function(original_function):
  """Wraps absl.flags's define functions so tf.flags accepts old names."""

  def wrapper(*args, **kwargs):
    """Wrapper function that turns old keyword names to new ones."""
    renamed_any = False
    for legacy_name, absl_name in _six.iteritems(_RENAMED_ARGUMENTS):
      if legacy_name in kwargs:
        renamed_any = True
        kwargs[absl_name] = kwargs.pop(legacy_name)
    if renamed_any:
      _logging.warning(
          'Use of the keyword argument names (flag_name, default_value, '
          'docstring) is deprecated, please use (name, default, help) instead.')
    return original_function(*args, **kwargs)

  return tf_decorator.make_decorator(original_function, wrapper)
class _FlagValuesWrapper(object):
  """Wrapper class for absl.flags.FLAGS.
  The difference is that tf.flags.FLAGS implicitly parses flags with sys.argv
  when accessing the FLAGS values before it's explicitly parsed,
  while absl.flags.FLAGS raises an exception.
  """

  def __init__(self, flags_object):
    # Stash the wrapped object in __dict__ directly so our own
    # __setattr__/__getattribute__ overrides below are not triggered.
    self.__dict__['__wrapped'] = flags_object

  def __getattribute__(self, name):
    # Serve our own __dict__; every other attribute comes from the wrappee.
    if name == '__dict__':
      return super(_FlagValuesWrapper, self).__getattribute__(name)
    return self.__dict__['__wrapped'].__getattribute__(name)

  def __getattr__(self, name):
    wrapped = self.__dict__['__wrapped']
    # To maintain backwards compatibility, implicitly parse flags when reading
    # a flag.
    if not wrapped.is_parsed():
      wrapped(_sys.argv)
    return wrapped.__getattr__(name)

  def __setattr__(self, name, value):
    return self.__dict__['__wrapped'].__setattr__(name, value)

  def __delattr__(self, name):
    return self.__dict__['__wrapped'].__delattr__(name)

  def __dir__(self):
    return self.__dict__['__wrapped'].__dir__()

  def __getitem__(self, name):
    return self.__dict__['__wrapped'].__getitem__(name)

  def __setitem__(self, name, flag):
    return self.__dict__['__wrapped'].__setitem__(name, flag)

  def __len__(self):
    return self.__dict__['__wrapped'].__len__()

  def __iter__(self):
    return self.__dict__['__wrapped'].__iter__()

  def __str__(self):
    return self.__dict__['__wrapped'].__str__()

  def __call__(self, *args, **kwargs):
    return self.__dict__['__wrapped'].__call__(*args, **kwargs)
# pylint: disable=invalid-name,used-before-assignment
# absl.flags APIs use `default` as the name of the default value argument.
# Allow the following functions continue to accept `default_value`.
DEFINE_string = _wrap_define_function(DEFINE_string)
DEFINE_boolean = _wrap_define_function(DEFINE_boolean)
DEFINE_bool = DEFINE_boolean  # alias, matching absl's own naming
DEFINE_float = _wrap_define_function(DEFINE_float)
DEFINE_integer = _wrap_define_function(DEFINE_integer)
# pylint: enable=invalid-name,used-before-assignment
# Replace absl's FLAGS with the wrapper that auto-parses sys.argv on first read.
FLAGS = _FlagValuesWrapper(FLAGS)  # pylint: disable=used-before-assignment
|
[
"josephlu85@engineering.ucla.edu"
] |
josephlu85@engineering.ucla.edu
|
a29754c4c378ca8e960133b73f1081e25847597c
|
a91bea54a2d440f4333d48278ff9d870e5777069
|
/wxjlibrary/backend/urls.py
|
0786cc48f287fa09295e5a9e811d5aca2736bcb7
|
[] |
no_license
|
angelawxj/backend
|
f22908373b019d0feb1085928e13765e15882b3a
|
d53c88fde56d4bda039d9a9844ead9ecfeb28354
|
refs/heads/master
| 2021-06-30T16:39:24.527606
| 2019-02-20T09:37:31
| 2019-02-20T09:37:31
| 136,135,401
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route the Django admin UI; delegate all remaining URLs to bookList.urls.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('bookList.urls')),
]
|
[
"2472569272@qq.com"
] |
2472569272@qq.com
|
158c8568933800e1a190e58735a06e07f2d82e6b
|
21d21402c70d8a95d9a4b492078e3fb36e2c9af1
|
/shivi_khanuja/django/DojoNinja/apps/dojoninja/apps.py
|
d4941bb2d14a3111f7bbf5c0ed2410810df42a05
|
[] |
no_license
|
hmp36/python_aug_2017
|
df897a1b0aa161300386192d48e3fcac9eb495c8
|
8747429b91b09349e5b5469d8932593b06f645e1
|
refs/heads/master
| 2021-04-29T23:16:50.149226
| 2017-09-11T20:14:37
| 2017-09-11T20:14:37
| 121,552,666
| 1
| 0
| null | 2018-02-14T19:34:54
| 2018-02-14T19:34:54
| null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class DojoninjaConfig(AppConfig):
    """Django application configuration for the `dojoninja` app."""
    # Dotted path Django uses to locate the application.
    name = 'dojoninja'
[
"shivalikhanuja.net@gmail.com"
] |
shivalikhanuja.net@gmail.com
|
699b405df3c5154f4dc032a56062766d9dea7aac
|
501fa9383672b83f0d1f5516c9c4e9971ac37399
|
/Planar Model/Equilibrium/Analyze.py
|
e5021c8d67d459f2b78c21a2d7f59b2412ad9ab0
|
[] |
no_license
|
vsa1920/Monte-Carlo-methods-in-Ising-Model
|
a42b2a8e8494a5efe15fed9b8c5e56865a97ee7c
|
426647a63ea121f40799c6e9022c813c925ee580
|
refs/heads/main
| 2023-04-10T06:37:35.907962
| 2023-03-29T03:42:25
| 2023-03-29T03:42:25
| 339,046,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
from matplotlib import pyplot
from Metropolis import nearest_neighbours, acceptance_ratio
import random
from math import e, cos, sin, pi
import numpy as np
# Helpers for reading saved states from text files and evaluating them.
# Constants
J = 1  # coupling strength used in evaluate_energy
def read_func(file_name):
    """Parse a whitespace-separated matrix of floats from `file_name`.

    Each line of the file becomes one row (a list of floats) in the
    returned list of lists.
    """
    # `with` closes the handle even on error (the original leaked it).
    with open(file_name) as fh:
        return [[float(tok) for tok in line.split()] for line in fh]
def evaluate_energy(state):
    """Return the total planar-model energy of `state` (angles in radians).

    Each bond contributes -J*cos(theta_i - theta_j); every bond is visited
    from both of its endpoints, so the accumulated sum is halved.
    """
    side = len(state)
    total = 0
    for row in range(side):
        for col in range(side):
            here = state[row][col]
            for nbr_row, nbr_col in nearest_neighbours(row, col, side):
                total += -J * cos(here - state[nbr_row][nbr_col])
    return total / 2
def evaluate_magnetization(state):
    """Return the magnetisation per spin of a planar (XY) state.

    Each entry of `state` is an angle; the corresponding spin is the unit
    vector (cos a, sin a). The result is |sum of spins| / n**2 for an
    n x n grid.
    """
    side = len(state)
    total = np.array([0.0, 0.0])
    for row_idx in range(side):
        for col_idx in range(side):
            angle = state[row_idx][col_idx]
            total = total + np.array([cos(angle), sin(angle)])
    return abs(np.dot(total, total) ** 0.5 / (side * side))
|
[
"noreply@github.com"
] |
vsa1920.noreply@github.com
|
d8590c08bf2977e339d8f90929d589b87f9ce6f4
|
54c269be3d6882141fc43fba55fccaafb773cc59
|
/PythonExercises/desafio32.py
|
5998f9be211e9845d51b8633de64c32baa68d73b
|
[] |
no_license
|
ClaudioJrCode/SQLZOOexercises
|
80fb8eb2500419a52cdde0553eddd1561f3aecab
|
e4b90823b7309f2f1c7f260db4cee2f56084e0f7
|
refs/heads/main
| 2023-04-10T06:11:23.857484
| 2021-04-22T01:15:04
| 2021-04-22T01:15:04
| 360,316,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
def _eh_bissexto(ano):
    """Return True when `ano` is a leap year under the Gregorian rule."""
    return ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0)


# Repeatedly prompt for a year and report whether it is a leap year.
# The original logic printed nothing for century years (e.g. 1900, 2000)
# and kept an unused `flag`; the standard /4, /100, /400 rule fixes that.
while True:
    ano = int(input('Digite um ano: '))
    if _eh_bissexto(ano):
        print('O ano é bissexto')
    else:
        print('O ano não é bissexto')
|
[
"noreply@github.com"
] |
ClaudioJrCode.noreply@github.com
|
8ddbe698c1c73e311e9b99a820e4b99697b3fd9b
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/576811_Rename_MP3_files_ID3_tags_does_not_require/recipe-576811.py
|
292eebcc019eb27b01d8285188c18bb6380e649e
|
[
"MIT"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844
| 2021-02-24T15:39:59
| 2021-02-24T15:39:59
| 341,878,663
| 0
| 0
|
MIT
| 2021-02-24T15:40:00
| 2021-02-24T11:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 26,410
|
py
|
""" Read ID3 tags from a file.
Ned Batchelder, http://nedbatchelder.com/code/modules/id3reader.html
http://nedbatchelder.com/code/modules/id3reader.py
* original code modified by ccpizza: added code to main method to rename
files in current folder from ID3 tags,
e.g. 'Track_01.mp3' >> '01 - Chan chan.mp3'
* added safe console printing of unicode characters
* added indexing for duplicate file names, i.e. '01 - Chan
chan.mp3[2]'
* fixed indexing for duplicated ID3 tags
* added -d option to create "artist\album" directories:
e.g. 'Track_01.mp3' >> 'Compay Segundo\Mojito\01 - Chan chan.mp3'
* added fallback to 'latin1' in case of non-unicode tag text
"""
__version__ = '1.53.20070415'   # History at the end of the file.
# ID3 specs: http://www.id3.org/develop.html
import struct, sys, zlib
import re
MP3=u'mp3'
# These are the text encodings, indexed by the first byte of a text value.
_encodings = ['iso8859-1', 'utf-16', 'utf-16be', 'utf-8']
# Simple pseudo-id's, mapped to their various representations.
# Use these ids with getValue, and you don't need to know what
# version of ID3 the file contains.
# Each tuple lists the ID3v2.3 frame id, the ID3v2.2 id, and the
# ID3v1 pseudo-id(s) for the same logical field.
_simpleDataMapping = {
    'album':        ('TALB', 'TAL', 'v1album', 'TOAL'),
    'performer':    ('TPE1', 'TP1', 'v1performer', 'TOPE'),
    'title':        ('TIT2', 'TT2', 'v1title'),
    'track':        ('TRCK', 'TRK', 'v1track'),
    'year':         ('TYER', 'TYE', 'v1year'),
    'genre':        ('TCON', 'TCO', 'v1genre'),
    'comment':      ('COMM', 'COM', 'v1comment'),
}
# Provide booleans for older Pythons.
try:
    True, False
except NameError:
    # Pre-2.3 interpreters lack the builtins; synthesise them from comparisons.
    True, False = 1==1, 1==0
# Tracing
_t = False   # set True to emit trace messages while parsing
def _trace(msg):
    print msg

# Coverage
_c = False   # set True to collect feature-coverage counts
_features = {}
def _coverage(feat):
    """Count a hit for the named parsing feature in the module-level tally."""
    #if _t: _trace('feature '+feat)
    _features[feat] = _features.setdefault(feat, 0)+1
def _safestr(s):
    """ Get a good string for printing, that won't throw exceptions,
    no matter what's in it.
    """
    try:
        return unicode(s).encode(sys.getdefaultencoding())
    except UnicodeError:
        # Fall back to repr, which is always encodable.
        return '?: '+repr(s)
# Can I just say that I think the whole concept of genres is bogus,
# since they are so subjective? And the idea of letting someone else pick
# one of these things and then have it affect the categorization of my music
# is extra bogus. And the list itself is absurd. Polsk Punk?
# The ID3v1 genre table: a genre byte simply indexes into this list.
_genres = [
    # 0-19
    'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco', 'Funk', 'Grunge', 'Hip - Hop', 'Jazz', 'Metal',
    'New Age', 'Oldies', 'Other', 'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
    # 20-39
    'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack', 'Euro - Techno', 'Ambient', 'Trip - Hop', 'Vocal', 'Jazz + Funk',
    'Fusion', 'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game', 'Sound Clip', 'Gospel', 'Noise',
    # 40-59
    'Alt Rock', 'Bass', 'Soul', 'Punk', 'Space', 'Meditative', 'Instrumental Pop', 'Instrumental Rock', 'Ethnic', 'Gothic',
    'Darkwave', 'Techno - Industrial', 'Electronic', 'Pop - Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy', 'Cult', 'Gangsta Rap',
    # 60-79
    'Top 40', 'Christian Rap', 'Pop / Funk', 'Jungle', 'Native American', 'Cabaret', 'New Wave', 'Psychedelic', 'Rave', 'Showtunes',
    'Trailer', 'Lo - Fi', 'Tribal', 'Acid Punk', 'Acid Jazz', 'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock',
    # 80-99
    'Folk', 'Folk / Rock', 'National Folk', 'Swing', 'Fast - Fusion', 'Bebob', 'Latin', 'Revival', 'Celtic', 'Bluegrass',
    'Avantgarde', 'Gothic Rock', 'Progressive Rock', 'Psychedelic Rock', 'Symphonic Rock', 'Slow Rock', 'Big Band', 'Chorus', 'Easy Listening', 'Acoustic',
    # 100-119
    'Humour', 'Speech', 'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony', 'Booty Bass', 'Primus', 'Porn Groove',
    'Satire', 'Slow Jam', 'Club', 'Tango', 'Samba', 'Folklore', 'Ballad', 'Power Ballad', 'Rhythmic Soul', 'Freestyle',
    # 120-139
    'Duet', 'Punk Rock', 'Drum Solo', 'A Cappella', 'Euro - House', 'Dance Hall', 'Goa', 'Drum & Bass', 'Club - House', 'Hardcore',
    'Terror', 'Indie', 'BritPop', 'Negerpunk', 'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal', 'Black Metal', 'Crossover',
    # 140-147
    'Contemporary Christian', 'Christian Rock', 'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'JPop', 'Synthpop'
    ]
class Id3Error(Exception):
    """Raised when an ID3 tag is malformed or uses an unsupported feature."""
    pass
class _Header:
""" Represent the ID3 header in a tag.
"""
def __init__(self):
self.majorVersion = 0
self.revision = 0
self.flags = 0
self.size = 0
self.bUnsynchronized = False
self.bExperimental = False
self.bFooter = False
def __str__(self):
return str(self.__dict__)
class _Frame:
    """ Represent an ID3 frame in a tag.

    Holds the raw frame bytes plus the flag bits decoded from the frame
    header; _interpret() turns rawData into a usable .value.
    """
    def __init__(self):
        self.id = ''
        self.size = 0
        self.flags = 0
        self.rawData = ''
        # Frame-header status/format flag bits, decoded by the reader.
        self.bTagAlterPreserve = False
        self.bFileAlterPreserve = False
        self.bReadOnly = False
        self.bCompressed = False
        self.bEncrypted = False
        self.bInGroup = False
    def __str__(self):
        return str(self.__dict__)
    def __repr__(self):
        return str(self.__dict__)
    def _interpret(self):
        """ Examine self.rawData and create a self.value from it.
        """
        if len(self.rawData) == 0:
            # This is counter to the spec, but seems harmless enough.
            #if _c: _coverage('zero data')
            return
        if self.bCompressed:
            # Decompress the compressed data.
            self.rawData = zlib.decompress(self.rawData)
        if self.id[0] == 'T':
            # Text fields start with T
            # The first data byte selects the text encoding (see _encodings).
            encoding = ord(self.rawData[0])
            if 0 <= encoding < len(_encodings):
                #if _c: _coverage('encoding%d' % encoding)
                value = self.rawData[1:].decode(_encodings[encoding])
            else:
                #if _c: _coverage('bad encoding')
                value = self.rawData[1:]
            # Don't let trailing zero bytes fool you.
            if value:
                value = value.strip('\0')
            # The value can actually be a list.
            if '\0' in value:
                value = value.split('\0')
                #if _c: _coverage('textlist')
            self.value = value
        elif self.id[0] == 'W':
            # URL fields start with W
            self.value = self.rawData.strip('\0')
            if self.id == 'WXXX':
                # WXXX carries a description and a URL, NUL-separated.
                self.value = self.value.split('\0')
        elif self.id == 'CDM':
            # ID3v2.2.1 Compressed Data Metaframe
            if self.rawData[0] == 'z':
                self.rawData = zlib.decompress(self.rawData[5:])
            else:
                #if _c: _coverage('badcdm!')
                raise Id3Error, 'Unknown CDM compression: %02x' % self.rawData[0]
            #@TODO: re-interpret the decompressed frame.
        elif self.id in _simpleDataMapping['comment']:
            # comment field
            # In limited testing a typical comment looks like
            # '\x00XXXID3v1 Comment\x00comment test' so in this
            # case we need to find the second \x00 to know where
            # where we start for a comment. In case we only find
            # one \x00, lets just start at the beginning for the
            # value
            s = str(self.rawData)
            pos = 0
            count = 0
            # Scan for the second NUL; bytes before it are metadata.
            while pos < len(s) and count < 2:
                if ord(s[pos]) == 0:
                    count = count + 1
                pos = pos + 1
            if count < 2:
                pos = 1
            if pos > 0 and pos < len(s):
                s = s[pos:]
                # Drop a trailing NUL terminator, if present.
                if ord(s[-1]) == 0:
                    s = s[:-1]
            self.value = s
class Reader:
""" An ID3 reader.
Create one on a file object, and then use getValue('TIT2') (for example)
to pull values.
"""
def __init__(self, file):
    """ Create a reader from a file or filename. """
    self.file = file
    self.header = None     # _Header once an ID3v2 header is parsed
    self.frames = {}       # frame id -> _Frame (later frames overwrite)
    self.allFrames = []    # every frame seen, including duplicates
    self.bytesLeft = 0     # bytes remaining in the tag body
    self.padbytes = ''
    bCloseFile = False
    # If self.file is a string of some sort, then open it to get a file.
    if isinstance(self.file, (type(''), type(u''))):
        self.file = open(self.file, 'rb')
        bCloseFile = True
    self._readId3()
    # Only close handles that we opened ourselves.
    if bCloseFile:
        self.file.close()
def _readBytes(self, num, desc=''):
    """ Read some bytes from the file.
    This method implements the "unsynchronization" scheme,
    where 0xFF bytes may have had 0x00 bytes stuffed after
    them. These zero bytes have to be removed transparently.

    `desc` only labels error messages; raises Id3Error when the request
    exceeds the remaining tag bytes or the file ends early.
    """
    #if _t: _trace("ask %d (%s)" % (num,desc))
    if num > self.bytesLeft:
        #if _c: _coverage('long!')
        raise Id3Error, 'Long read (%s): (%d > %d)' % (desc, num, self.bytesLeft)
    bytes = self.file.read(num)
    self.bytesLeft -= num
    if len(bytes) < num:
        #if _t: _trace("short read with %d left, %d total" % (self.bytesLeft, self.header.size))
        #if _c: _coverage('short!')
        raise Id3Error, 'Short read (%s): (%d < %d)' % (desc, len(bytes), num)
    if self.header.bUnsynchronized:
        nUnsync = 0
        i = 0
        while True:
            i = bytes.find('\xFF\x00', i)
            if i == -1:
                break
            #if _t: _trace("unsync at %d" % (i+1))
            #if _c: _coverage('unsyncbyte')
            nUnsync += 1
            # This is a stuffed byte to remove
            bytes = bytes[:i+1] + bytes[i+2:]
            # Have to read one more byte from the file to adjust
            bytes += self.file.read(1)
            self.bytesLeft -= 1
            i += 1
        #if _t: _trace("unsync'ed %d" % (nUnsync))
    return bytes
def _unreadBytes(self, num):
    # Push `num` bytes back: rewind the file and restore the byte budget.
    self.file.seek(-num, 1)
    self.bytesLeft += num
def _getSyncSafeInt(self, bytes):
assert len(bytes) == 4
if type(bytes) == type(''):
bytes = [ ord(c) for c in bytes ]
return (bytes[0] << 21) + (bytes[1] << 14) + (bytes[2] << 7) + bytes[3]
def _getInteger(self, bytes):
i = 0;
if type(bytes) == type(''):
bytes = [ ord(c) for c in bytes ]
for b in bytes:
i = i*256+b
return i
def _addV1Frame(self, id, rawData):
if id == 'v1genre':
assert len(rawData) == 1
nGenre = ord(rawData)
try:
value = _genres[nGenre]
except IndexError:
value = "(%d)" % nGenre
else:
value = rawData.strip(' \t\r\n').split('\0')[0]
if value:
frame = _Frame()
frame.id = id
frame.rawData = rawData
frame.value = value
self.frames[id] = frame
self.allFrames.append(frame)
def _pass(self):
""" Do nothing, for when we need to plug in a no-op function.
"""
pass
def _readId3(self):
header = self.file.read(10)
if len(header) < 10:
return
hstuff = struct.unpack('!3sBBBBBBB', header)
if hstuff[0] != "ID3":
# Doesn't look like an ID3v2 tag,
# Try reading an ID3v1 tag.
self._readId3v1()
return
self.header = _Header()
self.header.majorVersion = hstuff[1]
self.header.revision = hstuff[2]
self.header.flags = hstuff[3]
self.header.size = self._getSyncSafeInt(hstuff[4:8])
self.bytesLeft = self.header.size
self._readExtHeader = self._pass
if self.header.majorVersion == 2:
#if _c: _coverage('id3v2.2.%d' % self.header.revision)
self._readFrame = self._readFrame_rev2
elif self.header.majorVersion == 3:
#if _c: _coverage('id3v2.3.%d' % self.header.revision)
self._readFrame = self._readFrame_rev3
elif self.header.majorVersion == 4:
#if _c: _coverage('id3v2.4.%d' % self.header.revision)
self._readFrame = self._readFrame_rev4
else:
#if _c: _coverage('badmajor!')
raise Id3Error, "Unsupported major version: %d" % self.header.majorVersion
# Interpret the flags
self._interpretFlags()
# Read any extended header
self._readExtHeader()
# Read the frames
while self.bytesLeft > 0:
frame = self._readFrame()
if frame:
frame._interpret()
self.frames[frame.id] = frame
self.allFrames.append(frame)
else:
#if _c: _coverage('padding')
break
def _interpretFlags(self):
""" Interpret ID3v2.x flags.
"""
if self.header.flags & 0x80:
self.header.bUnsynchronized = True
#if _c: _coverage('unsynctag')
if self.header.majorVersion == 2:
if self.header.flags & 0x40:
#if _c: _coverage('compressed')
# "Since no compression scheme has been decided yet,
# the ID3 decoder (for now) should just ignore the entire
# tag if the compression bit is set."
self.header.bCompressed = True
if self.header.majorVersion >= 3:
if self.header.flags & 0x40:
#if _c: _coverage('extheader')
if self.header.majorVersion == 3:
self._readExtHeader = self._readExtHeader_rev3
else:
self._readExtHeader = self._readExtHeader_rev4
if self.header.flags & 0x20:
#if _c: _coverage('experimental')
self.header.bExperimental = True
if self.header.majorVersion >= 4:
if self.header.flags & 0x10:
#if _c: _coverage('footer')
self.header.bFooter = True
def _readExtHeader_rev3(self):
""" Read the ID3v2.3 extended header.
"""
# We don't interpret this yet, just eat the bytes.
size = self._getInteger(self._readBytes(4, 'rev3ehlen'))
self._readBytes(size, 'rev3ehdata')
def _readExtHeader_rev4(self):
""" Read the ID3v2.4 extended header.
"""
# We don't interpret this yet, just eat the bytes.
size = self._getSyncSafeInt(self._readBytes(4, 'rev4ehlen'))
self._readBytes(size-4, 'rev4ehdata')
def _readId3v1(self):
""" Read the ID3v1 tag.
spec: http://www.id3.org/id3v1.html
"""
self.file.seek(-128, 2)
tag = self.file.read(128)
if len(tag) != 128:
return
if tag[0:3] != 'TAG':
return
self.header = _Header()
self.header.majorVersion = 1
self.header.revision = 0
self._addV1Frame('v1title', tag[3:33])
self._addV1Frame('v1performer', tag[33:63])
self._addV1Frame('v1album', tag[63:93])
self._addV1Frame('v1year', tag[93:97])
self._addV1Frame('v1comment', tag[97:127])
self._addV1Frame('v1genre', tag[127])
if tag[125] == '\0' and tag[126] != '\0':
#if _c: _coverage('id3v1.1')
self.header.revision = 1
self._addV1Frame('v1track', str(ord(tag[126])))
else:
#if _c: _coverage('id3v1.0')
pass
return
_validIdChars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def _isValidId(self, id):
""" Determine if the id bytes make a valid ID3 id.
"""
for c in id:
if not c in self._validIdChars:
#if _c: _coverage('bad id')
return False
#if _c: _coverage('id '+id)
return True
def _readFrame_rev2(self):
""" Read a frame for ID3v2.2: three-byte ids and lengths.
spec: http://www.id3.org/id3v2-00.txt
"""
if self.bytesLeft < 6:
return None
id = self._readBytes(3, 'rev2id')
if len(id) < 3 or not self._isValidId(id):
self._unreadBytes(len(id))
return None
hstuff = struct.unpack('!BBB', self._readBytes(3, 'rev2len'))
frame = _Frame()
frame.id = id
frame.size = self._getInteger(hstuff[0:3])
frame.rawData = self._readBytes(frame.size, 'rev2data')
return frame
def _readFrame_rev3(self):
""" Read a frame for ID3v2.3: four-byte ids and lengths.
"""
if self.bytesLeft < 10:
return None
id = self._readBytes(4,'rev3id')
if len(id) < 4 or not self._isValidId(id):
self._unreadBytes(len(id))
return None
hstuff = struct.unpack('!BBBBh', self._readBytes(6,'rev3head'))
frame = _Frame()
frame.id = id
frame.size = self._getInteger(hstuff[0:4])
cbData = frame.size
frame.flags = hstuff[4]
#if _t: _trace('flags = %x' % frame.flags)
frame.bTagAlterPreserve = (frame.flags & 0x8000 != 0)
frame.bFileAlterPreserve = (frame.flags & 0x4000 != 0)
frame.bReadOnly = (frame.flags & 0x2000 != 0)
frame.bCompressed = (frame.flags & 0x0080 != 0)
if frame.bCompressed:
frame.decompressedSize = self._getInteger(self._readBytes(4, 'decompsize'))
cbData -= 4
#if _c: _coverage('compress')
frame.bEncrypted = (frame.flags & 0x0040 != 0)
if frame.bEncrypted:
frame.encryptionMethod = self._readBytes(1, 'encrmethod')
cbData -= 1
#if _c: _coverage('encrypt')
frame.bInGroup = (frame.flags & 0x0020 != 0)
if frame.bInGroup:
frame.groupid = self._readBytes(1, 'groupid')
cbData -= 1
#if _c: _coverage('groupid')
frame.rawData = self._readBytes(cbData, 'rev3data')
return frame
def _readFrame_rev4(self):
""" Read a frame for ID3v2.4: four-byte ids and lengths.
"""
if self.bytesLeft < 10:
return None
id = self._readBytes(4,'rev4id')
if len(id) < 4 or not self._isValidId(id):
self._unreadBytes(len(id))
return None
hstuff = struct.unpack('!BBBBh', self._readBytes(6,'rev4head'))
frame = _Frame()
frame.id = id
frame.size = self._getSyncSafeInt(hstuff[0:4])
cbData = frame.size
frame.flags = hstuff[4]
frame.bTagAlterPreserve = (frame.flags & 0x4000 != 0)
frame.bFileAlterPreserve = (frame.flags & 0x2000 != 0)
frame.bReadOnly = (frame.flags & 0x1000 != 0)
frame.bInGroup = (frame.flags & 0x0040 != 0)
if frame.bInGroup:
frame.groupid = self._readBytes(1, 'groupid')
cbData -= 1
#if _c: _coverage('groupid')
frame.bCompressed = (frame.flags & 0x0008 != 0)
if frame.bCompressed:
#if _c: _coverage('compress')
pass
frame.bEncrypted = (frame.flags & 0x0004 != 0)
if frame.bEncrypted:
frame.encryptionMethod = self._readBytes(1, 'encrmethod')
cbData -= 1
#if _c: _coverage('encrypt')
frame.bUnsynchronized = (frame.flags & 0x0002 != 0)
if frame.bUnsynchronized:
#if _c: _coverage('unsyncframe')
pass
if frame.flags & 0x0001:
frame.datalen = self._getSyncSafeInt(self._readBytes(4, 'datalen'))
cbData -= 4
#if _c: _coverage('datalenindic')
frame.rawData = self._readBytes(cbData, 'rev3data')
return frame
def getValue(self, id):
""" Return the value for an ID3 tag id, or for a
convenience label ('title', 'performer', ...),
or return None if there is no such value.
"""
if self.frames.has_key(id):
if hasattr(self.frames[id], 'value'):
return self.frames[id].value
if _simpleDataMapping.has_key(id):
for id2 in _simpleDataMapping[id]:
v = self.getValue(id2)
if v:
return v
return None
def getRawData(self, id):
if self.frames.has_key(id):
return self.frames[id].rawData
return None
def dump(self):
import pprint
print "Header:"
print self.header
print "Frames:"
for fr in self.allFrames:
if len(fr.rawData) > 30:
fr.rawData = fr.rawData[:30]
pprint.pprint(self.allFrames)
for fr in self.allFrames:
if hasattr(fr, 'value'):
print '%s: %s' % (fr.id, _safestr(fr.value))
else:
print '%s= %s' % (fr.id, _safestr(fr.rawData))
for label in _simpleDataMapping.keys():
v = self.getValue(label)
if v:
print 'Label %s: %s' % (label, _safestr(v))
def dumpCoverage(self):
feats = _features.keys()
feats.sort()
for feat in feats:
print "Feature %-12s: %d" % (feat, _features[feat])
# chars not allowed in filenames
# NOTE(review): the literal is not a raw string, so '\?' keeps the backslash
# itself in the set (presumably intended for Windows paths) and '*' appears
# twice — harmless for membership tests, but verify the intended set.
illegal_chars = u'/\?=+<>:;"*|!@#$%^&*'
# http://code.activestate.com/recipes/65441/
def has_chars(raw, bad_chars):
    """Return True if *raw* contains at least one character from *bad_chars*.

    Byte/unicode comparison failures are treated as "no match" instead of
    being propagated to the caller.
    """
    try:
        return any(ch in raw for ch in bad_chars)
    except UnicodeDecodeError:
        return False
def replace_illegal_chars(raw, bad_chars=None):
    """Return *raw* with every character found in *bad_chars* replaced by '_'.

    :param raw: string to sanitise (typically a candidate file name).
    :param bad_chars: characters to replace; when None (the default, keeping
        the old one-argument call sites working) the module-level
        ``illegal_chars`` set is used.
    """
    if bad_chars is None:
        bad_chars = illegal_chars
    # A conditional expression replaces the fragile `cond and a or b` idiom.
    return ''.join('_' if c in bad_chars else c for c in raw)
def asci(*args):
    """ Print the arguments encoded as 7-bit ASCII.
    Characters outside us-ascii are replaced by XML character references so
    printing never raises UnicodeEncodeError on a narrow console.  The
    trailing commas use Python 2 softspace printing; the final bare ``print``
    terminates the line.
    """
    for arg in args:
        print arg.encode('us-ascii','xmlcharrefreplace'),
    print
def is_dupe(oldmp3, newmp3):
    """Return True when *newmp3* is essentially a rename of *oldmp3*.

    Extensions are stripped and the comparison is case-insensitive; the old
    base name only has to start with the new one, so ``song[1].mp3`` counts
    as a duplicate of ``song.mp3``.
    """
    old_base = os.path.splitext(oldmp3)[0].lower()
    new_base = os.path.splitext(newmp3)[0].lower()
    return old_base.startswith(new_base)
def parse_index(f):
    """Split a duplicate-suffixed base name into ``(name, index)``.

    ``'song[3]'`` -> ``('song', 3)``; a name without a ``[n]`` suffix comes
    back unchanged with index 0.

    The index is returned as an :obj:`int`.  The previous version returned
    the raw regex group (a string) on match but the integer 0 otherwise,
    which made the caller's ``idx + i`` arithmetic raise TypeError.
    """
    rx = re.compile(r'(?P<name>.+)\[(?P<index>\d+?)\]$')
    mo = rx.search(f)
    if mo:
        return mo.group('name'), int(mo.group('index'))
    return f, 0
if __name__ == '__main__':
    # Command-line tool: rename MP3 files according to their ID3 tags as
    # "<track> - <title>.mp3", optionally sorting them into
    # <artist>/<album> folders (-d).  With -l it only lists the tags.
    import os
    import optparse
    dodirs = False
    parser = optparse.OptionParser()
    parser.add_option('-l','--list',
        action="store_true",
        help='List ID3 tags only with no renaming',
        default=False
        )
    parser.add_option('-d', '--dirs',
        action="store_true",
        help="create album dirs",
        default=False)
    (opts, args) = parser.parse_args(sys.argv[1:])
    # First positional argument is the folder to process, default: cwd.
    if len(args):
        mp3dir = unicode(args[0])
    else:
        mp3dir = u'.'
    print
    if opts.list:
        print 'Listing ID3 tags in folder:', asci(os.path.abspath(mp3dir))
    else:
        print 'Renaming MP3 files in folder:', asci(os.path.abspath(mp3dir))
    print
    for fname in os.listdir(mp3dir):
        # uncomment if you want to process only files with .mp3 extension
        # if not fname.lower().endswith(MP3):
        #    continue
        # NOTE(review): this tests fname relative to the CWD, not mp3dir;
        # when mp3dir != '.' subdirectories may slip through — verify.
        if os.path.isdir(fname):
            continue
        absfname = os.path.join(mp3dir, fname)
        try:
            id3r = Reader(absfname)
        except (Id3Error, UnicodeDecodeError), e:
            print e
            continue
        #id3r.dump()
        album = id3r.getValue('album')
        track = id3r.getValue('track')
        artist = id3r.getValue('performer')
        title = id3r.getValue('title')
        year = id3r.getValue('year')
        ### move files to dirs according to artist, album
        if opts.dirs and artist and album:
            dodirs = True
            mp3dir_full = os.path.join(mp3dir,
                            replace_illegal_chars(artist),
                            replace_illegal_chars(album))
            if not os.path.exists(mp3dir_full):
                try:
                    os.makedirs(mp3dir_full)
                # NOTE(review): WindowsError only exists on Windows; on other
                # platforms this except clause itself raises NameError.
                except (IOError,WindowsError), e :
                    print
        else:
            mp3dir_full = mp3dir
        # Without a title there is nothing sensible to rename to.
        if not title:
            continue
        # replace tracks like '2/15' >> '02'
        if track and u'/' in track:
            track = track.split('/')[0]
        if track:
            track = track.zfill(2) # zero fill, i. e. '1' >> '01'
        if not track:
            track = ''
        if has_chars(title, illegal_chars):
            title = replace_illegal_chars(title)
        try:
            if isinstance(track, unicode) or isinstance(title, unicode):
                new_fname = track + u' - ' + title + u'.' + MP3
            ## try to fix non-unicode strings, only trying 'latin1'
            if isinstance(track, str) or isinstance(title, str):
                new_fname = track + ' - ' + title.decode('latin1') + '.' + MP3
        except UnicodeDecodeError:
            print 'Encoding error while processing title/track'
            continue
        new_dir = dodirs and mp3dir_full or mp3dir
        proposed_new_name = os.path.join(new_dir, new_fname)
        maxwidth = 35
        if opts.list:
            print '>',
        else:
            if not is_dupe(absfname, proposed_new_name):
                if os.path.exists(proposed_new_name):
                    # Append/advance a "[n]" suffix until a free name is found.
                    # NOTE(review): parse_index returns the index as str on a
                    # match, so idx+i may raise TypeError — ensure it is int.
                    # Also os.path.exists(new_fname) checks without new_dir —
                    # verify the intended path.
                    for i in range(1,1000): # up to 999 duplicate suffixes
                        parsed_name, idx = parse_index(os.path.splitext(proposed_new_name)[0])
                        new_fname = parsed_name + u'[' + unicode(idx+i) + u'].' + MP3
                        if not os.path.exists(new_fname):
                            break
                try:
                    os.rename(absfname, os.path.join(new_dir, new_fname))
                except Exception, e:
                    asci( 'Error: ', absfname.ljust(maxwidth), '>>>', proposed_new_name, str(e) )
            else:
                maxwidth -= len('Skipping...') + 1
                print 'Skipping...',
        asci((len(fname) > maxwidth and fname[:maxwidth-3] or fname).ljust(maxwidth), ' >>> ', new_fname)
|
[
"betty@qburst.com"
] |
betty@qburst.com
|
b6dc0cc56bc60c50f7a85c67bbc5ce45005f3b89
|
79f2838de509de74350a10abf256ca6c394bce65
|
/listasevariaveis/exemplomultiplaslistasbusca.py
|
c75b27df0df7fdc0781614ba70f82989ee962f46
|
[] |
no_license
|
psanrosa13/pythonEstudo
|
b520825025219adb1752a2cc71cdfc2693026f42
|
6a130390750d7783be969de4a8c75122f71f9d32
|
refs/heads/master
| 2023-01-18T21:11:00.543839
| 2020-11-21T00:03:59
| 2020-11-21T00:03:59
| 313,464,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
# Inventory capture: collect equipment records in four parallel lists,
# then look one record up by name.
equipamentos = []
valores = []
seriais = []
departamentos = []
resposta = 'S'
while resposta == 'S':
    equipamentos.append(input('Equipamento: '))
    valores.append(float(input('Valor: ')))
    seriais.append(int(input('Número Serial: ')))
    departamentos.append(input('Departamento: '))
    resposta = input('Digite \'S\' para continuar: ').upper()
# Bug fix: the prompt used '/n' (a literal slash) instead of the '\n' escape.
busca = input('\nDigite o nome do equipamento que deseja buscar: ')
# zip keeps the four parallel lists in lockstep without manual indexing.
for nome, valor, serial, departamento in zip(equipamentos, valores, seriais, departamentos):
    if busca == nome:
        print('Nome..........: ', nome)
        print('Valor.........: ', valor)
        print('Serial........: ', serial)
        print('Departamento..: ', departamento)
|
[
"psanrosa13@gmail.com"
] |
psanrosa13@gmail.com
|
12e9c4dcfd45ee85d030c29f1e736d00d2d4d700
|
ec3e7aed2d85b1ad83500ff6944ac04c3f213b4c
|
/statinf/data/ProcessData.py
|
adfe35ae6f12625eebd8e3decdda3a63cf557817
|
[
"MIT"
] |
permissive
|
vishalbelsare/statinf
|
2ef58526a93a85709c9177d437979be13d8f6072
|
c1dd7e0855809f4676845d33e0b9b040169463cb
|
refs/heads/master
| 2023-09-04T04:46:03.561451
| 2023-08-18T17:33:27
| 2023-08-18T17:33:27
| 230,207,975
| 0
| 0
|
MIT
| 2023-08-19T03:20:31
| 2019-12-26T06:26:23
|
Python
|
UTF-8
|
Python
| false
| false
| 27,132
|
py
|
import numpy as np
import pandas as pd
import re
import warnings
from types import SimpleNamespace
# Ranking data
def rankdata(x):
    """Assigns rank to data.

    This is mainly used for analysis like Spearman's correlation.
    Ties receive the rank of their first occurrence in the sorted order
    (identical to ``sorted(x).index(v)`` for every element), but the ranks
    are computed with a vectorised binary search in O(n log n) instead of
    the previous O(n^2) per-element list scan.

    :param x: Input vector. Format can be :obj:`numpy.array`, :obj:`list` or :obj:`pandas.Series`.
    :type x: :obj:`numpy.array`

    :example:

    >>> rankdata([2., 5.44, 3.93, 3.3, 1.1])
    ... array([1, 4, 3, 2, 0])

    :return: Vector with ranked values.
    :rtype: :obj:`numpy.array`
    """
    x_arr = np.asarray(x)
    sorted_arr = np.sort(x_arr)
    # searchsorted(side='left') yields the first position of each value in
    # the sorted array, which matches list.index() for present elements.
    return np.searchsorted(sorted_arr, x_arr, side='left')
#######################################################################################################################
# Parse formula and data transformations
def parse_formula(formula, data, check_values=True, return_all=False):
    """This function is used in regression models in order to apply transformations on the data from a formula.
    It allows to apply transformations from a :obj:`str` formula. See below for examples.

    :param formula: Regression formula to be run of the form :obj:`y ~ x1 + x2`. Accepted functions are:

        * :math:`\\log(x)` \\: :obj:`log(X)`
        * :math:`\\exp(x)` \\: :obj:`exp(X)`
        * :math:`\\sqrt{x}` \\: :obj:`sqrt(X)`
        * :math:`\\cos(x)` \\: :obj:`cos(X)`
        * :math:`\\sin(x)` \\: :obj:`sin(X)`
        * :math:`x^{z}` \\: :obj:`X ** Z`
        * :math:`\\dfrac{x}{z}` \\: :obj:`X/Z`
        * :math:`x \\times z` \\: :obj:`X*Z`
    :type formula: :obj:`str`
    :param data: Data on which to perform the transformations.
    :type data: :obj:`pandas.DataFrame`
    :param check_values: Check that denominators of divisions contain no zero, defaults to True.
    :type check_values: bool, optional
    :param return_all: Returns the transformed data, column :obj:`Y` and columns :obj:`X`, defaults to False.
    :type return_all: bool, optional

    :example:

    >>> from statinf.data import parse_formula
    >>> form = 'Y ~ X1 + X2 + exp(X2) + X1*X2'
    >>> new_df = parse_formula(form, data=input_df)
    # Adds the columns 'exp(X2)' and 'X1*X2' to input_df.

    :raises ValueError: Returns an error when the data cannot satisfy the domain definition for the required transformation (e.g. a zero denominator).

    :return: Transformed data set
    :rtype: :obj:`pandas.DataFrame`
    """
    warnings.filterwarnings('ignore')
    # Parse formula
    no_space_formula = formula.replace(' ', '')
    Y_col = no_space_formula.split('~')[0]
    X_col = no_space_formula.split('~')[1].split('+')
    # Non-linear transformations
    log_cols = [re.search('(?<=log\\().*?(?=\\))', x).group(0) for x in X_col if re.findall('log\\(', x)]    # log
    exp_cols = [re.search('(?<=exp\\().*?(?=\\))', x).group(0) for x in X_col if re.findall('exp\\(', x)]    # exp
    sqrt_cols = [re.search('(?<=sqrt\\().*?(?=\\))', x).group(0) for x in X_col if re.findall('sqrt\\(', x)]  # sqrt
    cos_cols = [re.search('(?<=cos\\().*?(?=\\))', x).group(0) for x in X_col if re.findall('cos\\(', x)]    # cos
    sin_cols = [re.search('(?<=sin\\().*?(?=\\))', x).group(0) for x in X_col if re.findall('sin\\(', x)]    # sin

    # Transformation functions
    transformations_functional = {'log': {'func': np.log, 'cols': log_cols},
                                  'exp': {'func': np.exp, 'cols': exp_cols},
                                  'cos': {'func': np.cos, 'cols': cos_cols},
                                  'sin': {'func': np.sin, 'cols': sin_cols},
                                  'sqrt': {'func': np.sqrt, 'cols': sqrt_cols},
                                  }

    # Apply functional transformations, creating e.g. the column 'log(X1)'.
    for key, transformation in transformations_functional.items():
        for c in transformation['cols']:
            col_to_transform = c
            data.loc[:, f'{key}({col_to_transform})'] = transformation['func'](data[col_to_transform])

    # Multiplications, power and ratio terms
    pow_cols = [x for x in X_col if re.findall('[a-zA-Z0-9\\(\\)][*][*][a-zA-Z0-9]', x)]  # X1 ** x
    inter_cols = [x for x in X_col if re.findall('[a-zA-Z0-9\\(\\)][*][a-zA-Z0-9]', x)]   # X1 * X2
    div_cols = [x for x in X_col if re.findall('[a-zA-Z0-9\\(\\)][/][a-zA-Z0-9]', x)]     # X1 / X2

    # Exponents
    for c in pow_cols:
        c_left = c.split('**')[0]
        c_power = c.split('**')[1]
        # Get components as number or column from data
        left = data[c_left].values if c_left in data.columns else float(c_left)
        power = data[c_power].values if c_power in data.columns else float(c_power)
        # Transform
        data.loc[:, c] = left ** power

    # Multiplications
    for c in inter_cols:
        c_left = c.split('*')[0]
        c_right = c.split('*')[1]
        # Get components as number or column from data
        try:
            left = data[c_left].values if c_left in list(data.columns) + X_col else float(c_left)
            right = data[c_right].values if c_right in list(data.columns) + X_col else float(c_right)
        except Exception:
            raise ValueError(f'Columns {c_left} or {c_right} not found in data.')
        # Transform
        data.loc[:, c] = left * right

    # Divide
    for c in div_cols:
        c_num = c.split('/')[0]
        c_denom = c.split('/')[1]
        # Get components as number or column from data
        num = data[c_num].values if c_num in list(data.columns) + X_col else float(c_num)
        denom = data[c_denom].values if c_denom in list(data.columns) + X_col else float(c_denom)
        if check_values:
            # Guard against division by zero.  Fix: the old assert referenced
            # the stale `col_to_transform` variable (possible NameError in the
            # failure path), crashed on scalar denominators, and raised
            # AssertionError where the docstring promises ValueError.
            if np.any(np.asarray(denom) == 0.):
                raise ValueError(f'Column {c_denom} contains null values.')
        # Transform
        data.loc[:, c] = num / denom

    # A literal '1' in the formula adds an explicit intercept column.
    if '1' in X_col:
        data['1'] = 1

    # Putting pandas' warning message back
    warnings.filterwarnings('default')

    if return_all:
        return data, X_col, Y_col
    else:
        return data
#######################################################################################################################
# Adding One Hot Encoding
def OneHotEncoding(data, columns, drop=True, verbose=False):
    """Performs One Hot Encoding (OHE) usally used in Machine Learning.

    :param data: Data Frame on which we apply One Hot Encoding.
    :type data: :obj:`pandas.DataFrame`
    :param columns: Column (as :obj:`str`) or columns (as :obj:`list`) to be converted to dummy variables.
    :type columns: :obj:`list`
    :param drop: Drop the column for one attribute (first value that appears in the dataset). This helps avoid multicolinearity issues in subsequent models, defaults to True.
    :type drop: :obj:`bool`, optional
    :param verbose: Display progression, defaults to False.
    :type verbose: :obj:`bool`, optional

    :example:

    >>> from statinf.data import OneHotEncoding
    >>> new_df = OneHotEncoding(df, columns=["Gender", "Category"])
    >>> # Listing the newly created columns
    >>> print(new_df.meta._ohe)
    ... {'Gender': ['Gender_Female'],
    ...  'Category': ['Category_A', 'Category_B']}
    >>> # Get the aggregated list of encoded columns
    >>> print(new_df.meta._ohe_all_columns)
    ... ['Gender_Female', 'Category_B', 'Category_C']

    :return: Transformed data with One Hot Encoded variables.
        New attributes are added to the data frame:

        * :obj:`df.meta._ohe`: contains the encoded columns and the created columns.
        * :obj:`df.meta._ohe_all_columns`: aggregates the newly created columns in one list. This list can directly be passed or appended to the input columns argument of subsequent models.
    :rtype: :obj:`pandas.DataFrame`
    """
    dataset = data.copy()

    try:
        # A previous OneHotEncoding pass already attached the metadata.
        dataset.meta._ohe_exists
    except AttributeError:
        # First pass on this frame: attach the bookkeeping namespace.
        # (Fix: the old code caught a broad Exception; only the missing
        # attribute is expected here.)
        dataset.meta = SimpleNamespace()
        dataset.meta._ohe_exists = True
        dataset.meta._ohe = {}
        dataset.meta._ohe_all_columns = []

    # Accept a single column name as well as a list of names.
    # (Fix: isinstance() instead of the `type(x) == str` anti-pattern.)
    cols = [columns] if isinstance(columns, str) else columns

    # Start encoding column by column
    for column in cols:
        # Get all values from the column
        all_values = dataset[column].unique()
        # With drop=True the first value becomes the implicit baseline.
        all_values = all_values[1:] if drop else all_values
        new_cols = [f'{column}_{val}' for val in all_values]
        # Add column metadata
        dataset.meta._ohe.update({column: new_cols})
        dataset.meta._ohe_all_columns += new_cols

        # Encode values
        for val in all_values:
            if verbose:
                print('Encoding for value: ' + str(val))
            colname = column + '_' + str(val)
            dataset.loc[:, colname] = 0
            dataset.loc[dataset[column] == val, colname] = 1
        # Drop the original categorical column
        dataset.drop(columns=[column], inplace=True)
    return dataset
#######################################################################################################################
# Convert an array of values into a dataset matrix: used for LSTM data pre-processing
def create_dataset(data, n_in=1, n_out=1, dropnan=True):
    """Reframe a (multivariate) series as a supervised-learning table for Keras LSTM.

    Each output row pairs ``n_in`` lagged observations (t-n_in ... t-1) with
    ``n_out`` forecast steps (t ... t+n_out-1), one column per variable.

    :param data: Series to reframe (:obj:`list` or :obj:`pandas.DataFrame`).
    :type data: :obj:`pandas.DataFrame`
    :param n_in: Input dimension, also known as look back or window size, defaults to 1.
    :type n_in: :obj:`int`, optional
    :param n_out: Output dimension (forecast horizon), defaults to 1.
    :type n_out: :obj:`int`, optional
    :param dropnan: Drop boundary rows containing NaN produced by shifting, defaults to True.
    :type dropnan: :obj:`bool`, optional

    :return: Reframed data with columns named ``var<j>(t-i)`` / ``var<j>(t+i)``.
    :rtype: :obj:`pandas.DataFrame`
    """
    width = 1 if type(data) is list else data.shape[1]
    frame = pd.DataFrame(data)
    shifted, labels = [], []
    # Lagged inputs: t-n_in, ..., t-1.
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        labels.extend('var%d(t-%d)' % (v + 1, lag) for v in range(width))
    # Forecast outputs: t, t+1, ..., t+n_out-1.
    for step in range(n_out):
        shifted.append(frame.shift(-step))
        if step:
            labels.extend('var%d(t+%d)' % (v + 1, step) for v in range(width))
        else:
            labels.extend('var%d(t)' % (v + 1) for v in range(width))
    # Put it all together and optionally drop the NaN border rows.
    merged = pd.concat(shifted, axis=1)
    merged.columns = labels
    if dropnan:
        merged.dropna(inplace=True)
    return merged
def split_sequences(data, look_back=1):
    """Split a multivariate time series from :py:meth:`statinf.data.ProcessData.multivariate_time_series` into a Keras-friendly LSTM format.

    :param data: Data in the format of sequences to transform.
    :type data: :obj:`numpy.ndarray`
    :param look_back: Size of the trailing window, number of time steps to consider, defaults to 1.
    :type look_back: :obj:`int`

    :exemple:

    >>> from statinf.data import multivariate_time_series, split_sequences
    >>> train_to_split = multivariate_time_series(train)
    >>> X, y = split_sequences(train_to_split, look_back=7)

    :return: * :obj:`x`: Input data converted for Keras LSTM.
        * :obj:`y`: Target series converted for Keras LSTM.
    :rtype: * :obj:`numpy.ndarray`
        * :obj:`numpy.ndarray`
    """
    # Window i covers rows [i, i+look_back); its target is row i+look_back,
    # so the last usable window starts at len(data) - look_back - 1.
    n_windows = len(data) - look_back
    windows = [data[i:i + look_back, :] for i in range(n_windows)]
    targets = [data[i + look_back, :] for i in range(n_windows)]
    return np.array(windows), np.array(targets)
def multivariate_time_series(data):
    """Convert a dataframe into a numpy multivariate time series array.

    :param data: Input data to transform.
    :type data: :obj:`pandas.DataFrame`

    :exemple:

    >>> from statinf.data import multivariate_time_series, split_sequences
    >>> train_to_split = multivariate_time_series(train)
    >>> X, y = split_sequences(train_to_split, look_back=7)

    :return: Transformed multivariate time series data.
    :rtype: :obj:`numpy.ndarray`
    """
    # Reshape each column to (n, 1) and stack them side by side.
    column_arrays = [data[name].to_numpy().reshape(-1, 1) for name in data.columns]
    return np.hstack(tuple(column_arrays))
#######################################################################################################################
# Scale dataset
class Scaler:
def __init__(self, data, columns):
"""Data scaler.
:param data: Data set to scale.
:type data: :obj:`pandas.DataFrame`
:param columns: Columns to scale.
:type columns: :obj:`list`
:example:
>>> from statinf.data import Scaler, generate_dataset
>>> coeffs = [1.2556, -0.465, 1.665414, 2.5444, -7.56445]
>>> data = generate_dataset(coeffs, n=10, std_dev=2.6)
>>> # Original dataset
>>> print(data)
... +-----------+-----------+-----------+-----------+-----------+-----------+
... | X0 | X1 | X2 | X3 | X4 | Y |
... +-----------+-----------+-----------+-----------+-----------+-----------+
... | 0.977594 | 1.669510 | -1.385569 | 0.696975 | -1.207098 | 8.501692 |
... | -0.953802 | 1.025392 | -0.639291 | 0.658251 | 0.746814 | -7.186085 |
... | -0.148140 | -0.972473 | 0.843746 | 1.306845 | 0.269834 | 1.939924 |
... | 0.499385 | -1.081926 | 2.646441 | 0.910503 | 0.857189 | 0.389257 |
... | -0.563977 | -0.511933 | -0.726744 | -0.630345 | -0.486822 | -0.125787 |
... | -0.434994 | -0.396210 | 1.101739 | -0.660236 | -1.197566 | 7.735832 |
... | 0.032478 | -0.114952 | -0.097337 | 1.794769 | 1.239423 | -5.510332 |
... | 0.085569 | -0.600019 | 0.224186 | 0.301771 | 1.278387 | -8.648084 |
... | -0.028844 | -0.329940 | -0.301762 | 0.946077 | -0.359133 | 5.099971 |
... | -0.665312 | 0.270254 | -1.263288 | 0.545625 | 0.499162 | -6.126528 |
... +-----------+-----------+-----------+-----------+-----------+-----------+
>>> # Load scaler class
>>> scaler = Scaler(data=data, columns=['X1', 'X2'])
>>> # Scale our dataset with MinMax method
>>> scaled_df = scaler.MinMax()
>>> print(scaled_df)
... +-----------+-----------+-----------+-----------+-----------+-----------+
... | X0 | X1 | X2 | X3 | X4 | Y |
... +-----------+-----------+-----------+-----------+-----------+-----------+
... | 0.977594 | 1.000000 | 0.000000 | 0.696975 | -1.207098 | 8.501692 |
... | -0.953802 | 0.765898 | 0.185088 | 0.658251 | 0.746814 | -7.186085 |
... | -0.148140 | 0.039781 | 0.552904 | 1.306845 | 0.269834 | 1.939924 |
... | 0.499385 | 0.000000 | 1.000000 | 0.910503 | 0.857189 | 0.389257 |
... | -0.563977 | 0.207162 | 0.163399 | -0.630345 | -0.486822 | -0.125787 |
... | -0.434994 | 0.249221 | 0.616890 | -0.660236 | -1.197566 | 7.735832 |
... | 0.032478 | 0.351444 | 0.319501 | 1.794769 | 1.239423 | -5.510332 |
... | 0.085569 | 0.175148 | 0.399244 | 0.301771 | 1.278387 | -8.648084 |
... | -0.028844 | 0.273307 | 0.268801 | 0.946077 | -0.359133 | 5.099971 |
... | -0.665312 | 0.491445 | 0.030328 | 0.545625 | 0.499162 | -6.126528 |
... +-----------+-----------+-----------+-----------+-----------+-----------+
>>> # Unscale the new dataset to retreive previous data scale
>>> unscaled_df = scaler.unscaleMinMax(scaled_df)
>>> print(unscaled_df)
... +-----------+-----------+-----------+-----------+-----------+-----------+
... | X0 | X1 | X2 | X3 | X4 | Y |
... +-----------+-----------+-----------+-----------+-----------+-----------+
... | 0.977594 | 1.669510 | -1.385569 | 0.696975 | -1.207098 | 8.501692 |
... | -0.953802 | 1.025392 | -0.639291 | 0.658251 | 0.746814 | -7.186085 |
... | -0.148140 | -0.972473 | 0.843746 | 1.306845 | 0.269834 | 1.939924 |
... | 0.499385 | -1.081926 | 2.646441 | 0.910503 | 0.857189 | 0.389257 |
... | -0.563977 | -0.511933 | -0.726744 | -0.630345 | -0.486822 | -0.125787 |
... | -0.434994 | -0.396210 | 1.101739 | -0.660236 | -1.197566 | 7.735832 |
... | 0.032478 | -0.114952 | -0.097337 | 1.794769 | 1.239423 | -5.510332 |
... | 0.085569 | -0.600019 | 0.224186 | 0.301771 | 1.278387 | -8.648084 |
... | -0.028844 | -0.329940 | -0.301762 | 0.946077 | -0.359133 | 5.099971 |
... | -0.665312 | 0.270254 | -1.263288 | 0.545625 | 0.499162 | -6.126528 |
... +-----------+-----------+-----------+-----------+-----------+-----------+
"""
super(Scaler, self).__init__()
self.data = data.copy()
self.scalers = {}
self.columns = list(columns)
for c in columns:
_min = self.data[c].min()
_max = self.data[c].max()
_mean = self.data[c].mean()
_std = self.data[c].std()
_scale_temp = {'min': float(_min),
'max': float(_max),
'mean': float(_mean),
'std': float(_std),
}
self.scalers.update({c: _scale_temp})
def _col_to_list(self, columns):
"""Transforms column names to be scaled as list.
:param columns: Column names to be scaled.
:type columns: :obj:`list` or :obj:`str`
:return: Column name(s) as a list
:rtype: :obj:`list`
"""
if columns is None:
cols = self.columns
elif type(columns) == str:
cols = [columns]
else:
cols = columns
return cols
def MinMax(self, data=None, columns=None, feature_range=(0, 1), col_suffix=''):
"""Min-max scaler. Data we range between 0 and 1.
:param data: Data set to scale, defaults to None, takes data provided in :py:meth:`__init__`, defaults to None.
:type data: :obj:`pandas.DataFrame`, optional
:param columns: Columns to be scaled, defaults to None, takes the list provided in :py:meth:`__init__`, defaults to None.
:type columns: :obj:`list`, optional
:param feature_range: Expected value range of the scaled data, defaults to (0, 1).
:type feature_range: :obj:`tuple`, optional
:param col_suffix: Suffix to add to colum names, defaults to '', overrides the existing columns.
:type col_suffix: :obj:`str`, optional
:formula: .. math:: x_{\\text{scaled}} = \\dfrac{x - \\min(x)}{\\max(x) - \\min(x)} \\cdot (f\\_max - f\\_min) + f\\_min
where :math:`(f\\_min, f\\_max)` defaults to :math:`(0, 1)` and corresponds to the expected data range of the scaled data from argument :obj:`feature_range`.
:return: Data set with scaled features.
:rtype: :obj:`pandas.DataFrame`
"""
self._minmax_suffix = col_suffix
self._minmax_feature_range = feature_range
f_min, f_max = self._minmax_feature_range
cols = self._col_to_list(columns)
df = self.data if data is None else data.copy()
for c in cols:
# Retreive min and max
_min = self.scalers[c]['min']
_max = self.scalers[c]['max']
tmp = (df[c] - _min) / (_max - _min)
df[c + col_suffix] = tmp * (f_max - f_min) + f_min
return df
def unscaleMinMax(self, data=None, columns=None, columns_mapping={}):
"""Unscale from min-max.
Retreives data from the same range as the original features.
:param data: Data set to unscale, defaults to None, takes data provided in :py:meth:`__init__`.
:type data: :obj:`pandas.DataFrame`, optional
:param columns: Columns to be unscaled, defaults to None, takes the list provided in :py:meth:`__init__`.
:type columns: :obj:`list`, optional
:param columns_mapping: Mapping between eventual renamed columns and original scaled column.
:type columns_mapping: :obj:`dict`, optional
:formula: .. math:: x_{\\text{unscaled}} = x_{\\text{scaled}} \\cdot \\left(\\max(x) - \\min(x) \\right) + \\min(x)
:return: Unscaled data set.
:rtype: :obj:`pandas.DataFrame`
"""
cols = self._col_to_list(columns)
df = self.data if data is None else data.copy()
unscale_suffix = '_unscaled' if self._minmax_suffix != '' else ''
f_min, f_max = self._minmax_feature_range
for c in cols:
# Apply eventual column name mapping
_c = c if columns_mapping.get(c) is None else columns_mapping.get(c)
# Retreive min and max
_min = self.scalers[_c]['min']
_max = self.scalers[_c]['max']
tmp = (df[c + self._minmax_suffix] - f_min) / (f_max - f_min)
df[c + unscale_suffix] = (tmp * (_max - _min)) + _min
return df
def Normalize(self, center=True, reduce=True, data=None, columns=None, col_suffix=''):
"""Data normalizer.
Centers and reduces features (from mean and standard deviation).
:param center: Center the variable, i.e. substract the mean, defaults to True.
:type center: :obj:`bool`, optional
:param reduce: Reduce the variable, i.e. devide by standard deviation, defaults to True.
:type reduce: :obj:`bool`, optional
:param data: Data set to normalize, defaults to None, takes data provided in :py:meth:`__init__`.
:type data: :obj:`pandas.DataFrame`, optional
:param columns: Columns to be normalize, defaults to None, takes the list provided in :py:meth:`__init__`.
:type columns: :obj:`list`, optional
:param col_suffix: [description], defaults to ''
:type col_suffix: :obj:`str`, optional
:formula: .. math:: x_{\\text{scaled}} = \\dfrac{x - \\bar{x}}{\\sqrt{\\mathbb{V}(x)}}
:return: Data set with normalized features.
:rtype: :obj:`pandas.DataFrame`
"""
self._standard_suffix = col_suffix
cols = self._col_to_list(columns)
df = self.data if data is None else data.copy()
for c in cols:
# Retreive mean
if center:
_mean = self.scalers[c]['mean']
self.scalers[c].update({'center': True})
else:
_mean = 0.
self.scalers[c].update({'center': False})
# Retreive std
if reduce:
_std = self.scalers[c]['std']
self.scalers[c].update({'reduce': True})
else:
_std = 1.
self.scalers[c].update({'reduce': False})
df[c + col_suffix] = (df[c] - _mean) / _std
return df
def unscaleNormalize(self, data=None, columns=None, columns_mapping={}):
"""Denormalize data to retreive the same range as the original data set.
:param data: Data set to unscale, defaults to None, takes data provided in :py:meth:`__init__`.
:type data: :obj:`pandas.DataFrame`, optional
:param columns: Columns to be unscaled, defaults to None, takes the list provided in :py:meth:`__init__`.
:type columns: :obj:`list`, optional
:param columns_mapping: Mapping between eventual renamed columns and original scaled column.
:type columns_mapping: :obj:`dict`, optional
:formula: .. math:: x_{\\text{unscaled}} = x_{\\text{scaled}} \\cdot \\sqrt{\\mathbb{V}(x)} + \\bar{x}
:return: De-normalized data set.
:rtype: :obj:`pandas.DataFrame`
"""
cols = self._col_to_list(columns)
df = self.data if data is None else data.copy()
unscale_suffix = '_unscaled' if self._standard_suffix != '' else ''
for c in cols:
# Apply eventual column name mapping
_c = c if columns_mapping.get(c) is None else columns_mapping.get(c)
# Retreive min and max
_mean = self.scalers[_c]['mean'] if self.scalers[_c]['center'] else 0.
_std = self.scalers[_c]['std'] if self.scalers[_c]['reduce'] else 1.
df[c + unscale_suffix] = (df[c + self._standard_suffix] * _std) + _mean
return df
|
[
"florian.website.mail@gmail.com"
] |
florian.website.mail@gmail.com
|
397bd82f3974cc589904835d4c50f7e3f60e39a9
|
8cb6b6b233be0f53d86230e7be7fe2c12e04d3ad
|
/Titanic/code.py
|
f8fdce6d968749c2356532056f03dd6f3281448e
|
[] |
no_license
|
benignavesh/kaggle
|
09a6b9f0773962f22efdc042d776110712d4cf41
|
c5a3f4722d2baf63dbe678e5817a651fb30da967
|
refs/heads/master
| 2020-03-20T02:26:37.284051
| 2018-06-12T18:39:45
| 2018-06-12T18:39:45
| 137,113,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
import pandas as pd

# Kaggle Titanic: train a logistic-regression survival classifier and write
# predictions for the test split to submission.csv.
dataset_train = pd.read_csv('train.csv')
dataset_test = pd.read_csv('test.csv')

# Survived label plus the Pclass, Sex, Age, SibSp, Parch columns (the two
# files have different layouts, hence the different column indices).
labels_train = dataset_train.iloc[:, [1]].values
features_train = dataset_train.iloc[:, [2, 4, 5, 6, 7]].values
features_test = dataset_test.iloc[:, [1, 3, 4, 5, 6]].values

# Fill missing ages with the median of the TRAINING set only.  Re-fitting the
# imputer on the test set (as before) leaks test statistics into preprocessing.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='median', axis=0)
imputer = imputer.fit(features_train[:, [2]])
features_train[:, [2]] = imputer.transform(features_train[:, [2]])
features_test[:, [2]] = imputer.transform(features_test[:, [2]])

# Encode male/female as integers.  Fit on train and reuse the same fitted
# encoder on test so both splits share identical codes (a second fit_transform
# could assign different integers if category order differed).
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
features_train[:, 1] = labelencoder.fit_transform(features_train[:, 1])
features_test[:, 1] = labelencoder.transform(features_test[:, 1])

# One-hot encode Pclass; again fit only on the training data and apply the
# fitted encoder to the test data.
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features=[0])
features_train = onehotencoder.fit_transform(features_train).toarray()
features_test = onehotencoder.transform(features_test).toarray()

# Fit the classifier and predict survival for the test passengers.
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(features_train, labels_train.ravel())  # ravel: sklearn expects a 1-D target
labels_pred = classifier.predict(features_test)

df = pd.DataFrame(labels_pred)
df.to_csv("submission.csv")
|
[
"noreply@github.com"
] |
benignavesh.noreply@github.com
|
cc7e2f7e8d161493bd6b230d519996a73308c768
|
7b6377050fba4d30f00e9fb5d56dfacb22d388e1
|
/brownies/bin/lev-vis.py
|
f897e5f5de2f2be76fa12584493985dffe688620
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LLNL/fudge
|
0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370
|
6ba80855ae47cb32c37f635d065b228fadb03412
|
refs/heads/master
| 2023-08-16T21:05:31.111098
| 2023-08-01T22:09:32
| 2023-08-01T22:09:32
| 203,678,373
| 21
| 4
|
NOASSERTION
| 2023-06-28T20:51:02
| 2019-08-21T23:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 7,520
|
py
|
#! /usr/bin/env python
import sys
import os
import argparse
import brownies.BNL.RIPL.level_density as LD
import brownies.BNL.RIPL.level_scheme as LS
from PoPs.chemicalElements .misc import symbolFromZ as elementSymbolFromZ
import numpy as np
import matplotlib.pyplot as plt
HOME=os.environ['HOME']
DESKTOP=HOME+'/Desktop'
ATLAS=DESKTOP+"/atlas/"
RIPL=DESKTOP+"/empire.trunk/RIPL/"
# --------------------------------------------------------------------
# Command line
# --------------------------------------------------------------------
def parse_args():
    """Build and evaluate the command-line parser for this script.

    :return: parsed namespace with Z, A, verbose, RIPL, ATLAS and dEmax
    """
    cli = argparse.ArgumentParser(description="Plot ENTIRE level schemes, as determined from RIPL and the Atlas")
    # Positional nucleus identification.
    cli.add_argument('Z', type=int, help='Nucleus charge')
    cli.add_argument('A', type=int, help="Nuclear atomic number")
    # -v and -q toggle the same 'verbose' flag on and off.
    cli.add_argument('-v', dest='verbose', default=False, action='store_true', help='Run verbosely')
    cli.add_argument('-q', dest='verbose', action='store_false', help='Run quietly')
    # Data locations default to the module-level RIPL/ATLAS paths.
    cli.add_argument('--RIPL', default=RIPL, help="Path to RIPL files")
    cli.add_argument('--ATLAS', default=ATLAS, help="Path to atlas project")
    cli.add_argument('--dEmax', type=float, default=2.0, help="Plot from Emin=0 to Emax=Esep+dEmax, dEmax in MeV")
    return cli.parse_args()
# -----------------------------------------------------------------------
# Set defaults
# -----------------------------------------------------------------------
args = parse_args()
Z, A=args.Z, args.A
# Isotope labels such as 'Pu239'; the target of the n+target reaction is the
# (Z, A-1) isotope, whose compound nucleus is (Z, A).
elem=elementSymbolFromZ[Z]
sym='%s%i'%(elem, A)
symTarget='%s%i'%(elem, A-1) # for n+target = isotope of interest
ld, D, levs=None, None, None  # level density, mean level spacing, level scheme (filled below)
sys.path.append(args.ATLAS)
import atlas.io as aio  # deferred import: the --ATLAS path must be on sys.path first
# -----------------------------------------------------------------------
# Get data
# -----------------------------------------------------------------------
# Get the level scheme for Z,A. From RIPL-3.
fname=args.RIPL+"/levels/z%s.dat"%str(Z).zfill(3)
with open(fname, mode='r') as f:
    levMap=LS.readRIPLLevelScheme(f.read(), verbose=False)
levs=levMap[sym]
# Ground-state spin/parity of the target (Z, A-1) and compound (Z, A) nuclei.
spinTarget = levMap[symTarget].levels[0].spin
parityTarget = levMap[symTarget].levels[0].parity
spin0=levMap[sym].levels[0].spin
parity0=levMap[sym].levels[0].parity
if args.verbose:
    print('target:', symTarget, spinTarget, parityTarget)
    print('compound:', sym, spin0, parity0)
    print(levs.name, levs.Sn)
    print(levs.levelReport())
# Get the HFB level density for Z,A. From RIPL-3.
fname=args.RIPL+"/densities/total/level-densities-hfb/z%s.tab"%str(Z).zfill(3)
with open(fname, mode='r') as f:
    ld=LD.readHFBMLevelDensityTable(f.read(), Z, A, verbose=True)
# Get the mean level spacing for the resonance region for Z,A-1.
# The compound nucleus # for neutron + Z,A-1 is Z,A. From RIPL-3.
# FIXME
# Get the resonances for Z,A-1. The compound nucleus for neutron + Z,A-1 is Z,A.
# From the Atlas of Neutron Resonances, 6th edition.
try:
    res = aio.read_atlas(isotope=None, element=None, Z=Z, A=A-1, ZA=None, verbose=False)
except KeyError:
    # No Atlas entry for this target; resonance plotting is skipped below.
    res = None
if args.verbose and res is not None:
    for r in res.resonance_parameters:
        print('\t'.join([str(x) for x in r]))
# Plot window: from 0 up to the neutron separation energy plus --dEmax (MeV).
icut = levs.lastLevelInCompleteScheme
Ecut = levs.levels[icut].energy.value
Esep = levs.Sn.value
Emin = 0.0
Emax = Esep+args.dEmax
# -----------------------------------------------------------------------
# Hi!
# -----------------------------------------------------------------------
# Easter-egg greeting the author left in the script; output is unchanged.
for greeted in ("Angie", "Nathaniel", "Mami"):
    print('hi ' + greeted)
# -----------------------------------------------------------------------
# Make plot
# -----------------------------------------------------------------------
# Set up axes and title
plt.title(levs.name)
plt.xlabel("$\Pi*J$")  # x axis: parity-signed spin, so negative-parity levels sit left of zero
plt.ylabel("$E^*$ (MeV)")  # y axis: excitation energy
# Widget to get J & Pi, a common theme
def get_J_and_Pi(lev, useNone=True):
    """Return the (J, Pi) pair for a level.

    Bug fix: the original accepted a ``__lev`` parameter but then read the
    module-level global ``lev`` for everything after the first check; it only
    worked because every caller happened to pass that same global.  The
    parameter is now used consistently.

    :param lev: level object exposing ``spin`` and ``parity`` attributes.
    :param useNone: if True, unknown spin/parity come back as None; otherwise
        placeholders are substituted (J=-11.33333, Pi=1.0) so the level can
        still be drawn at a sentinel x-position.
    :return: tuple (J, Pi) of floats, or None entries when unknown and useNone.
    """
    if lev.spin is None:
        J = None if useNone else -11.33333
    else:
        J = float(lev.spin)
    if lev.parity is None:
        Pi = None if useNone else 1.0
    else:
        Pi = float(str(lev.parity))
    return J, Pi
# Plot discrete levels with complete information
if True:
    x, y, xerr = [], [], [] # for completely known levels
    for lev in levs.levels:
        J, Pi=get_J_and_Pi(lev, True)
        if J is None or Pi is None:
            pass
        else:
            if lev.energy.value > Emax:
                continue
            y.append(lev.energy.value)
            x.append(J*Pi)
            xerr.append(0.25)  # fixed half-width of the level marker on the J*Pi axis
    plt.errorbar(x=x, y=y, xerr=xerr, linewidth=0, elinewidth=2, color='k')
# Highlight ground state
if True:
    plt.errorbar(x=[float(spin0)*int(parity0)], y=[0.0], xerr=[0.25], linewidth=0, elinewidth=2, color='g')
# Highlight gamma transitions
if False:
    raise NotImplementedError("Highlight gamma transitions")
# Plot discrete levels missing either J or Pi
if True:
    x, y, xerr = [], [], [] # for levels with incomplete J/Pi (drawn with placeholders)
    for lev in levs.levels:
        J, Pi=get_J_and_Pi(lev, True)
        if J is None or Pi is None:
            # Substitute placeholder values so the level can still be drawn.
            J, Pi=get_J_and_Pi(lev, False)
            if lev.energy.value > Emax:
                continue
            y.append(lev.energy.value)
            x.append(J*Pi)
            xerr.append(0.25)
        else:
            pass
    plt.errorbar(x=x, y=y, xerr=xerr, linewidth=0, elinewidth=2, color='blue')
# Highlight rotational bands
if False:
    raise NotImplementedError("Highlight rotational bands")
# Highlight vibrational "bands"
if False:
    raise NotImplementedError('Highlight vibrational "bands"')
# Plot Ecut (top of the experimentally complete part of the level scheme)
if True:
    plt.axhline(y=Ecut, color='black', alpha=0.25, linestyle=':')
    plt.text(11, Ecut+0.1, r'$E_{cut}$')
# Plot level density contour plot
if True:
    # Build the (J*Pi, Ex) grid over which the HFB level density is contoured.
    JPigrid = []
    for Pi in ld.spin_dep_level_density:
        for twoJ in ld.spin_dep_level_density[Pi].keys():
            if twoJ > 30: continue
            JPigrid.append(Pi*twoJ/2.0)
    JPigrid.sort()
    JPigrid = np.array(JPigrid)
    Exgrid = np.arange(Emin, Emax, 0.1) # np.arange(Ecut, 5.0+Esep, 0.1)
    X, Y = np.meshgrid(JPigrid, Exgrid)
    vevaluate = np.vectorize(ld.evaluate)
    # NOTE(review): this rebinds Z (previously the nuclear charge) to the
    # contour grid; the charge is not used again below, but confirm.
    Z = vevaluate(Y, np.abs(X), np.sign(X))
    CS = plt.contour(X, Y, Z, levels=[0, 1, 5]+[int(x) for x in np.logspace(1,3,14)])
    plt.clabel(CS, fontsize=9, inline=1)
# Plot Esep (neutron separation energy)
if True:
    plt.axhline(y=Esep, color='black', alpha=0.25)
    plt.text(11, Esep+0.1, r'$E_{sep}$')
# Plot box of levels that can be excited by n+target reactions
if False:
    raise NotImplementedError("Plot box of levels that can be excited by n+target reactions")
# Plot resonances with known J
if True and res is not None:
    x, y, xerr, yerr = [], [], [], [] # for resonances with assigned J and L
    for r in res.resonance_parameters:
        Er=r[0].value*1e-6  # resonance energy converted from eV to MeV
        if Er < 0.0:
            continue
        Er += Esep  # resonances sit above the neutron separation energy
        J, L = r[1].spin, r[2].spin
        if J is None or L is None:
            continue
        Pi = pow(-1, int(L)) * int(parityTarget) # FIXME: check math!
        y.append(Er)
        x.append(float(J)*Pi)
        # FIXME: fuzzy band from width
        xerr.append(0.25)
        yerr.append(0.0)
    plt.errorbar(x=x, y=y, xerr=xerr, linewidth=0, elinewidth=2, color='red')
# Highlight primary gammas
if False:
    raise NotImplementedError("Highlight primary gammas")
# Plot resonances with unknown J
if False:
    raise NotImplementedError("Plot resonances with unknown J")
plt.show()
|
[
"mattoon1@llnl.gov"
] |
mattoon1@llnl.gov
|
ead54b3adea54e5e8b36d168ada47fa6b99b2957
|
513b9bd0fca9e219a8d33cd9352f45f401a6b533
|
/python/test/test_actions.py
|
e0d04fe5a6a610a1fa991f18417870ef0b07e9ce
|
[] |
no_license
|
EdginAround/edgin_around_api
|
a50b3cd03b992b68f2f42b3616c86534a9ad1b8f
|
0c97610385c409b1c4ceb3472d977bbb8d2dd45e
|
refs/heads/main
| 2023-06-01T00:13:23.219528
| 2021-06-27T19:21:20
| 2021-06-30T14:14:26
| 354,341,869
| 0
| 0
| null | 2021-06-30T14:14:27
| 2021-04-03T16:36:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,949
|
py
|
import unittest
from typing import Any, Dict
from . import common
from edgin_around_api import actions
class ActionsTest(common.SerdeTest):
    """Serialisation round-trip tests for the action schemas."""

    def test_serde_actor_deletion(self) -> None:
        """Round-trip an ActorDeletionAction through its schema."""
        payload: Dict[str, Any] = {"type": "actor_deletion", "actor_ids": [1, 4, 2]}
        self.assert_serde(payload, actions.ActionSchema(), actions.ActorDeletionAction)

    def test_serde_damage(self) -> None:
        """Round-trip a DamageAction through its schema."""
        payload: Dict[str, Any] = {
            "type": "damage",
            "dealer_id": 3,
            "receiver_id": 8,
            "variant": "CHOP",
            "hand": "RIGHT",
        }
        self.assert_serde(payload, actions.ActionSchema(), actions.DamageAction)

    def test_serde_stat_update(self) -> None:
        """Round-trip a StatUpdateAction through its schema."""
        payload: Dict[str, Any] = {
            "type": "stat_update",
            "actor_id": 3,
            "stats": {"hunger": 40.0, "max_hunger": 100.0},
        }
        self.assert_serde(payload, actions.ActionSchema(), actions.StatUpdateAction)

    # NOTE(review): stray class attribute kept for behavioural parity; it
    # looks like leftover debug data -- confirm it can be deleted.
    d = '{"actor_id": 0, "stats": {"hunger": 0.0, "max_hunger": 100.0}, "type": "stat_update"}'

    def test_serde_to_string(self) -> None:
        """Check that Action.to_string emits every field as JSON."""
        motion = actions.MotionAction(
            actor_id=0,
            speed=7.0,
            bearing=30.0,
            duration=1.0,
        )
        encoded = motion.to_string()
        self.assertTrue('"actor_id": 0' in encoded)
        self.assertTrue('"bearing": 30.0' in encoded)
        self.assertTrue('"duration": 1.0' in encoded)
        self.assertTrue('"speed": 7.0' in encoded)
        self.assertTrue('"type": "motion"' in encoded)
|
[
"wojciech.kluczka@gmail.com"
] |
wojciech.kluczka@gmail.com
|
2f0309d995be662395d1be6753bdcb2d17c2133e
|
7c1a0ee337c4407768c343518ebee7f8a1b540ef
|
/env/lib/python3.6/sre_parse.py
|
19de139eab2af7ff41ea9fcb49c30ccd7c952d3b
|
[] |
no_license
|
spmarisa/flask_demo
|
8dcce02b43664da0b6afe97975e70a675425e22f
|
e5be66cfe1ebddc130875fb9fddc294d81085a0e
|
refs/heads/master
| 2020-03-23T19:58:39.017626
| 2018-07-23T12:51:14
| 2018-07-23T12:51:14
| 142,013,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
/home/phaneendra/anaconda3/lib/python3.6/sre_parse.py
|
[
"phaneendra.marisa@gmail.com"
] |
phaneendra.marisa@gmail.com
|
83518dea03fcf81599d027eab229b0560c3ccfbe
|
7de280a2cb54bdec1eaa3b683097974b7a2c0964
|
/main.py
|
6bb5ae1f4496ffa7314e3a9a4a160bc53285b558
|
[] |
no_license
|
kpiaskowski/DL_hand_gestures
|
f1d2e748a9587d07e9593380f6a9ce01171d073c
|
3d65b18821208195b89b5ce1f63e002da03f315e
|
refs/heads/master
| 2021-09-10T06:48:20.220804
| 2018-03-21T20:44:22
| 2018-03-21T20:44:22
| 125,994,782
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,811
|
py
|
import os
import tensorflow as tf
from architectures.decoder import decoder_model
from architectures.latent_space import latent_space
from architectures.pretrained_encoder import encoder_model
from vae_dataprovider import DataProvider
# Training hyper-parameters.
epochs = 50
batch_size = 20
latent_units = 200  # dimensionality of the VAE latent space
l_rate = 0.00001  # Adam learning rate
label_w = 64  # width of the target label maps
label_h = 48  # height of the target label maps
model_name = 'model'
saver_checkpoint = 2000  # save a checkpoint every N training batches
# data
data_provider = DataProvider(batch_size, root_folder='../data', label_w=label_w, label_h=label_h)
train_num_batches, val_num_batches = data_provider.get_num_batches()
training_dataset_init, val_dataset_init, images, labels = data_provider.get_data()
# model: encoder -> latent space (mean/stddev) -> decoder
encoder = encoder_model(images)
latent_vector, mean, stddev = latent_space(encoder, latent_units)
predictions = decoder_model(latent_vector)
# losses
# Reconstruction term: squared error between decoder output and labels.
generative_loss = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(predictions, labels), axis=1), axis=1)
# KL-divergence term against a unit Gaussian prior.  The 2.0*stddev factors
# suggest `stddev` holds log standard deviations -- TODO confirm in latent_space().
latent_loss = -0.5 * tf.reduce_mean(1.0 + 2.0 * stddev - tf.square(mean) - tf.exp(2.0 * stddev), 1)
loss = tf.reduce_mean(generative_loss + latent_loss)
# summaries: show input image, ground-truth label and prediction side by side
preview_images = tf.reshape(images, [-1, 240, 320, 3])
preview_predictions = tf.expand_dims(predictions, -1)
preview_predictions = tf.image.grayscale_to_rgb(preview_predictions)
preview_predictions = tf.image.resize_images(preview_predictions, [240, 320])
preview_labels = tf.reshape(labels, [-1, label_h, label_w])
preview_labels = tf.expand_dims(preview_labels, -1)
preview_labels = tf.image.grayscale_to_rgb(preview_labels)
preview_labels = tf.image.resize_images(preview_labels, [240, 320])
concat_preview = tf.concat([preview_images, preview_labels, preview_predictions], 2)
tf.summary.image('predictions', concat_preview, max_outputs=5)
tf.summary.scalar('loss', loss)
train_op = tf.train.AdamOptimizer(l_rate).minimize(loss)
merged = tf.summary.merge_all()
# Two savers: one restores only the pretrained 'yolo'-scoped encoder weights,
# the other checkpoints the full model during training.
pretrained_loader = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='yolo'))
saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
with tf.Session() as sess:
    train_writer = tf.summary.FileWriter('summaries/train/' + model_name, flush_secs=60)
    val_writer = tf.summary.FileWriter('summaries/val/' + model_name, flush_secs=60)
    sess.run(tf.global_variables_initializer())
    # Resume from a previous checkpoint when one exists; otherwise start from
    # the ImageNet-pretrained encoder weights only.
    # NOTE(review): any([...]) wraps a single membership test and matches the
    # bare name, not a checkpoint file pattern -- confirm intent.
    if any([model_name in os.listdir('saved_model')]):
        saver.restore(sess, 'saved_model/' + model_name + '.ckpt')
        print('Continuing training')
    else:
        pretrained_loader.restore(sess, '../pretrained_imagenet/pretrained_imagenet.ckpt')  # load encoder pretrained on imagenet
        print('Training from scratch')
    for epoch in range(epochs):
        # training
        sess.run(training_dataset_init)  # (re)start the training-data iterator
        i = 0
        while True:
            try:
                _, cost, summary = sess.run([train_op, loss, merged])
                train_writer.add_summary(summary, epoch * train_num_batches + i)
                train_writer.flush()
                print('Training epoch: {} of {}, batch: {} of {}, cost: {}'.format(epoch, epochs, i, train_num_batches, cost))
                i += 1
                if i % saver_checkpoint == 0:
                    saver.save(sess, 'saved_model/' + model_name + '.ckpt', global_step=epoch * train_num_batches + i)
            except tf.errors.OutOfRangeError:
                # Dataset iterator exhausted -- end of this training epoch.
                break
        # validation
        sess.run(val_dataset_init)
        i = 0
        while True:
            try:
                cost, summary = sess.run([loss, merged])
                val_writer.add_summary(summary, epoch * val_num_batches + i)
                val_writer.flush()
                print('Validation epoch: {} of {}, batch: {} of {}, cost: {}'.format(epoch, epochs, i, val_num_batches, cost))
                i += 1
            except tf.errors.OutOfRangeError:
                break
|
[
"kar.piaskowski@gmail.com"
] |
kar.piaskowski@gmail.com
|
56c82dd9a2f16f67ef47c7062fa1ce5db1ae45cf
|
029948b3fd0e41d80d66c84d808abff4fcb24ac8
|
/test/test_path_response_result_response_egress_physical_interface.py
|
b1100af354156581698006d61033889305c3445f
|
[] |
no_license
|
yijxiang/dnac-api-client
|
842d1da9e156820942656b8f34342d52c96d3c37
|
256d016e2df8fc1b3fdad6e28f441c6005b43b07
|
refs/heads/master
| 2021-09-25T21:10:09.502447
| 2018-10-25T14:39:57
| 2018-10-25T14:39:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# coding: utf-8
"""
Cisco DNA Center Platform v. 1.2.x (EFT)
REST API (EFT) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dnac_api_client
from dnac_api_client.models.path_response_result_response_egress_physical_interface import PathResponseResultResponseEgressPhysicalInterface # noqa: E501
from dnac_api_client.rest import ApiException
class TestPathResponseResultResponseEgressPhysicalInterface(unittest.TestCase):
    """Unit-test stubs for the PathResponseResultResponseEgressPhysicalInterface model."""

    def setUp(self):
        """No fixtures required for these generated stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testPathResponseResultResponseEgressPhysicalInterface(self):
        """Test PathResponseResultResponseEgressPhysicalInterface"""
        # FIXME: construct object with mandatory attributes with example values
        # model = dnac_api_client.models.path_response_result_response_egress_physical_interface.PathResponseResultResponseEgressPhysicalInterface()  # noqa: E501


if __name__ == '__main__':
    unittest.main()
|
[
"cunningr@cisco.com"
] |
cunningr@cisco.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.